text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# flake8: noqa
import sys
import subprocess
from .exceptions import PyperclipException
# Error text raised when no platform copy/paste mechanism could be found.
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
# True when running under Python 2.
PY2 = sys.version_info[0] == 2
# Native text type: `unicode` on Python 2, `str` on Python 3.
# (`unicode` is only evaluated when PY2 is true, so this is safe on Python 3.)
text_type = unicode if PY2 else str
def init_osx_clipboard():
    """Build (copy, paste) functions backed by macOS's pbcopy/pbpaste tools."""

    def copy_osx(text):
        # Feed the UTF-8 encoded text to pbcopy over stdin.
        proc = subprocess.Popen(['pbcopy', 'w'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_osx():
        # Capture pbpaste's stdout and decode it back to text.
        proc = subprocess.Popen(['pbpaste', 'r'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_osx, paste_osx
def init_gtk_clipboard():
    """Build (copy, paste) functions backed by the PyGTK clipboard API."""
    import gtk

    def copy_gtk(text):
        global cb
        # Keep a module-level reference so the clipboard object survives
        # after this call; store() pushes the contents to the clipboard.
        cb = gtk.Clipboard()
        cb.set_text(text)
        cb.store()

    def paste_gtk():
        contents = gtk.Clipboard().wait_for_text()
        # for python 2, a blank clipboard yields None; normalise to ''.
        return '' if contents is None else contents

    return copy_gtk, paste_gtk
def init_qt_clipboard():
    """Build (copy, paste) functions backed by the Qt application clipboard."""
    # $DISPLAY should exist
    from PyQt4.QtGui import QApplication

    app = QApplication([])

    def copy_qt(text):
        app.clipboard().setText(text)

    def paste_qt():
        return text_type(app.clipboard().text())

    return copy_qt, paste_qt
def init_xclip_clipboard():
    """Build (copy, paste) functions that shell out to the xclip utility."""

    def copy_xclip(text):
        # Write the CLIPBOARD selection via xclip's stdin.
        proc = subprocess.Popen(['xclip', '-selection', 'c'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_xclip():
        # -o prints the current CLIPBOARD selection to stdout.
        proc = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_xclip, paste_xclip
def init_xsel_clipboard():
    """Build (copy, paste) functions that shell out to the xsel utility."""

    def copy_xsel(text):
        # -b targets the CLIPBOARD buffer, -i reads input from stdin.
        proc = subprocess.Popen(['xsel', '-b', '-i'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_xsel():
        # -o writes the CLIPBOARD buffer to stdout.
        proc = subprocess.Popen(['xsel', '-b', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_xsel, paste_xsel
def init_klipper_clipboard():
    """Build (copy, paste) functions that talk to KDE's Klipper over qdbus."""

    def copy_klipper(text):
        # The text travels as a qdbus command-line argument, not via stdin.
        proc = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
             text.encode('utf-8')],
            stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=None)

    def paste_klipper():
        proc = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
            stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()

        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        contents = out.decode('utf-8')
        # even if blank, Klipper will append a newline at the end
        assert len(contents) > 0
        # make sure that newline is there
        assert contents.endswith('\n')
        if contents.endswith('\n'):
            contents = contents[:-1]
        return contents

    return copy_klipper, paste_klipper
def init_no_clipboard():
    """Build placeholder (copy, paste) callables for unsupported systems.

    Calling either placeholder raises PyperclipException; both evaluate as
    falsy so callers can cheaply test whether a real mechanism was found.
    """

    class ClipboardUnavailable(object):

        def __call__(self, *args, **kwargs):
            raise PyperclipException(EXCEPT_MSG)

        if PY2:
            def __nonzero__(self):
                return False
        else:
            def __bool__(self):
                return False

    return ClipboardUnavailable(), ClipboardUnavailable()
| andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/util/clipboard/clipboards.py | Python | apache-2.0 | 3,984 | [
"VisIt"
] | 935b334b814d5d12906dce55a00da748fe6b40b51f283c2f3c10e7b0c0322ab1 |
"""
The recovery operators used for lowest-order advection schemes.
"""
from enum import Enum
import ufl
from firedrake import (BrokenElement, Constant, DirichletBC, FiniteElement,
Function, FunctionSpace, Interpolator, Projector,
SpatialCoordinate, TensorProductElement,
VectorFunctionSpace, as_vector, function, interval)
from firedrake.utils import cached_property
from gusto import kernels
__all__ = ["Averager", "Boundary_Method", "Boundary_Recoverer", "Recoverer"]
class Averager(object):
    """
    An object that 'recovers' a low order field (e.g. in DG0)
    into a higher order field (e.g. in CG1).

    The code is essentially that of the Firedrake Projector
    object, using the "average" method, and could possibly
    be replaced by it if it comes into the master branch.

    :arg v: the :class:`ufl.Expr` or
            :class:`.Function` to project.
    :arg v_out: :class:`.Function` to put the result in.
    """

    def __init__(self, v, v_out):

        # Only UFL expressions or Functions can be recovered.
        if not isinstance(v, (ufl.core.expr.Expr, function.Function)):
            raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(v))

        # Check shape values
        if v.ufl_shape != v_out.ufl_shape:
            raise RuntimeError('Shape mismatch between source %s and target function spaces %s in project' % (v.ufl_shape, v_out.ufl_shape))

        self._same_fspace = (isinstance(v, function.Function) and v.function_space() == v_out.function_space())
        self.v = v
        self.v_out = v_out
        self.V = v_out.function_space()

        # Check the number of local dofs: the averaging kernel maps DOFs of
        # one cell onto DOFs of the same cell, so the counts must agree.
        if self.v_out.function_space().finat_element.space_dimension() != self.v.function_space().finat_element.space_dimension():
            raise RuntimeError("Number of local dofs for each field must be equal.")

        self.average_kernel = kernels.Average(self.V)

    @cached_property
    def _weighting(self):
        """
        Generates a weight function for computing a projection via averaging.

        Computed lazily and cached, as it only depends on the target space.
        """
        w = Function(self.V)
        weight_kernel = kernels.AverageWeightings(self.V)
        weight_kernel.apply(w)
        return w

    def project(self):
        """
        Apply the recovery and return the recovered function.
        """
        # Ensure that the function being populated is zeroed out, since the
        # averaging kernel accumulates into it.
        self.v_out.dat.zero()
        self.average_kernel.apply(self.v_out, self._weighting, self.v)
        return self.v_out
class Boundary_Method(Enum):
    """
    Enumerates the two boundary-correction strategies:

    dynamics -- corrects a field recovered into CG1.
    physics  -- corrects a field recovered into the temperature space.
    """

    dynamics = 0
    physics = 1
class Boundary_Recoverer(object):
    """
    An object that performs a `recovery` process at the domain
    boundaries that has second order accuracy. This is necessary
    because the :class:`Averager` object does not recover a field
    with sufficient accuracy at the boundaries.

    The strategy is to minimise the curvature of the function in
    the boundary cells, subject to the constraints of conserved
    mass and continuity on the interior facets. The quickest way
    to perform this is by using the analytic solution and a parloop.

    Currently this is only implemented for the (DG0, DG1, CG1)
    set of spaces, and only on a `PeriodicIntervalMesh` or
    'PeriodicUnitIntervalMesh` that has been extruded.

    :arg v_CG1: the continuous function after the first recovery
             is performed. Should be in CG1. This is correct
             on the interior of the domain.
    :arg v_DG1: the function to be output. Should be in DG1.
    :arg method: a Boundary_Method Enum object.
    :arg eff_coords: the effective coordinates of the iniital recovery.
             This must be provided for the dynamics Boundary_Method.
    """

    def __init__(self, v_CG1, v_DG1, method=Boundary_Method.physics, eff_coords=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        # Snapshot of the uncorrected DG1 field; apply() copies v_DG1 here so
        # the correction kernel can read old values while writing new ones.
        self.v_DG1_old = Function(v_DG1.function_space())
        self.eff_coords = eff_coords

        self.method = method
        mesh = v_CG1.function_space().mesh()
        DG0 = FunctionSpace(mesh, "DG", 0)
        CG1 = FunctionSpace(mesh, "CG", 1)

        # Build an equispaced DG1 space; extruded meshes use a tensor product
        # of horizontal and vertical DG1 elements.
        if DG0.extruded:
            cell = mesh._base_mesh.ufl_cell().cellname()
            DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
            DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
            DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
        else:
            cell = mesh.ufl_cell().cellname()
            DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
        DG1 = FunctionSpace(mesh, DG1_element)

        # DG0 marker field: 1.0 in cells on the domain boundary, 0.0 elsewhere.
        self.num_ext = find_domain_boundaries(mesh)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != CG1:
                raise NotImplementedError("This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != DG1:
                raise NotImplementedError("This boundary recovery method requires v_out to be in DG1.")
            if eff_coords is None:
                raise ValueError('Need eff_coords field for dynamics boundary methods')
        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not DG0.extruded:
                raise NotImplementedError('The physics boundary method only works on extruded meshes')
            # base spaces
            cell = mesh._base_mesh.ufl_cell().cellname()
            w_hori = FiniteElement("DG", cell, 0, variant="equispaced")
            w_vert = FiniteElement("CG", interval, 1, variant="equispaced")
            # build element
            theta_element = TensorProductElement(w_hori, w_vert)
            # spaces
            Vtheta = FunctionSpace(mesh, theta_element)
            Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_element))
            if v_CG1.function_space() != Vtheta:
                raise ValueError("This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace.")
            if v_DG1.function_space() != Vtheta_broken:
                raise ValueError("This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace.")
        else:
            raise ValueError("Boundary method should be a Boundary Method Enum object.")

        vec_DG1 = VectorFunctionSpace(DG0.mesh(), DG1_element)
        x = SpatialCoordinate(DG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            self.act_coords = Function(vec_DG1).project(x)  # actual coordinates
            self.eff_coords = eff_coords  # effective coordinates
            self.output = Function(DG1)
            self.on_exterior = find_domain_boundaries(mesh)

            self.gaussian_elimination_kernel = kernels.GaussianElimination(DG1)

        elif self.method == Boundary_Method.physics:
            self.bottom_kernel = kernels.PhysicsRecoveryBottom()
            self.top_kernel = kernels.PhysicsRecoveryTop()

    def apply(self):
        # First copy the interior-accurate CG1 field into DG1, then fix up
        # the boundary values according to the chosen method.
        self.interpolator.interpolate()

        if self.method == Boundary_Method.physics:
            self.bottom_kernel.apply(self.v_DG1, self.v_CG1)
            self.top_kernel.apply(self.v_DG1, self.v_CG1)

        else:
            # dynamics method: Gaussian elimination reads the old values and
            # writes corrected values into v_DG1 in place.
            self.v_DG1_old.assign(self.v_DG1)
            self.gaussian_elimination_kernel.apply(self.v_DG1_old,
                                                   self.v_DG1,
                                                   self.act_coords,
                                                   self.eff_coords,
                                                   self.num_ext)
class Recoverer(object):
    """
    An object that 'recovers' a field from a low order space
    (e.g. DG0) into a higher order space (e.g. CG1). This encompasses
    the process of interpolating first to a the right space before
    using the :class:`Averager` object, and also automates the
    boundary recovery process. If no boundary method is specified,
    this simply performs the action of the :class: `Averager`.

    :arg v_in: the :class:`ufl.Expr` or
         :class:`.Function` to project. (e.g. a DG0 function)
    :arg v_out: :class:`.Function` to put the result in. (e.g. a CG1 function)
    :arg VDG: optional :class:`.FunctionSpace`. If not None, v_in is interpolated
         to this space first before recovery happens.
    :arg boundary_method: an optional Boundary_Method Enum object.
    """

    def __init__(self, v_in, v_out, VDG=None, boundary_method=None):

        # check if v_in is valid
        if not isinstance(v_in, (ufl.core.expr.Expr, function.Function)):
            raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(v_in))

        self.v_in = v_in
        self.v_out = v_out
        self.V = v_out.function_space()
        # If an intermediate space is given, v_in is interpolated into it
        # before averaging; otherwise v_in is averaged directly.
        if VDG is not None:
            self.v = Function(VDG)
            self.interpolator = Interpolator(v_in, self.v)
        else:
            self.v = v_in
            self.interpolator = None

        self.VDG = VDG
        self.boundary_method = boundary_method
        self.averager = Averager(self.v, self.v_out)

        # check boundary method options are valid
        if boundary_method is not None:
            if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
                raise ValueError("Boundary method must be a Boundary_Method Enum object.")
            if VDG is None:
                raise ValueError("If boundary_method is specified, VDG also needs specifying.")

            # now specify things that we'll need if we are doing boundary recovery
            if boundary_method == Boundary_Method.physics:
                # check dimensions
                if self.V.value_size != 1:
                    raise ValueError('This method only works for scalar functions.')
                self.boundary_recoverer = Boundary_Recoverer(self.v_out, self.v, method=Boundary_Method.physics)
            else:

                mesh = self.V.mesh()
                # this ensures we get the pure function space, not an indexed function space
                V0 = FunctionSpace(mesh, self.v_in.function_space().ufl_element())
                CG1 = FunctionSpace(mesh, "CG", 1)
                eff_coords = find_eff_coords(V0)

                # Equispaced DG1 space (tensor product on extruded meshes).
                if V0.extruded:
                    cell = mesh._base_mesh.ufl_cell().cellname()
                    DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
                    DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
                    DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
                else:
                    cell = mesh.ufl_cell().cellname()
                    DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
                DG1 = FunctionSpace(mesh, DG1_element)

                if self.V.value_size == 1:
                    self.boundary_recoverer = Boundary_Recoverer(self.v_out, self.v,
                                                                 method=Boundary_Method.dynamics,
                                                                 eff_coords=eff_coords)
                else:
                    # now, break the problem down into components
                    v_scalars = []
                    v_out_scalars = []
                    self.boundary_recoverers = []
                    self.project_to_scalars_CG = []
                    self.extra_averagers = []
                    for i in range(self.V.value_size):
                        v_scalars.append(Function(DG1))
                        v_out_scalars.append(Function(CG1))
                        self.project_to_scalars_CG.append(Projector(self.v_out[i], v_out_scalars[i]))
                        self.boundary_recoverers.append(Boundary_Recoverer(v_out_scalars[i], v_scalars[i],
                                                                           method=Boundary_Method.dynamics,
                                                                           eff_coords=eff_coords[i]))
                        # need an extra averager that works on the scalar fields rather than the vector one
                        self.extra_averagers.append(Averager(v_scalars[i], v_out_scalars[i]))
                    # the boundary recoverer needs to be done on a scalar fields
                    # so need to extract component and restore it after the boundary recovery is done
                    self.interpolate_to_vector = Interpolator(as_vector(v_out_scalars), self.v_out)

    def project(self):
        """
        Perform the fully specified recovery and return the recovered function.
        """

        # Interpolate into the intermediate space first, if one was given.
        if self.interpolator is not None:
            self.interpolator.interpolate()
        self.averager.project()
        if self.boundary_method is not None:
            if self.V.value_size > 1:
                # Correct each component separately, then reassemble the vector.
                for i in range(self.V.value_size):
                    self.project_to_scalars_CG[i].project()
                    self.boundary_recoverers[i].apply()
                    self.extra_averagers[i].project()
                self.interpolate_to_vector.interpolate()
            else:
                self.boundary_recoverer.apply()
                self.averager.project()
        return self.v_out
def find_eff_coords(V0):
    """
    Returns the effective coordinates, in a vector DG1 space, of a recovery
    from V0 into a CG1 field. This is for use with the Boundary_Recoverer,
    as it facilitates the Gaussian elimination used to get second-order
    recovery at boundaries.

    If V0 is a vector function space, this returns a list of coordinate
    fields, one for each component.

    :arg V0: the original function space.
    """

    mesh = V0.mesh()
    # Equispaced DG1 element (tensor product on extruded meshes).
    if V0.extruded:
        cell = mesh._base_mesh.ufl_cell().cellname()
        DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
        DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
        DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
    else:
        cell = mesh.ufl_cell().cellname()
        DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
    vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)
    vec_DG1 = VectorFunctionSpace(mesh, DG1_element)
    x = SpatialCoordinate(mesh)

    if V0.ufl_element().value_size() > 1:
        eff_coords_list = []
        V0_coords_list = []

        # treat this separately for each component
        for i in range(V0.ufl_element().value_size()):
            # fill an d-dimensional list with i-th coordinate
            x_list = [x[i] for j in range(V0.ufl_element().value_size())]

            # the i-th element in V0_coords_list is a vector with all components the i-th coord
            ith_V0_coords = Function(V0).project(as_vector(x_list))
            V0_coords_list.append(ith_V0_coords)

        for i in range(V0.ufl_element().value_size()):
            # slice through V0_coords_list to obtain the coords of the DOFs for that component
            x_list = [V0_coords[i] for V0_coords in V0_coords_list]

            # average these to find effective coords in CG1
            V0_coords_in_DG1 = Function(vec_DG1).interpolate(as_vector(x_list))
            eff_coords_in_CG1 = Function(vec_CG1)
            eff_coords_averager = Averager(V0_coords_in_DG1, eff_coords_in_CG1)
            eff_coords_averager.project()

            # obtain these in DG1
            eff_coords_in_DG1 = Function(vec_DG1).interpolate(eff_coords_in_CG1)
            # fix values at periodic boundaries before storing
            eff_coords_list.append(correct_eff_coords(eff_coords_in_DG1))

        return eff_coords_list

    else:
        # find the coordinates at DOFs in V0
        vec_V0 = VectorFunctionSpace(mesh, V0.ufl_element())
        V0_coords = Function(vec_V0).project(x)

        # average these to find effective coords in CG1
        V0_coords_in_DG1 = Function(vec_DG1).interpolate(V0_coords)
        eff_coords_in_CG1 = Function(vec_CG1)
        eff_coords_averager = Averager(V0_coords_in_DG1, eff_coords_in_CG1)
        eff_coords_averager.project()

        # obtain these in DG1
        eff_coords_in_DG1 = Function(vec_DG1).interpolate(eff_coords_in_CG1)

        return correct_eff_coords(eff_coords_in_DG1)
def correct_eff_coords(eff_coords):
    """
    Correct the effective coordinates calculated by simply averaging,
    which will not be correct at periodic boundaries.

    :arg eff_coords: the effective coordinates in vec_DG1 space.
    """
    mesh = eff_coords.function_space().mesh()
    vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)

    # Equispaced DG1 element (tensor product on extruded meshes).
    if vec_CG1.extruded:
        base_cell = mesh._base_mesh.ufl_cell().cellname()
        horiz_elt = FiniteElement("DG", base_cell, 1, variant="equispaced")
        vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
        DG1_element = TensorProductElement(horiz_elt, vert_elt)
    else:
        DG1_element = FiniteElement("DG", mesh.ufl_cell().cellname(), 1,
                                    variant="equispaced")
    vec_DG1 = VectorFunctionSpace(mesh, DG1_element)

    if eff_coords.function_space() != vec_DG1:
        raise ValueError('eff_coords needs to be in the vector DG1 space')

    x = SpatialCoordinate(mesh)

    # Compare the true DG1 coordinates with the coordinates obtained by
    # averaging into CG1 and interpolating back; the two can differ at
    # periodic boundaries.
    true_coords = Function(vec_DG1).interpolate(x)
    averaged_CG1 = Function(vec_CG1)
    Averager(true_coords, averaged_CG1).project()
    round_trip_coords = Function(vec_DG1).interpolate(averaged_CG1)
    coord_shift = Function(vec_DG1).interpolate(true_coords - round_trip_coords)

    # Apply the same shift to the effective coordinates.
    adjusted_coords = Function(vec_DG1)
    adjusted_coords.interpolate(eff_coords + coord_shift)

    return adjusted_coords
def find_domain_boundaries(mesh):
    """
    Returns a scalar DG0 function that is 1.0 in cells on the boundary of
    the domain and 0.0 everywhere else, allowing boundary cells to be
    identified easily.

    :arg mesh: the mesh.
    """
    DG0 = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)

    on_exterior_DG0 = Function(DG0)
    on_exterior_CG1 = Function(CG1)

    # Mark exterior DOFs in CG1 first, as DG0 boundary conditions will not
    # work for triangular elements.
    for marker in ['on_boundary', 'top', 'bottom']:
        bc = DirichletBC(CG1, Constant(1.0), marker)
        try:
            bc.apply(on_exterior_CG1)
        except ValueError:
            # Some markers (e.g. 'top'/'bottom') may not exist on this mesh.
            pass

    # Transfer the marker field to DG0.
    on_exterior_DG0.interpolate(on_exterior_CG1)

    return on_exterior_DG0
| firedrakeproject/gusto | gusto/recovery.py | Python | mit | 19,027 | [
"Gaussian"
] | 80622bc72f768050071e2f850b64f24443a9323b71918afcfdb5baef75b78a83 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
# Command-line configuration shared by the CIFAR-10 scripts.
FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

# Location of the official CIFAR-10 binary archive.
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
    """Attach a histogram and a sparsity summary to an activation tensor.

    Args:
      x: Tensor

    Returns:
      nothing
    """
    # Strip any 'tower_<i>/' prefix so summaries from multi-GPU towers share
    # a single name on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a Variable pinned to CPU memory.

    Args:
      name: name of the variable
      shape: list of ints
      initializer: initializer for Variable

    Returns:
      Variable Tensor
    """
    # fp16 training stores the variables in half precision as well.
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create an initialized Variable with optional L2 weight decay.

    The Variable is initialized from a truncated normal distribution. A
    weight-decay term is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
    var = _variable_on_cpu(name, shape, initializer)
    if wd is not None:
        # Register the scaled L2 norm in the 'losses' collection so loss()
        # can sum it together with the cross-entropy term.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                    batch_size=FLAGS.batch_size)
    # Optionally cast to half precision to match the rest of the model.
    if FLAGS.use_fp16:
        images, labels = tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
    return images, labels
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
      eval_data: bool, indicating if one should use the train or eval data set.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=data_dir,
                                          batch_size=FLAGS.batch_size)
    # Optionally cast to half precision to match the rest of the model.
    if FLAGS.use_fp16:
        images, labels = tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
    return images, labels
def inference(images):
    """Build the CIFAR-10 model.

    Two conv/pool/norm stages followed by two fully-connected layers and a
    final linear layer producing unscaled logits.

    Args:
      images: Images returned from distorted_inputs() or inputs().

    Returns:
      Logits.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
    #
    # conv1: 5x5 convolution over 3 input channels into 64 feature maps.
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 3, 64],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)

    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1: local response normalization
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')

    # conv2: 5x5 convolution, 64 -> 64 feature maps.
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 64, 64],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)

    # norm2 (note: normalization before pooling in this stage)
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    # local3: first fully-connected layer (384 units), with weight decay.
    with tf.variable_scope('local3') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        # NOTE(review): this relies on a statically-known batch size — confirm.
        reshape = tf.reshape(pool2, [images.get_shape()[0], -1])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        _activation_summary(local3)

    # local4: second fully-connected layer (192 units), with weight decay.
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)

    # linear layer(WX + b),
    # We don't apply softmax here because
    # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
    # and performs the softmax internally for efficiency.
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                              stddev=1/192.0, wd=None)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
def loss(logits, labels):
    """Total loss: average cross entropy plus all L2 weight-decay terms.

    Adds summary for "Loss" and "Loss/avg".

    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Mean cross entropy over the batch, registered in the 'losses'
    # collection next to the weight-decay terms.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.cast(labels, tf.int64), logits=logits,
        name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is the cross entropy plus all the weight-decay terms.
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Attach moving-average summaries for all losses in the CIFAR-10 model.

    Args:
      total_loss: Total loss from loss().

    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Track a moving average of every individual loss and of the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    all_losses = tf.get_collection('losses') + [total_loss]
    loss_averages_op = loss_averages.apply(all_losses)

    for l in all_losses:
        # The raw value is reported under ' (raw)'; the moving average takes
        # the original op name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    return loss_averages_op
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
      total_loss: Total loss from loss().
      global_step: Integer Variable counting the number of training steps
        processed.

    Returns:
      train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients, making sure the loss-average update runs first.
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # The returned no-op depends on both the gradient application and the
    # variable-average update, so running it performs a full training step.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
def maybe_download_and_extract():
    """Download and extract the CIFAR-10 tarball from Alex's website.

    Downloads DATA_URL into FLAGS.data_dir (creating the directory if
    necessary) with a textual progress indicator, then extracts the archive
    unless the extracted directory already exists.
    """
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # urlretrieve's reporthook receives total_size == -1 (or 0) when
            # the server does not report a Content-Length; guard against
            # dividing by it in that case.
            if total_size > 0:
                percent = float(count * block_size) / float(total_size) * 100.0
                sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, percent))
            else:
                sys.stdout.write('\r>> Downloading %s' % filename)
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        # NOTE(review): extractall trusts the archive's member paths (possible
        # path traversal); acceptable only for the official CIFAR-10 tarball.
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| jiaphuan/models | tutorials/image/cifar10/cifar10.py | Python | apache-2.0 | 14,665 | [
"Gaussian"
] | e273876b2a9a5d1f93768c531c28a0660b01b461495b594a81b9118342c41663 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkXMLPImageDataWriter(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around VTK's vtkXMLPImageDataWriter."""

    def __init__(self, module_manager):
        # Delegate all wiring to the generic VTK-object mixin: one
        # vtkXMLPImageData input port, no output ports, and the module's
        # documentation is replaced by the VTK class documentation.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkXMLPImageDataWriter(), 'Writing vtkXMLPImageData.',
            ('vtkXMLPImageData',), (),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkXMLPImageDataWriter.py | Python | bsd-3-clause | 504 | [
"VTK"
] | 8af5db4e66a0717a1745a4bde930dc92f884e274774214e7193594647f5f555f |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""
Demo #2
The second script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script is a bit more sophisticated, but still pretty basic. We're still only making
a single image, but now the galaxy has an exponential radial profile and is sheared.
The PSF is a circular Moffat profile. And the noise is Poisson using the flux from both
the object and a background sky level to determine the variance in each pixel.
New features introduced in this demo:
- obj = galsim.Exponential(flux, scale_radius)
- obj = galsim.Moffat(beta, flux, half_light_radius)
- obj.applyShear(g1, g2) -- with explanation of other ways to specify shear
- rng = galsim.BaseDeviate(seed)
- noise = galsim.PoissonNoise(rng, sky_level)
- galsim.hsm.EstimateShear(image, image_epsf)
"""
import sys
import os
import math
import logging
import galsim
def main(argv):
"""
A little bit more sophisticated, but still pretty basic:
- Use a sheared, exponential profile for the galaxy.
- Convolve it by a circular Moffat PSF.
- Add Poisson noise to the image.
"""
# In non-script code, use getLogger(__name__) at module scope instead.
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("demo2")
# Simulation parameters; units are noted on each line.
gal_flux = 1.e5 # counts
gal_r0 = 2.7 # arcsec
g1 = 0.1 #
g2 = 0.2 #
psf_beta = 5 #
psf_re = 1.0 # arcsec
pixel_scale = 0.2 # arcsec / pixel
sky_level = 2.5e4 # counts / arcsec^2
# This time use a particular seed, so the image is deterministic.
# This is the same seed that is used in demo2.yaml, which means the images produced
# by the two methods will be precisely identical.
random_seed = 1534225
logger.info('Starting demo script 2 using:')
logger.info(' - sheared (%.2f,%.2f) exponential galaxy (flux = %.1e, scale radius = %.2f),',
g1, g2, gal_flux, gal_r0)
logger.info(' - circular Moffat PSF (beta = %.1f, re = %.2f),', psf_beta, psf_re)
logger.info(' - pixel scale = %.2f,', pixel_scale)
logger.info(' - Poisson noise (sky level = %.1e).', sky_level)
# Initialize the (pseudo-)random number generator that we will be using below.
rng = galsim.BaseDeviate(random_seed)
# Define the galaxy profile.
gal = galsim.Exponential(flux=gal_flux, scale_radius=gal_r0)
# Shear the galaxy by some value.
# There are quite a few ways you can use to specify a shape.
# q, beta Axis ratio and position angle: q = b/a, 0 < q < 1
# e, beta Ellipticity and position angle: |e| = (1-q^2)/(1+q^2)
# g, beta ("Reduced") Shear and position angle: |g| = (1-q)/(1+q)
# eta, beta Conformal shear and position angle: eta = ln(1/q)
# e1,e2 Ellipticity components: e1 = e cos(2 beta), e2 = e sin(2 beta)
# g1,g2 ("Reduced") shear components: g1 = g cos(2 beta), g2 = g sin(2 beta)
# eta1,eta2 Conformal shear components: eta1 = eta cos(2 beta), eta2 = eta sin(2 beta)
# NOTE: applyShear mutates `gal` in place (old-style GalSim API).
gal.applyShear(g1=g1, g2=g2)
logger.debug('Made galaxy profile')
# Define the PSF profile.
psf = galsim.Moffat(beta=psf_beta, flux=1., half_light_radius=psf_re)
logger.debug('Made PSF profile')
# Define the pixel size
pix = galsim.Pixel(pixel_scale)
logger.debug('Made pixel profile')
# Final profile is the convolution of these.
final = galsim.Convolve([gal, psf, pix])
# Effective PSF: PSF convolved with the pixel response only (no galaxy).
final_epsf = galsim.Convolve([psf, pix])
logger.debug('Convolved components into final profile')
# Draw the image with a particular pixel scale.
image = final.draw(dx=pixel_scale)
image_epsf = final_epsf.draw(dx=pixel_scale)
logger.debug('Made image of the profile')
# To get Poisson noise on the image, we will use a class called PoissonNoise.
# However, we want the noise to correspond to what you would get with a significant
# flux from the sky. This is done by telling PoissonNoise to add noise from a
# sky level in addition to the counts currently in the image.
#
# One wrinkle here is that the PoissonNoise class needs the sky level in each pixel,
# while we have a sky_level in counts per arcsec^2. So we need to convert:
sky_level_pixel = sky_level * pixel_scale**2
noise = galsim.PoissonNoise(rng, sky_level=sky_level_pixel)
image.addNoise(noise)
logger.debug('Added Poisson noise')
# Write the image to a file.
if not os.path.isdir('output'):
os.mkdir('output')
file_name = os.path.join('output', 'demo2.fits')
file_name_epsf = os.path.join('output','demo2_epsf.fits')
image.write(file_name)
image_epsf.write(file_name_epsf)
logger.info('Wrote image to %r',file_name)
logger.info('Wrote effective PSF image to %r',file_name_epsf)
# Measure the shape with the HSM module, using the rendered effective PSF.
results = galsim.hsm.EstimateShear(image, image_epsf)
logger.info('HSM reports that the image has observed shape and size:')
logger.info(' e1 = %.3f, e2 = %.3f, sigma = %.3f (pixels)', results.observed_shape.e1,
results.observed_shape.e2, results.moments_sigma)
logger.info('When carrying out Regaussianization PSF correction, HSM reports distortions')
logger.info(' e1, e2 = %.3f, %.3f',
results.corrected_e1, results.corrected_e2)
logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')
exp_shear = galsim.Shear(g1=g1, g2=g2)
logger.info(' g1, g2 = %.3f, %.3f', exp_shear.e1,exp_shear.e2)
if __name__ == "__main__":
main(sys.argv)
| mardom/GalSim | examples/demo2.py | Python | gpl-3.0 | 6,455 | [
"Galaxy"
] | 7ae8fcefb4f6e64d5ae785b8908a48f6e3d8d51e1cd81b9c75d4f120f192d37f |
""" A computing element class that attempts to use glexec if available then
defaults to the standard InProcess Computing Element behaviour.
"""
__RCSID__ = "$Id$"
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC import S_OK, S_ERROR
import DIRAC
import os
import distutils.spawn
MandatoryParameters = [ ]
class glexecComputingElement( ComputingElement ):
mandatoryParameters = MandatoryParameters
#############################################################################
def __init__( self, ceUniqueID ):
  """Initialise the CE wrapper and reset the per-pilot job counter."""
  ComputingElement.__init__( self, ceUniqueID )
  # Number of payloads successfully handed over so far.
  self.submittedJobs = 0
#############################################################################
def _addCEConfigDefaults( self ):
  """Make sure all necessary configuration parameters are defined.

  Only the defaults shared by every ComputingElement are needed; there
  are currently no glexec-specific parameters to add on top.
  """
  ComputingElement._addCEConfigDefaults( self )
#############################################################################
def submitJob( self, executableFile, proxy, dummy = None ):
""" Submit a payload through glexec when it is available on the worker
node, falling back to plain InProcess execution otherwise.

The payload proxy is written to disk and exported through the
GLEXEC_CLIENT_CERT / GLEXEC_SOURCE_PROXY environment variables, glexec
is then located and smoke-tested, and finally the (possibly wrapped)
executable is run.  Returns S_OK on success or S_ERROR from any
failing step.
"""
self.log.verbose( 'Setting up proxy for payload' )
result = self.writeProxyToFile( proxy )
if not result['OK']:
return result
payloadProxy = result['Value']
if not os.environ.has_key( 'X509_USER_PROXY' ):
self.log.error( 'X509_USER_PROXY variable for pilot proxy not found in local environment' )
return S_ERROR( 'X509_USER_PROXY not found' )
pilotProxy = os.environ['X509_USER_PROXY']
self.log.info( 'Pilot proxy X509_USER_PROXY=%s' % pilotProxy )
# glexec picks the payload credentials up from these two variables.
os.environ[ 'GLEXEC_CLIENT_CERT' ] = payloadProxy
os.environ[ 'GLEXEC_SOURCE_PROXY' ] = payloadProxy
self.log.info( '\n'.join( [ 'Set payload proxy variables:',
'GLEXEC_CLIENT_CERT=%s' % payloadProxy,
'GLEXEC_SOURCE_PROXY=%s' % payloadProxy ] ) )
#Determine glexec location (default to standard InProcess behaviour if not found)
glexecLocation = None
result = self.glexecLocate()
if result['OK']:
glexecLocation = result['Value']
self.log.info( 'glexec found for local site at %s' % glexecLocation )
if glexecLocation:
result = self.recursivelyChangePermissions()
if not result['OK']:
self.log.error( 'Permissions change failed, continuing regardless...' )
else:
self.log.info( 'glexec not found, no permissions to change' )
#Test glexec with payload proxy prior to submitting the job
result = self.glexecTest( glexecLocation )
if not result['OK']:
res = self.analyseExitCode( result['Value'] ) #take no action as we currently default to InProcess
glexecLocation = None
# Optionally hand the payload back for rescheduling instead of running it locally.
if 'RescheduleOnError' in self.ceParameters and self.ceParameters['RescheduleOnError']:
result = S_ERROR( 'gLexec Test Failed: %s' % res['Value'] )
result['ReschedulePayload'] = True
return result
self.log.info( 'glexec test failed, will submit payload regardless...' )
#Revert to InProcess behaviour
if not glexecLocation:
self.log.info( 'glexec is not found, setting X509_USER_PROXY for payload proxy' )
os.environ[ 'X509_USER_PROXY' ] = payloadProxy
self.log.verbose( 'Starting process for monitoring payload proxy' )
gThreadScheduler.addPeriodicTask( self.proxyCheckPeriod, self.monitorProxy,
taskArgs = ( glexecLocation, pilotProxy, payloadProxy ),
executions = 0, elapsedTime = 0 )
#Submit job
self.log.info( 'Changing permissions of executable to 0755' )
try:
os.chmod( os.path.abspath( executableFile ), 0755 )
except Exception, x:
self.log.error( 'Failed to change permissions of executable to 0755 with exception:\n%s' % ( x ) )
result = self.glexecExecute( os.path.abspath( executableFile ), glexecLocation )
if not result['OK']:
self.analyseExitCode( result['Value'] ) #take no action as we currently default to InProcess
self.log.error( result )
return result
self.log.debug( 'glexec CE result OK' )
self.submittedJobs += 1
return S_OK()
#############################################################################
def recursivelyChangePermissions( self ):
""" Ensure that the current directory and all those beneath have the correct
permissions.
"""
currentDir = os.getcwd()
try:
self.log.info( 'Trying to explicitly change permissions for parent directory %s' % currentDir )
os.chmod( currentDir, 0755 )
except Exception, x:
self.log.error( 'Problem changing directory permissions in parent directory', str( x ) )
return S_OK()
userID = None
res = shellCall( 10, 'ls -al' )
if res['OK'] and res['Value'][0] == 0:
self.log.info( 'Contents of the working directory before permissions change:' )
self.log.info( str( res['Value'][1] ) )
else:
self.log.error( 'Failed to list the log directory contents', str( res['Value'][2] ) )
res = shellCall( 10, 'id -u' )
if res['OK'] and res['Value'][0] == 0:
userID = res['Value'][1]
self.log.info( 'Current user ID is: %s' % ( userID ) )
else:
self.log.error( 'Failed to obtain current user ID', str( res['Value'][2] ) )
return res
res = shellCall( 10, 'ls -al %s/../' % currentDir )
if res['OK'] and res['Value'][0] == 0:
self.log.info( 'Contents of the parent directory before permissions change:' )
self.log.info( str( res['Value'][1] ) )
else:
self.log.error( 'Failed to list the parent directory contents', str( res['Value'][2] ) )
self.log.verbose( 'Changing permissions to 0755 in current directory %s' % currentDir )
for dirName, subDirs, files in os.walk( currentDir ):
try:
self.log.info( 'Changing file and directory permissions to 0755 for %s' % dirName )
if os.stat( dirName )[4] == userID and not os.path.islink( dirName ):
os.chmod( dirName, 0755 )
for toChange in files:
toChange = os.path.join( dirName, toChange )
if os.stat( toChange )[4] == userID and not os.path.islink( toChange ):
os.chmod( toChange, 0755 )
except Exception, x:
self.log.error( 'Problem changing directory permissions', str( x ) )
self.log.info( 'Permissions in current directory %s updated successfully' % ( currentDir ) )
res = shellCall( 10, 'ls -al' )
if res['OK'] and res['Value'][0] == 0:
self.log.info( 'Contents of the working directory after changing permissions:' )
self.log.info( str( res['Value'][1] ) )
else:
self.log.error( 'Failed to list the log directory contents', str( res['Value'][2] ) )
res = shellCall( 10, 'ls -al %s/../' % currentDir )
if res['OK'] and res['Value'][0] == 0:
self.log.info( 'Contents of the parent directory after permissions change:' )
self.log.info( str( res['Value'][1] ) )
else:
self.log.error( 'Failed to list the parent directory contents', str( res['Value'][2] ) )
return S_OK()
#############################################################################
def analyseExitCode( self, resultTuple ):
  """ Translate a failed glexec invocation into a human readable message.
      The convention for glexec exit codes is listed below:
      Shell exit codes:
      127 - command not found
      129 - command died due to signal 1 (SIGHUP)
      130 - command died due to signal 2 (SIGINT)
      glexec specific codes:
      201 - client error
      202 - internal error
      203 - authz error

      Returns S_OK( message ), where message is None for unknown codes,
      or a bare S_OK() when no result tuple was supplied.
  """
  if not resultTuple:
    return S_OK()
  # FIXME: the wrapper will return:
  # > 0 if there are problems with the payload
  # < 0 if there are problems with the wrapper itself
  # 0 if everything is OK
  knownCodes = { 127 : 'Shell exited, command not found',
                 129 : 'Shell interrupt signal 1 (SIGHUP)',
                 130 : 'Shell interrupt signal 2 (SIGINT)',
                 201 : 'glexec failed with client error',
                 202 : 'glexec failed with internal error',
                 203 : 'glexec failed with authorization error' }
  status, stdOutput, stdError = resultTuple
  self.log.info( 'glexec call failed with status %s' % ( status ) )
  self.log.info( 'glexec stdout:\n%s' % stdOutput )
  self.log.info( 'glexec stderr:\n%s' % stdError )
  for knownCode, knownMsg in knownCodes.items():
    self.log.verbose( 'Exit code %s => %s' % ( knownCode, knownMsg ) )
  error = knownCodes.get( status )
  if not error:
    self.log.error( 'glexec exit code %s not in expected list' % ( status ) )
  else:
    self.log.error( 'Resolved glexec return code %s = %s' % ( status, error ) )
  return S_OK( error )
#############################################################################
def glexecTest( self, glexecLocation ):
"""Ensure that the current DIRAC distribution is group readable e.g. dirac-proxy-info
also check the status code of the glexec call.
"""
if not glexecLocation:
return S_OK( 'Nothing to test' )
testFile = 'glexecTest.sh'
cmds = ['#!/bin/sh']
cmds.append( 'id' )
cmds.append( 'hostname' )
cmds.append( 'date' )
cmds.append( '%s/scripts/dirac-proxy-info' % DIRAC.rootPath )
fopen = open( testFile, 'w' )
fopen.write( '\n'.join( cmds ) )
fopen.close()
self.log.info( 'Changing permissions of test script to 0755' )
try:
os.chmod( os.path.abspath( testFile ), 0755 )
except Exception, x:
self.log.error( 'Failed to change permissions of test script to 0755 with exception:\n%s' % ( x ) )
return S_ERROR( 'Could not change permissions of test script' )
return self.glexecExecute( os.path.abspath( testFile ), glexecLocation )
#############################################################################
def glexecExecute( self, executableFile, glexecLocation ):
  """Run the given executable (optionally via glexec) and check the exit status.

     With both arguments the payload is launched through
     "glexec /bin/bash -lc '<file>'"; with only a glexec location, glexec
     itself is run with no arguments (used to refresh the payload proxy).
     Exit statuses >= 127 are converted into an S_ERROR carrying the full
     (status, stdout, stderr) tuple in its 'Value' entry.
  """
  if glexecLocation and executableFile:
    command = "%s /bin/bash -lc '%s'" % ( glexecLocation, executableFile )
  elif glexecLocation:
    command = '%s' % ( glexecLocation )
  else:
    command = executableFile
  self.log.info( 'CE submission command is: %s' % command )
  result = shellCall( 0, command, callbackFunction = self.sendOutput )
  if not result['OK']:
    result['Value'] = ( 0, '', '' )
    return result
  status, stdOutput, stdError = result['Value']
  self.log.info( "Status after the glexec execution is %s" % str( status ) )
  if status >= 127:
    error = S_ERROR( status )
    error['Value'] = ( status, stdOutput, stdError )
    return error
  return result
#############################################################################
def glexecLocate( self ):
  """Locate the glexec binary on the worker node.

     Checks OSG_GLEXEC_LOCATION first, then GLITE_LOCATION/sbin/glexec,
     and finally falls back to searching $PATH.  Returns S_OK( path ) or
     S_ERROR when nothing usable is found, in which case the caller
     defaults to InProcess behaviour.
  """
  if 'OSG_GLEXEC_LOCATION' in os.environ:
    glexecPath = '%s' % ( os.environ['OSG_GLEXEC_LOCATION'] )
  elif 'GLITE_LOCATION' in os.environ:
    glexecPath = '%s/sbin/glexec' % ( os.environ['GLITE_LOCATION'] )
  else:
    # Last resort: look for the executable in the PATH.
    glexecPath = distutils.spawn.find_executable( "glexec" )
    if not glexecPath:
      self.log.info( 'Unable to locate glexec, site does not have GLITE_LOCATION nor OSG_GLEXEC_LOCATION defined' )
      return S_ERROR( 'glexec not found' )
  if not os.path.exists( glexecPath ):
    self.log.info( 'glexec not found at path %s' % ( glexecPath ) )
    return S_ERROR( 'glexec not found' )
  return S_OK( glexecPath )
#############################################################################
def getCEStatus( self ):
  """ Method to return information on running and pending jobs.

      This CE does not keep track of its jobs, so every counter is zero.
  """
  result = S_OK()
  for jobCounter in ( 'SubmittedJobs', 'RunningJobs', 'WaitingJobs' ):
    result[jobCounter] = 0
  return result
#############################################################################
def monitorProxy( self, glexecLocation, pilotProxy, payloadProxy ):
  """ Periodic task that keeps the payload proxy alive.

      Delegates the renewal check to the base class _monitorProxy and,
      when the proxy was actually refreshed and glexec is in use, reruns
      glexec with no arguments so that the renewed proxy is picked up.
  """
  renewalResult = self._monitorProxy( pilotProxy, payloadProxy )
  if not renewalResult['OK']:
    # Failed to renew the proxy, nothing else to be done
    return renewalResult
  if not renewalResult['Value']:
    # No need to renew the proxy, nothing else to be done
    return renewalResult
  if glexecLocation:
    self.log.info( 'Rerunning glexec without arguments to renew payload proxy' )
    result = self.glexecExecute( None, glexecLocation )
    if not result['OK']:
      self.log.error( result )
  else:
    self.log.info( 'Running without glexec, checking local proxy' )
  return S_OK( 'Proxy checked' )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| calancha/DIRAC | Resources/Computing/glexecComputingElement.py | Python | gpl-3.0 | 13,436 | [
"DIRAC"
] | 168885c6fcbd97f06f42fa144a449c9e9e93c5c789814c6762d7afbdb568d667 |
#!/usr/bin/python
import sys, pylab, scipy.stats.mstats, scipy.optimize
import numpy as np
import src.lib.utils as fn
import src.lib.wsutils as ws
out = fn.Verbose()
def get_sky_val(data, show=False, range=None, nbins=None, save=None):
    """Estimate the sky background level and noise of an image.

    Builds a histogram of the pixel values between the requested quantiles
    and least-squares fits a Gaussian to it; the fitted centre is returned
    as the sky value and the fitted (absolute) width as the sky sigma.

    :param data: 2D image array; NaNs are replaced by the image median.
    :param show: if True, display the histogram and the fit with pylab.
    :param range: [low, high] quantile bounds for the histogram
                  (default [0.01, 0.99]).  NOTE: shadows the builtin
                  `range`; kept for backward compatibility with callers
                  passing it by keyword.
    :param nbins: number of histogram bins; derived from the quantile
                  values (at least 100) when not given.
    :param save: optional path where the diagnostic plot is saved.
    :return: (sky, sigma) tuple of floats.
    """
    # TODO: take the median starting from a certain point only
    if range is None:
        range = [0.01, 0.99]
    d = data.copy()
    # Replace NaNs so quantiles/histogramming do not choke on them.
    d[np.where(np.isnan(d))] = np.median(data)
    q = scipy.stats.mstats.mquantiles(d.ravel(), prob=[0, range[0], range[1], 1])
    if nbins is None:
        # BUGFIX: this previously used np.max(100, x), where the second
        # argument is interpreted as an *axis*, not a candidate value.
        # The builtin max() gives the intended "at least 100 bins".
        nbins = max(100, int(np.abs(q[1]) + np.abs(q[2])))
    out(2, 'Building histogram...')
    # `normed=False` dropped: it is the numpy default and the keyword was
    # removed from recent numpy releases.
    h = np.histogram(d.ravel(), bins=np.linspace(q[1], q[2], nbins))
    out(2, 'Done: ', repr(nbins), "bins, ", "median =", repr(np.median(d)))
    out(2, 'Begin gaussian fit...')
    # Initial guess: centre at the histogram peak.
    bnind = np.where(h[0] == h[0].max())[0][0]
    # Gaussian model g(center, width, intensity)(x).
    g = lambda c, f, I: lambda x: I * np.exp(-(x - c)**2 / (2 * f**2))
    errfun = lambda p: g(*p)(h[1][:h[0].shape[0]]) - h[0]
    print (h[1][bnind], np.abs(h[1][-1] - h[1][0]) / 20., h[0][bnind])
    # `warning=False` dropped: the keyword no longer exists in
    # scipy.optimize.leastsq and only silenced convergence warnings.
    p, success = scipy.optimize.leastsq(errfun,
                    (h[1][bnind], np.abs(h[1][-1] - h[1][0]) / 20., h[0][bnind]))
    # The fitted width may come out negative; only its magnitude matters.
    p[1] = np.abs(p[1])
    out(2, 'Done!')
    out(2, "fit results: center (sky) =", repr(p[0]), "FWHM =", repr(p[1]), "intensity =", repr(p[2]))
    if show or save:
        pylab.figure(1)
        pylab.plot(h[1][:h[0].shape[0]], h[0], label='data distribution')
        pylab.plot(h[1][:h[0].shape[0]], errfun(p) + h[0], label='gaussian fit')
        pylab.plot(h[1][:h[0].shape[0]], errfun(p), label='error')
        pylab.legend()
        if show == True:
            pylab.show()
        if save != None:
            pylab.savefig(save)
        pylab.clf()
    return p[0], p[1]
def main(argv=None):
"""Measure sky background and sigma for every image listed in the config.

Flags: -s silent, -v verbose, -d debug, -h help, -p analyse a single
file given as argument and exit without writing the config.
NOTE(review): the configuration file is executed with exec() below, so
it must be trusted input; it is expected to define FILENAME and NOWRITE
(and may override SKY_RANGE / NBINS / SHOW).
"""
cfg = 'config.py'
if argv is not None:
sys.argv = argv
opt, args = fn.get_args(sys.argv)
if args is not None: cfg = args[0]
p = False
SKY_RANGE = NBINS = SHOW = None
f = open(cfg, 'r')
# Execute the configuration file in this function's namespace.
exec f.read()
f.close()
vars = ['FILENAME', 'NOWRITE']
err = fn.check_namespace(vars, locals())
if 's' in opt:
out.level = 0
if 'v' in opt:
out.level = 2
if 'd' in opt:
DEBUG = True
out.level = 3
out(1, '~~~ DEBUG MODE ~~~')
if 'h' in opt:
out(1, 'No help page yet!')
return 0
if 'p' in opt:
sky, sig = get_sky_val(fn.get_data(args[0], directory='.'), show = True, range=SKY_RANGE, nbins=NBINS)
out(1, 'Analysis done! Sky:', sky, 'sigma:', sig)
return 0
if err > 0:
return 1
out(2, FILENAME, cfg) #@UndefinedVariable
files, cat = ws.get_files(FILENAME, directory='images') #@UndefinedVariable
skyl = []
sigl = []
fnb = len(files)
for i in xrange(fnb):
out(1, '===============', i+1, '/', fnb,'===============|')
out(1, 'Getting sky from', files[i])
sky, sig = get_sky_val(fn.get_data(files[i], directory='images'), show = SHOW, range=SKY_RANGE, nbins=NBINS)
skyl += [sky]
sigl += [sig]
out(1, 'Analysis done! Sky:', sky, 'sigma:', sig)
out(1, '------------------------------------------')
out(1, 'sky:', skyl, 'sigma:', sigl)
# Persist the measured values back into the configuration file.
if NOWRITE is False and not p: #@UndefinedVariable
fn.write_cfg(cfg, {'SKY_BACKGROUND':skyl, 'SIGMA_SKY':sigl})
return 0
if __name__ == "__main__":
sys.exit(main())
| COSMOGRAIL/COSMOULINE | pipe/modules/src/_1_get_sky.py | Python | gpl-3.0 | 3,705 | [
"Gaussian"
] | 3f68f2085dacf8626509849796192d47269c7bd68496a099992ce05da6a0ab2e |
import string
import random
import json
from collections import defaultdict
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
    """Create, save and return a ClassInstanceClassInstance row linking
    instance A to instance B through the given relation, owned by `user`
    within the given project."""
    cici = ClassInstanceClassInstance(
        user=user,
        project_id=project_id,
        relation_id=relation_id,
        class_instance_a_id=instance_a_id,
        class_instance_b_id=instance_b_id)
    cici.save()
    return cici
def insert_into_log(project_id, user_id, op_type, location=None, freetext=None):
    """ Inserts a new entry into the log table. If the location parameter is
    passed, it is expected to be an iteratable (list, tuple).

    Returns an {'error': ...} dict for unknown operation types, otherwise
    None after saving the entry.
    """
    # Whitelist of operation types the log table accepts.
    valid_op_types = (
        "rename_root", "create_neuron", "rename_neuron", "remove_neuron",
        "move_neuron", "create_group", "rename_group", "remove_group",
        "move_group", "create_skeleton", "rename_skeleton",
        "remove_skeleton", "move_skeleton", "split_skeleton",
        "join_skeleton", "reroot_skeleton", "change_confidence")
    if op_type not in valid_op_types:
        return {'error': 'Operation type {0} not valid'.format(op_type)}
    entry = Log()
    entry.user_id = user_id
    entry.project_id = project_id
    entry.operation_type = op_type
    if location is not None:
        entry.location = Double3D(*location)
    if freetext is not None:
        entry.freetext = freetext
    entry.save()
# Tip from: http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/
# Required because we need a RequestContext, not just a Context - the
# former looks at TEMPLATE_CONTEXT_PROCESSORS, while the latter doesn't.
def my_render_to_response(req, *args, **kwargs):
    """Wrapper around render_to_response that forces a RequestContext.

    A RequestContext (unlike a plain Context) runs the configured
    TEMPLATE_CONTEXT_PROCESSORS, which the templates rely on.
    (Tip from: http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/)
    """
    kwargs['context_instance'] = RequestContext(req)
    return render_to_response(*args, **kwargs)
def json_error_response(message):
    """
    When an operation fails we should return a JSON dictionary
    with the key 'error' set to an error message. This is a
    helper method to return such a structure:
    """
    payload = json.dumps({'error': message})
    return HttpResponse(payload, content_type='text/json')
def order_neurons(neurons, order_by=None):
    """Sort a list of neuron objects in place and return it.

    `order_by` is looked up in SORT_ORDERS_DICT to obtain the sort column
    and direction; a missing or unknown key falls back to ascending sort
    by name.  Raises for sort columns this function does not know about.
    """
    column, reverse = 'name', False
    if order_by and (order_by in SORT_ORDERS_DICT):
        column, reverse, _ = SORT_ORDERS_DICT[order_by]
    # Dispatch table mapping a sort column to its key function.
    key_funcs = {
        'name': lambda x: x.name,
        'gal4': lambda x: x.cached_sorted_lines_str,
        'cell_body': lambda x: x.cached_cell_body,
    }
    if column not in key_funcs:
        raise Exception("Unknown column (%s) in order_neurons" % (column,))
    neurons.sort(key=key_funcs[column])
    if reverse:
        neurons.reverse()
    return neurons
# Both index and visual_index take a request and kwargs and then
# return a list of neurons and a NeuronSearch form:
def get_form_and_neurons(request, project_id, kwargs):
"""Return (neurons, search_form) for the neuron listing views.

Builds a NeuronSearch form from REST-style kwargs, POST data or
defaults, filters the project's neurons accordingly, attaches cached
driver-line and cell-body display data to each neuron, and sorts the
result with order_neurons().
"""
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
# Invalid form: fall back to the default search parameters.
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
# Neurons matching the search string; the two 'orphaned ...' helper
# neurons are never listed.
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
# 'a' means "any location": only filter when a specific one was chosen.
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
# Map neuron id -> cell body location name.
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
# Map neuron id -> list of driver line class instances expressed in it.
neuron_id_to_driver_lines = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron'):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
# Cache per-neuron display data used by the templates and by
# order_neurons (the 'gal4' and 'cell_body' sort columns).
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
# TODO After all PHP functions have been replaced and all occurrence of
# this odd behavior have been found, change callers to not depend on this
# legacy functionality.
def makeJSON_legacy_list(objects):
    '''
    The PHP function makeJSON, when operating on a list of rows as
    results, will output a JSON list of key-values, with keys being
    integers from 0 and upwards. We return a dict with the same
    structure so that it looks the same when used with json.dumps.
    '''
    # enumerate() yields exactly the (0..n-1, object) pairs we need.
    return dict(enumerate(objects))
def cursor_fetch_dictionary(cursor):
    "Returns all rows from a cursor as a dict"
    # First element of each description entry is the column name.
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
def get_relation_to_id_map(project_id):
    """Map every relation name of the given project to its ID."""
    rows = Relation.objects.filter(project=project_id).values_list("relation_name", "id")
    return dict(rows)
def get_class_to_id_map(project_id):
    """Map every class name of the given project to its ID."""
    rows = Class.objects.filter(project=project_id).values_list("class_name", "id")
    return dict(rows)
def urljoin(a, b):
    """ Joins two URL parts a and b while making sure there is
    exactly one slash inbetween.

    Empty parts no longer raise IndexError: an empty `a` yields
    '/' + b (after stripping b's leading slash) and an empty `b`
    yields `a` with a trailing slash.
    """
    # endswith/startswith are safe on empty strings, unlike a[-1]/b[0].
    if not a.endswith('/'):
        a = a + '/'
    if b.startswith('/'):
        b = b[1:]
    return a + b
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
    """ Creates a random string of the specified length.
    """
    # Each position is drawn independently from `chars`.
    return ''.join(random.choice(chars) for _ in range(size))
| fzadow/CATMAID | django/applications/catmaid/control/common.py | Python | agpl-3.0 | 8,080 | [
"NEURON"
] | 668efdca3132a9613a0ea59d3c5db0594334ee109b7763a76b1fb2dbef94ac74 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import math
import numpy
import os.path
import rmgpy.constants as constants
from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer
################################################################################
class QchemLog:
"""
Represent an output file from Qchem. The attribute `path` refers to the
location on disk of the Qchem output file of interest. Methods are provided
to extract a variety of information into CanTherm classes and/or NumPy
arrays.
"""
def __init__(self, path):
    """Store the on-disk location of the Qchem output file to parse."""
    self.path = path
def getNumberOfAtoms(self):
    """
    Return the number of atoms in the molecular configuration used in
    the Qchem output file, counted from the first 'Standard Nuclear
    Orientation' table.
    """
    numAtoms = 0
    f = open(self.path, 'r')
    line = f.readline()
    # Stop as soon as the first orientation table has been counted.
    while line != '' and numAtoms == 0:
        if 'Standard Nuclear Orientation' in line and numAtoms == 0:
            # Skip the table header lines.
            for _ in range(3):
                line = f.readline()
            # One table row per atom, up to the closing dashed line.
            while '----------------------------------------------------' not in line:
                numAtoms += 1
                line = f.readline()
            line = f.readline()
        line = f.readline()
    f.close()
    return numAtoms
def loadForceConstantMatrix(self):
"""
Return the force constant matrix (in Cartesian coordinates) from the
QChem log file. If multiple such matrices are identified,
only the last is returned. The units of the returned force constants
are J/m^2. If no force constant matrix can be found in the log file,
``None`` is returned.
"""
F = None
# One Cartesian row/column per atom coordinate (3 per atom).
Natoms = self.getNumberOfAtoms()
Nrows = Natoms * 3
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Read force constant matrix
if 'Final Hessian.' in line or 'Hessian of the SCF Energy' in line:
F = numpy.zeros((Nrows,Nrows), numpy.float64)
# The Hessian is printed in blocks of (up to) 6 columns each.
for i in range(int(math.ceil(Nrows / 6.0))):
# Header row
line = f.readline()
# Matrix element rows
for j in range(Nrows): #for j in range(i*6, Nrows):
# First token of each row is the row label; the rest are values.
data = f.readline().split()
for k in range(len(data)-1):
F[j,i*6+k] = float(data[k+1])
#F[i*5+k,j] = F[j,i*5+k]
# Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2
F *= 4.35974417e-18 / 5.291772108e-11**2
line = f.readline()
# Close file when finished
f.close()
return F
def loadGeometry(self):
"""
Return the optimum geometry of the molecular configuration from the
Qchem log file. If multiple such geometries are identified, only the
last is returned.
"""
atom = []; coord = []; number = [];
try:
f = open(self.path, 'r')
except IndexError:
print('File not found')
f = open(self.path, 'r')
line = f.readline()
while line != '':
if 'Final energy is' in line:
print 'found a sucessfully completed Qchem Geometry Optimization Job'
line = f.readline()
atom = []; coord = []
break
line = f.readline()
found = 0
while line != '':
if 'Standard Nuclear Orientation' in line:
found += 1
for i in range(3): line = f.readline() # skip lines
while '----------------------------------------------------' not in line:
data = line.split()
atom.append((data[1]))
coord.append([float(data[2]), float(data[3]), float(data[4])])
line = f.readline()
# Read the next line in the file
line = f.readline()
# Read the next line in the file
line = f.readline()
if found ==1: break
line = f.readline()
#print coord
f.close()
coord = numpy.array(coord, numpy.float64)
mass = numpy.array(coord, numpy.float64)
# Assign appropriate mass to each atom in molecule
# These values were taken from "Atomic Weights and Isotopic Compositions" v3.0 (July 2010) from NIST
mass = [0]*len(atom)
for i in range(len(atom)):
if atom[i] == 'H':
mass[i] = 1.00782503207
number.append('1')
elif atom[i] == 'C':
mass[i] = 12.0
number.append('6')
elif atom[i] == 'N':
mass[i] = 14.0030740048
number[i] = 7
number.append('7')
elif atom[i] == 'O':
mass[i] = 15.99491461956
number.append('8')
elif atom[i] == 'P':
mass[i] = 30.97376163
number.append('15')
elif atom[i] == 'S':
mass[i] = 31.97207100
number.append('16')
elif atom[i] == 'Cl':
mass[i] = 35.4527
number.append('17')
else:
print 'Atomic atom {0:d} not yet supported in loadGeometry().'.format(atom[i])
number = numpy.array(number, numpy.int)
return coord, number, mass
def loadConformer(self, symmetry=None, spinMultiplicity=None, opticalIsomers=1):
"""
Load the molecular degree of freedom data from a log file created as
the result of a Qchem "Freq" calculation. As
Qchem's guess of the external symmetry number is not always correct,
you can use the `symmetry` parameter to substitute your own value; if
not provided, the value in the Qchem output file will be adopted.
"""
modes = []; freq = []; mmass = []; rot = []
E0 = 0.0
f = open(self.path, 'r')
line = f.readline()
while line != '':
# The data we want is in the Thermochemistry section of the output
if 'VIBRATIONAL ANALYSIS' in line:
modes = []
inPartitionFunctions = False
line = f.readline()
while line != '':
# This marks the end of the thermochemistry section
if 'Thank you very much for using Q-Chem.' in line:
break
# Read vibrational modes
elif 'VIBRATIONAL FREQUENCIES (CM**-1)' in line:
frequencies = []
while 'STANDARD THERMODYNAMIC QUANTITIES AT' not in line:
if ' Frequency:' in line:
frequencies.extend([float(d) for d in line.split()[-3:]])
line = f.readline()
line = f.readline()
# If there is an imaginary frequency, remove it
if frequencies[0] < 0.0:
frequencies = frequencies[1:]
vibration = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
#modes.append(vibration)
freq.append(vibration)
# Read molecular mass for external translational modes
elif 'Molecular Mass:' in line:
mass = float(line.split()[2])
translation = IdealGasTranslation(mass=(mass,"amu"))
#modes.append(translation)
mmass.append(translation)
# Read moments of inertia for external rotational modes, given in atomic units
elif 'Eigenvalues --' in line:
inertia = [float(d) for d in line.split()[-3:]]
# If the first eigenvalue is 0, the rotor is linear
if inertia[0] == 0.0:
inertia.remove(0.0)
for i in range(2):
inertia[i] *= (constants.a0/1e-10)**2
rotation = LinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
#modes.append(rotation)
rot.append(rotation)
else:
for i in range(3):
inertia[i] *= (constants.a0/1e-10)**2
rotation = NonlinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
#modes.append(rotation)
rot.append(rotation)
# Read Qchem's estimate of the external rotational symmetry number, which may very well be incorrect
elif 'Rotational Symmetry Number is' in line and symmetry is None:
symmetry = int(float(line.split()[4]))
elif 'Final energy is' in line:
E0 = float(line.split()[3]) * constants.E_h * constants.Na
print 'energy is' + str(E0)
# Read ZPE and add to ground-state energy
# NEED TO MULTIPLY ZPE BY scaling factor!
elif 'Zero point vibrational energy:' in line:
ZPE = float(line.split()[4]) * 4184
E0=E0+ZPE
# Read spin multiplicity if not explicitly given
# elif 'Electronic' in line and inPartitionFunctions and spinMultiplicity is None:
# spinMultiplicity = int(float(line.split()[1].replace('D', 'E')))
# elif 'Log10(Q)' in line:
# inPartitionFunctions = True
# Read the next line in the file
line = f.readline()
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
modes = mmass + rot + freq
#modes.append(mmass), modes.append(rot), modes.append(freq)
return Conformer(E0=(E0*0.001,"kJ/mol"), modes=modes, spinMultiplicity=spinMultiplicity, opticalIsomers=opticalIsomers)
def loadEnergy(self,frequencyScaleFactor=1.):
"""
Load the energy in J/mol from a Qchem log file. Only the last energy
in the file is returned. The zero-point energy is *not* included in
the returned value.
"""
modes = []
E0 = None
spinMultiplicity = 1
f = open(self.path, 'r')
line = f.readline()
while line != '':
if 'Final energy is' in line:
E0 = float(line.split()[3]) * constants.E_h * constants.Na
print 'energy is' + str(E0)
# elif 'Zero point vibrational energy' in line:
#Qchem's ZPE is in kcal/mol
# ZPE = float(line.split()[4]) * 4184
# scaledZPE = ZPE * frequencyScaleFactor
# print 'ZPE is ' + str(ZPE)
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if E0 is not None:
return E0
else:
raise Exception('Unable to find energy in Qchem output file.')
def loadZeroPointEnergy(self,frequencyScaleFactor=1.):
"""
Load the unscaled zero-point energy in J/mol from a Qchem output file.
"""
modes = []
ZPE = None
spinMultiplicity = 1
f = open(self.path, 'r')
line = f.readline()
while line != '':
# if 'Final energy is' in line:
# E0 = float(line.split()[3]) * constants.E_h * constants.Na
# print 'energy is' + str(E0)
if 'Zero point vibrational energy' in line:
#Qchem's ZPE is in kcal/mol
ZPE = float(line.split()[4]) * 4184
#scaledZPE = ZPE * frequencyScaleFactor
print 'ZPE is' + str(ZPE)
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if ZPE is not None:
return ZPE
else:
raise Exception('Unable to find zero-point energy in Qchem output file.')
def loadScanEnergies(self):
"""
Extract the optimized energies in J/mol from a Qchem log file, e.g. the
result of a Qchem "PES Scan" quantum chemistry calculation.
"""
Vlist = []
angle = []
f = open(self.path, 'r')
line = f.readline()
while line != '':
if 'Summary of potential scan:' in line:
line = f.readline()
print 'found a sucessfully completed Qchem Job'
while '-----------------' not in line:
# print len(line.split())
# Vlist.append(float(line.split()[1]))
values = [float(item) for item in line.split()]
angle.append(values[0])
Vlist.append(values[1])
# Read the next line in the file
line = f.readline()
line = f.readline()
if 'SCF failed to converge' in line:
print 'Qchem Job did not sucessfully complete: SCF failed to converge'
break
# Close file when finished
print ' Assuming', os.path.basename(self.path), 'is the output from a Qchem PES scan...'
f.close()
# Adjust energies to be relative to minimum energy conformer
# Also convert units from Hartree/particle to J/mol
Vlist = numpy.array(Vlist, numpy.float64)
Vlist -= numpy.min(Vlist)
Vlist *= constants.E_h * constants.Na
angle = numpy.arange(0.0, 2*math.pi+0.00001, 2*math.pi/(len(Vlist)-1), numpy.float64)
return Vlist, angle
def loadNegativeFrequency(self):
"""
Return the imaginary frequency from a transition state frequency
calculation in cm^-1.
"""
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Read imaginary frequency
if ' Frequency:' in line:
frequency = float((line.split()[1]))
break
line = f.readline()
# Close file when finished
f.close()
#Make sure the frequency is imaginary:
if frequency < 0:
return frequency
else:
raise Exception('Unable to find imaginary frequency in QChem output file.')
| faribas/RMG-Py | rmgpy/cantherm/qchem.py | Python | mit | 16,767 | [
"Q-Chem"
] | bf15303c47b06748cad5a2e2ad750684da14e96b4b5447171e3382183d441caa |
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the Bacterial and Archaeal Genome Analyser
# Copyright (C) 2015-16 David Williams
# david.williams.at.liv.d-dub.org.uk
# License GPLv3+: GNU GPL version 3 or later
# This is free software: you are free to change and redistribute it
# There is NO WARRANTY, to the extent permitted by law
#
# Work on this software was started at The University of Liverpool, UK
# with funding from The Wellcome Trust (093306/Z/10) awarded to:
# Dr Steve Paterson (The University of Liverpool, UK)
# Dr Craig Winstanley (The University of Liverpool, UK)
# Dr Michael A Brockhurst (The University of York, UK)
#
'''
Commandline interface to the Bacterial and Archaeal Genome Analyser
(baga) Python package.
'''
import argparse
import textwrap
import sys
import re
import os
import subprocess
from collections import defaultdict
import pdb
# a basic installation keeps the commandline interface in the package folder;
# strip the last two path elements of this script's real path to find the
# directory containing the baga package
path_to_baga_package = os.path.sep.join(os.path.realpath(__file__).split(os.path.sep)[:-2])
# so importing the module needs to be done from there
sys.path.append(path_to_baga_package)
from baga import Dependencies
def check_files(files):
    '''
    Report whether all expected output files exist and are non-empty.

    `files` is either a dict mapping run accessions to dicts of paired
    fastq paths, or a flat list of SAM/BAM paths. Returns True only if
    every expected file can be opened and has non-zero size.
    '''
    def _present(path):
        # a file only counts if it can be opened and is not empty
        try:
            with open(path, 'rb') as filein:
                return bool(os.path.getsize(path))
        except IOError:
            return False

    confirmed = set()
    if isinstance(files, dict):
        # dictionary of paired fastqs keyed by run accession: a run is
        # confirmed only when every one of its files is present
        for run_acc, pairedfiles in files.items():
            num_present = sum(1 for p in pairedfiles.values() if _present(p))
            if num_present == len(pairedfiles):
                confirmed.add(run_acc)
    else:
        # flat list of SAMs or BAMs
        confirmed.update(p for p in files if _present(p))
    return confirmed == set(files)
def delete_files(files, extra = False):
    '''
    Remove intermediate files from disk and report the space reclaimed.

    `files` is either a dict of dicts of fastq paths (paired reads per run
    accession) or a flat list of SAM/BAM paths. If `extra` is a substring
    appearing in a fastq name, the corresponding file with that substring
    removed from its name is deleted as well. Returns the total size in
    bytes of everything deleted.
    '''
    def _remove(path):
        # record the size, announce, then unlink; returns bytes freed
        nbytes = os.path.getsize(path)
        print('Deleting {}'.format(path))
        os.unlink(path)
        return nbytes

    total_size = 0
    if isinstance(files, dict):
        for run_acc, pairedfiles in files.items():
            for n, fastq in pairedfiles.items():
                total_size += _remove(fastq)
                if extra and extra in fastq:
                    # also delete the file this one was derived from
                    total_size += _remove(fastq.replace(extra, ''))
    else:
        for alnfile in files:
            total_size += _remove(alnfile)
    return total_size
from baga import get_exe_path
def exe_fail(program_name):
    '''
    Report a missing external program dependency and exit with status 1.

    Prints the path at which `program_name` was expected (via
    get_exe_path) plus the baga commands to check for or install it
    locally, then terminates the process.
    '''
    # fixed duplicated phrase "executable at executable at" in the message
    print('Could not find the {} executable at {}.'.format(
                            program_name, get_exe_path(program_name)))
    print('You can check if it is installed using:')
    print('{} Dependencies --check {}'.format(sys.argv[0], program_name))
    print('You can install it locally using:')
    print('{} Dependencies --get {}'.format(sys.argv[0], program_name))
    sys.exit(1)
def check_baga_path(baga_prefix, supplied_name):
    '''
    Resolve a baga object supplied as either a file path or a bare name.

    To allow for either path-and-filename or just the name of baga
    objects: first try `supplied_name` as a path to an existing file,
    then as a name to be wrapped as "<baga_prefix>-<name>.baga".
    Returns a (path, name) tuple on success or (False, False) if
    neither candidate file exists.
    '''
    # tolerate a trailing hyphen on the prefix
    if baga_prefix[-1] == '-':
        baga_prefix = baga_prefix[:-1]
    # try as path: derive the bare name from the file name
    try:
        use_path = os.path.expanduser(supplied_name)
        use_name = use_path.split(os.path.sep)[-1].replace(baga_prefix+'-','').replace('.baga','')
        with open(use_path,'rb') as f:
            return(use_path,use_name)
    except IOError:
        pass
    # try as baga name: build the conventional file name
    # (removed a stray debug print of the candidate path here)
    try:
        use_path = '{}-{}.baga'.format(baga_prefix, supplied_name)
        with open(use_path,'rb') as f:
            return(use_path,supplied_name)
    except IOError:
        pass
    return(False,False)
def check_direct_arguments(arguments, wrapped_tools):
    '''
    Parse commandline arguments to be passed to wrapped tools.

    `arguments` is a flat list alternating program name and argument
    string; argument strings use underscores in place of dashes (e.g.
    "_o" for "-o", "__option" for "--option") to avoid conflicts with
    baga's own options. Checks whether the specified target programs are
    recognised among the task-specific `wrapped_tools` and returns a
    dict mapping program name to its dash-restored argument string.
    Returns an empty dict when no arguments were supplied (the CLI
    defaults to False for an absent option).
    '''
    if not arguments:
        return({})
    def underscores(txt):
        '''convert __ to -- and _ to -'''
        a = re.sub('(_)(_)', r'-\1', txt)
        a = re.sub('([- ]|^)(_)([\w])', r'\1-\3', a)
        return(a)
    direct_arguments = {a:underscores(b) for a,b in zip(arguments[::2],
            arguments[1::2])}
    # an odd-length list produces no pairs at all: reject it
    # (the message now actually formats in the supplied arguments; the
    # placeholder was previously left unformatted)
    assert len(direct_arguments) != 0, 'Supplied direct arguments ({}) '\
            'not recognised. Should be: "--arguments <program name> '\
            '<\'_o 5 __option 2\'>" (use underscore, _o and __option, '\
            'instead of hyphen, -o and --option)'.format(arguments)
    recognised = set(direct_arguments) & set(wrapped_tools)
    if len(recognised) == 0:
        recognised = ['none']
    not_recognised = set(direct_arguments) - set(wrapped_tools)
    # (removed a stray debug print of the parsed arguments here)
    assert len(not_recognised) == 0, 'Some of the wrapped '\
            'program names specified are not recognised:\n'\
            'Recognised: {}\n'\
            'Not recognised: {}\n'.format(', '.join(recognised),
            ', '.join(not_recognised))
    return(direct_arguments)
## Program metadata: used by --version and the start-up splash text below
text_width = 70
title = 'Bacterial and Archaeal Genome Analyser'
subtitle = textwrap.fill('Novel analyses and wrapped tools pipelined for convenient processing of genome sequences', text_width)
version_date = 'December 20 2015'
version_num = 0.2
authors = 'David Williams'
email = 'david.williams.at.liv.d-dub.org.uk'
blurb = '''Work on this software was started at The University of Liverpool, UK
with funding from The Wellcome Trust (093306/Z/10) awarded to:
Dr Steve Paterson (The University of Liverpool, UK)
Dr Craig Winstanley (The University of Liverpool, UK)
Dr Michael A Brockhurst (The University of York, UK)
Copyright (C) 2015 David Williams
License GPLv3+: GNU GPL version 3 or later
This is free software: you are free to change and redistribute it
There is NO WARRANTY, to the extent permitted by law
'''
# full splash banner assembled from the pieces above; printed at start-up
# unless suppressed with --nosplash
splash = '\n{title}:\n\n{subtitle}\n\nVersion {version_num} ({version_date})\n\n{authors}\n{email}\n\n\n{blurb}\n'.format(
    title = title,
    subtitle = subtitle,
    version_date = version_date,
    version_num = version_num,
    authors = authors,
    email = email,
    blurb = blurb)
## get options from command line and decide what to do
# Root argument parser: all analyses are exposed as subcommands added to
# `subparser_adder` below; which subcommand ran is recorded in args.subparser
parser = argparse.ArgumentParser(
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('{} {}:\n{}'.format(
        title,
        version_num,
        subtitle),
        text_width,
        replace_whitespace = False),
    epilog = textwrap.fill('The {} (baga) will perform all necessary downloads and analysis in the current directory. Please ensure there is sufficient disk space for downloads of short read data (many large fastq files) and subsequent analyses. Third party programs can either be downloaded and compiled by baga (except git, make and GATK) or installed system wide before-hand by the user or installed locally with paths to executables supplied in the dependencies.conf file.\n\nExample usage: %(prog)s CollectData ...\n'.format(title), text_width, replace_whitespace = False))
parser.add_argument('--version', action='version',
    version='%(prog)s {} ({})'.format(version_num, version_date))
# --splash/--nosplash are mutually exclusive toggles for the banner
group = parser.add_mutually_exclusive_group(required=False)
#parser.add_argument('--nosplash',
group.add_argument('--nosplash',
    # NOTE(review): "Supress" typo in this user-facing help string left
    # unchanged here (runtime text)
    help = "Supress printing of start-up splash info.",
    action = 'store_true',
    default = False)
group.add_argument('--splash',
    help = "Print start-up splash info.",
    action = 'store_true',
    default = True)
# group = parser.add_mutually_exclusive_group()
# group.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
# group.add_argument("-q", "--quiet", help="suppress output", action="store_true")
subparser_adder = parser.add_subparsers(title = 'Analyses', dest="subparser")
## Subcommand: Dependencies — check for and/or fetch third-party programs
parser_Dependencies = subparser_adder.add_parser('Dependencies',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Check required external programs \
are available and optionally get them.',
    text_width,
    replace_whitespace = False))
# epilog = textwrap.fill('Groups of read data sets from the \
# CollectData option are loaded by providing the file name.\n\n\
# Example usage: "%(prog)s PrepareReads --reads_name ERR953490plus5others --adaptors --trim --align FM209186"\n'\
# .format(title), text_width, replace_whitespace = False))
# the check/get actions are mutually exclusive; choices are drawn from the
# Dependencies module's registry of known programs/packages
group_check_or_get = parser_Dependencies.add_mutually_exclusive_group()
group_check_or_get.add_argument('-c', "--check",
    help = "check BAG Analyser has access to a dependency either in the system path or locally",
    type = str,
    choices = sorted(Dependencies.dependencies),
    nargs = '+')
group_check_or_get.add_argument('-p', "--checkpackage",
    help = "check BAG Analyser has access to a python package dependency. Sometimes required if baga just installed a package and a fresh Python instance is required to check it",
    type = str,
    #choices = sorted(Dependencies.dependencies),
    # only dependencies whose checker is the python-package checker qualify
    choices = sorted([name for name,info in Dependencies.dependencies.items() if \
            info['checker']['function'] == Dependencies.check_python_package]))
group_check_or_get.add_argument('-g', "--get",
    help = "get (or explain how to get) a dependency for BAG Analyser",
    type = str,
    choices = sorted(Dependencies.dependencies),
    nargs = '+')
group_check_or_get.add_argument('-C', "--checkget",
    help = "check a dependency for BAG Analyser and attempt to get if not available",
    type = str,
    choices = sorted(Dependencies.dependencies),
    nargs = '+')
group_check_or_get.add_argument('-f', "--checkgetfor",
    help = "check a set of dependencies for a BAG Analyser task and attempt to get those that are not available",
    type = str,
    choices = sorted(Dependencies.dependencies_by_task),
    nargs = '+')
parser_Dependencies.add_argument("-V", "--versions_file",
    help="specify file containing versions of software dependencies to use. Defaults to versions.yaml in current folder, falls back to versions.yaml in same folder as baga_cli.py (which might be the same one). If no yaml file specified or found, will use a set of versions built into Dependencies.py that were current in late 2015.",
    type = str,
    default = 'versions.yaml')
## Subcommand: CollectData — fetch genomes (NCBI/genbank) or short reads (ENA)
parser_CollectData = subparser_adder.add_parser('CollectData',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Download and parse genomes or \
download short reads for analysis by {}'.format(title),
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('Genomes can be loaded from genbank \
files with user provided paths or downloaded from the National Center for \
Biotechnology Information (NCBI) with user provided accession numbers. Short \
reads can be downloaded from the European Nucleotide Archive with user provided \
Run Accession numbers\n\nExample usage: "%(prog)s -r \
ERR953490,ERR953494,ERR953501,ERR953509,ERR953491,ERR953513"\n'\
.format(title), text_width, replace_whitespace = False))
# exactly one data source may be given: genomes, remote reads or local reads
mutually_exclusive_group = parser_CollectData.add_mutually_exclusive_group()
# could use nargs='+' or similar here to get a list directly,
# but not sure about spaces in paths
mutually_exclusive_group.add_argument("-g", "--genomes",
    help="(down)load and parse genomes for analysis",
    type = str,
    nargs = '+')
mutually_exclusive_group.add_argument("-r", "--reads_download",
    help="download short reads for analysis",
    type = str,
    nargs = '+')
mutually_exclusive_group.add_argument("-R", "--reads_path",
    # NOTE(review): "collcted" typo in this user-facing help string left
    # unchanged here (runtime text)
    help="path to local short reads in fastq files for analysis. All read files will be collcted. Alternatively, a pattern containing '*' '?' and other shell expansion characters or an explicit list of read files can also be supplied.",
    type = str,
    nargs = '+')
parser_CollectData.add_argument('-e', "--email_address",
    help = "required for downloading from NCBI")
parser_CollectData.add_argument('-n', "--reads_group_name",
    help = "optional for downloading from NCBI, required for loading from local path")
## Subcommand: PrepareReads — subsample, adaptor-trim and quality-trim reads
parser_PrepareReads = subparser_adder.add_parser('PrepareReads',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Prepare reads for alignment to \
genome sequence by removing adaptor sequences and trimming by position specific \
quality scores. Align reads to a reference genome sequence.',
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('Groups of read data sets from the \
CollectData option are loaded by providing the file name.\n\n\
Example usage: "%(prog)s --reads_name ERR953490plus5others --adaptors --trim --align FM209186"\n',
    text_width, replace_whitespace = False))
parser_PrepareReads.add_argument('-n', "--reads_name",
    help = "name of read datasets group generated by the CollectData option",
    type = str,
    required = True)
parser_PrepareReads.add_argument('-N', "--max_cpus",
    # -1 means no explicit limit on parallel workers
    help = "maximum number of cpu cores used when parallel processing",
    type = int,
    default = -1)
parser_PrepareReads.add_argument('-F', "--force",
    help = "overwrite existing data: required when repeating an analysis else \
previous versions retained. Retention of previous versions is convenient for \
resuming an interrupted analysis in which only some read sets were processed.",
    action = 'store_true',
    default = False)
parser_PrepareReads.add_argument('-s', "--subsample_to_cov",
    help = "subsample reads to a requested average coverage depth of a given \
genome size. This provides smaller files of a consistent size saving storage \
space and processing time later and benefitting some analyses like de novo \
assembly. E.g.\n\
'--subsample_to_cov 80 5000000' for 80x coverage of a 5 Mb genome.",
    type = int,
    nargs = 2,
    metavar = ('COVERAGE_DEPTH','GENOME_LENGTH'))
parser_PrepareReads.add_argument('-a', "--adaptors",
    # bare "--adaptors" implies the 'subsample' choice via const=
    help = "cut residual adaptor sequences from reads using CutAdapt. Defaults to \
using read sets previously subsampled to adequate estimated reference genome \
coverage. Optionally use original reads, even if subsampling has been performed, \
with '--adaptors fullsample'.",
    type = str,
    nargs='?',
    const = 'subsample',
    choices = ['fullsample','subsample'])
parser_PrepareReads.add_argument('-t', "--trim",
    help = "trim read ends based on quality scores using Sickle",
    action = 'store_true')
parser_PrepareReads.add_argument('-D', "--delete_intermediates",
    help = "delete intermediate fastq files to save space. Files are only deleted if those for next stage are found",
    action = 'store_true')
## Subcommand: AlignReads — BWA alignment, deduplication and indel realignment
parser_AlignReads = subparser_adder.add_parser('AlignReads',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Align reads to a reference genome sequence.',
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('Reads prepared by the PrepareReads module are loaded. \
A genome prepared by the CollectData option are loaded by providing the file name.\n\n\
Example usage: "%(prog)s --reads_name ERR953490plus5others --genome_name FM209186 --align"\n',
    text_width, replace_whitespace = False))
parser_AlignReads.add_argument('-n', "--reads_name",
    help = "name of read datasets group prepared by the PrepareReads option",
    type = str,
    required = True)
parser_AlignReads.add_argument('-g', "--genome_name",
    help = "name of genome obtained by the CollectData option",
    type = str,
    required = True)
parser_AlignReads.add_argument('-N', "--max_cpus",
    # -1 means no explicit limit on parallel workers
    help = "maximum number of cpu cores used when parallel processing",
    type = int,
    default = -1)
parser_AlignReads.add_argument('-F', "--force",
    help = "overwrite existing data: required when repeating an analysis else \
previous versions retained. Retention of previous versions is convenient for \
resuming an interrupted analysis in which only some read sets were processed.",
    action = 'store_true',
    default = False)
parser_AlignReads.add_argument('-a', "--align",
    help = "align reads to a genome using Burrows Wheeler Aligner (BWA). \
Requires name of genome prepared by CollectData option. Convert to BAM, sort \
and index",
    action = 'store_true')
parser_AlignReads.add_argument('-d', "--deduplicate",
    help = "Remove duplicates using Picard",
    action = 'store_true')
parser_AlignReads.add_argument('-r', "--indelrealign",
    help = "realign read alignments near potential indels using GATK",
    action = 'store_true')
parser_AlignReads.add_argument('-p', "--prepared",
    help = "if your reads were trimmed and cleaned etc. already, without using BAGA's PrepareReads options, this option allows you to continue directly after the CollectData options.",
    action = 'store_true')
parser_AlignReads.add_argument('-P', "--GATK_jar_path",
    help = "path to Genome Analysis Toolkit (GATK) jar file. See also --JRE_1_7_path if system JAVA is version 1.8",
    type = str)
parser_AlignReads.add_argument('-J', "--JRE_1_7_path",
    help = "path to JAVA runtime environment version 1.7 binary file for use with GATK versions 3.3 or 3.4 (not compatible with JRE 1.8!)",
    type = str)
parser_AlignReads.add_argument('-D', "--delete_intermediates",
    help = "delete intermediate SAM and BAM files to save space. Files are only deleted if those for next stage are found",
    action = 'store_true')
## Subcommand: SimulateReads — generate synthetic reads with known variants
parser_SimulateReads = subparser_adder.add_parser('SimulateReads',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Simulate reads with optional '\
'variants from a reference genome sequence.',
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('Example usage: "%(prog)s --genome_name FM209186 --num_SNPs 100"\n',
    text_width, replace_whitespace = False))
parser_SimulateReads.add_argument('-g', "--genome_name",
    help = "name of genome obtained by the CollectData option from which to generate reads from",
    type = str,
    required = True)
parser_SimulateReads.add_argument('-N', "--max_cpus",
    # -1 means no explicit limit on parallel workers
    help = "maximum number of cpu cores used when parallel processing",
    type = int,
    default = -1)
parser_SimulateReads.add_argument('-G', "--gemsim",
    help = "generate reads using GemSIM",
    action = 'store_true')
parser_SimulateReads.add_argument('-n', "--num_individuals",
    help = "genome population size",
    type = int,
    default = 1)
parser_SimulateReads.add_argument('-D', "--large_deletions",
    # flat list of positions interpreted pairwise as deletion start/end
    help = "ranges of large deletions e.g., prophage in reference missing in "\
            "samples. If specified, a second set of individuals are generated "\
            "with these deletions. Currently, total variants are shared among "\
            "this double sized population. In the future, the second set will "\
            "probably share same variants as first set, the only difference "\
            "being the large deletions. e.g., '--large_deletions 10000 20000 "\
            "80000 90000' will omit regions 10000-20000 bp and 80000-90000 bp.",
    type = int,
    nargs = '+',
    metavar = 'CHROMOSOME_POSITION')
parser_SimulateReads.add_argument('-r', "--random_seed",
    help = "set the random seed for the pseudo-random number generator for "\
            "reproducible variants.",
    type = int,
    default = 684651)
parser_SimulateReads.add_argument('-s', "--num_SNPs",
    help = "total single nucleotide polymorphisms in population",
    type = int)
parser_SimulateReads.add_argument('-d', "--num_deletions",
    help = "total small deletion polymorphisms in population",
    type = int)
parser_SimulateReads.add_argument('-i', "--num_insertions",
    # NOTE(review): help text says "deletion" but this option counts
    # insertions — left unchanged here (runtime text)
    help = "total small deletion polymorphisms in population",
    type = int)
## Subcommand: Repeats — find/plot/summarise repetitive regions of a genome
parser_Repeats = subparser_adder.add_parser('Repeats',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Detect repetitive regions in a \
genome sequence. Plot these regions as chromosome map with percent \
identity and some features. The intended use is to mark the repetitive regions \
for exclusion from a short read mapping experiment. Repetitive regions are \
more likely to contain ambiguous variants that may be caused by divergence \
between duplicated regions in the reference genome or sample and not by \
variation at orthologous regions.',
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('The Repeats finder is expected to \
operate on a genome obtained via the CollectData option.\n\n\
Example usage: %(prog)s --genome_name FM209186.1 --find\n',
    text_width, replace_whitespace = False))
parser_Repeats.add_argument('-g', "--genome_name",
    help = "name of genome obtained and prepared by the CollectData option",
    type = str,
    required = True)
parser_Repeats.add_argument('-f', "--find",
    help = "find repeats using Burrows-Wheeler Aligner (BWA) alignments",
    action = 'store_true')
parser_Repeats.add_argument('-m', "--method",
    help = "The baga method (default) can be used as part of a variant "\
            "calling pipeline by providing a variant filter at regions that are "\
            "so similar, they might cause ambiguous read alignment. The nucmer "\
            "method (nucmer_check) is to provide a means to compare a previous BAGA "\
            "repeats analysis with the 'classic' nucmer method from the MUMmer "\
            "package. The BAGA method is much slower but performs optimal global "\
            "alignments between repeats and performs codon alignments where "\
            "appropriate, so is more accurate than nucmer but much slower.",
    type = str,
    default = "baga",
    choices = ["baga","nucmer_check"])
parser_Repeats.add_argument('-i', "--minimum_percent_identity",
    help = "the lowest nucleotide percent identity permitted repeat over regions",
    type = int,
    default = 98)
parser_Repeats.add_argument('-l', "--minimum_repeat_length",
    help = "shortest repeat length: should be similar to insert size of paired end reads as smaller (non-tandem) repeats should be resolvable and have unambiguous mappings",
    type = int,
    default = 400)
parser_Repeats.add_argument('-p', "--plot",
    help = "plot repeats using svgwrite library found using '--find'",
    action = 'store_true')
parser_Repeats.add_argument('-s', "--summarise",
    help = "summarise repeats found using '--find' as a .csv file and printed to screen",
    action = 'store_true')
## Subcommand: Structure — detect rearrangements from paired-end insert sizes
## (section continues beyond this point in the file)
parser_Structure = subparser_adder.add_parser('Structure',
    formatter_class = argparse.RawDescriptionHelpFormatter,
    description = textwrap.fill('Detect regions with rearrangements \
between sample genome and reference genome sequence by examining distribution of \
inferred insert sizes from paired end short reads of sample aligned to reference. \
Proportion of paired reads not designated as "proper_pair" by Burrows-Wheeler \
Aligner is expected to increase over regions of sequence rearrangements. Alignments \
in these regions violate assumptions of read mapping method and and any variants \
called at these regions should be excluded from down-stream analyses. \n\nPlot \
these regions as chromosome map with ratio of non-proper pair to proper pair \
classifications and threshold at which deemed to deviate from expected.',
    text_width,
    replace_whitespace = False),
    epilog = textwrap.fill('The Structure checker is expected to \
operate on a genome obtained via the CollectData option; and either a set of BAM \
files in a user provided path or a reads group processed with the AlignReads option. \
\n\n\
Example usages:\n\
%(prog)s --genome_name FM209186.1 --reads_name Liverpool --check\n\
%(prog)s --genome_name FM209186.1 --alignments_paths path/to/my/bams --check\n',
    text_width, replace_whitespace = False))
# input source is exactly one of: a baga reads group, BAM paths, or
# previously saved CheckerInfo files
mutually_exclusive_group = parser_Structure.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument('-n', "--reads_name",
    help = "name of read datasets group if processed by PrepareReads and AlignReads options. Should match --reads_name option used previously",
    type = str)
mutually_exclusive_group.add_argument('-a', "--alignments_paths",
    help = "path to paired-end short read alignments to a reference genome. If a directory path(s) is provided, all *.BAM and *.bam files will be included. A list of filenames with full path or with *, ? or [] characters can also be provided (with unix-style pathname pattern expansion for the latter)",
    type = str,
    nargs = '+')
mutually_exclusive_group.add_argument('-b', "--checkinfos_path",
    help = "path to baga.Structure.CheckerInfo-<sample_name>__<genome_name>.baga files for summarising regions indicated by rearrangements filter with --summarise. Alternatively use --summarise with --reads_name and --genome_name if available (typically as part of a baga short read analysis pipeline). If a directory path(s) is provided, all baga.Structure.CheckerInfo-*__*.baga files will be included. A list of filenames with full path or with *, ? or [] characters can also be provided (with unix-style pathname pattern expansion for the latter)",
    type = str,
    nargs = '+')
parser_Structure.add_argument('-g', "--genome_name",
    help = "required with --reads_name option. Name of genome obtained by the CollectData option and used with AlignReads option",
    type = str)
parser_Structure.add_argument('-c', "--check",
    help = "check sequence rearrangements using paired end insert sizes inferred by Burrows-Wheeler Aligner (BWA) alignments",
    action = 'store_true')
parser_Structure.add_argument('-p', "--plot",
    help = "plot regions affected by structural rearrangements found using '--check' Uses the svgwrite library",
    action = 'store_true')
parser_Structure.add_argument('-r', "--plot_range",
    help = "plot a specific region to see how it was affected by structural rearrangements found using '--check'. Uses the svgwrite library",
    type = int,
    nargs = 2,
    metavar = 'CHROMOSOME_POSITION')
parser_Structure.add_argument('-t', "--ratio_threshold",
help = "When checking for rearrangements, the ratio of non-proper pair to proper pair assigned reads above this value cause a region of read alignments to be considered rearranged. This ratio tends to zero rhen the distance between aligned paired reads is close to the expectation according to the estimated mean fragment size. It increases to around one adjacent to rearrangements e.g., within a fragment's length of a large deletion in the sample/insertion in the reference. Lower values are more sensitive to rearrangements but might include false positive rearrangements, but these can be examined by local de novo assembly of reads and pairwise alignment of contig with reference. If used to filter regions affected by unreliable short read alignments for variant calling, lower values are more conservative (will exclude more false positive variants) but might cause omission of true positive variants. Default = 0.15",
type = float,
default = 0.15,
metavar = 'FLOAT')
## these two should probably be in a parent parser inherited for most tasks
parser_Structure.add_argument('-S', "--include_samples",
help = "With --plot or --plot_range, restrict plotting to these samples else if omitted, plots for all samples are produced.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_Structure.add_argument('-e', "--exclude_samples",
help = "exclude these samples from the analysis. If omitted, no samples are excluded.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_Structure.add_argument('-s', "--summarise",
help = "summarise regions of putative rearrangements found using '--check' as a .csv file and printed to screen. Requires --genome and --reads_name, optionally with --include_samples. Or just --genome_name and --include_samples to specify samples with corresponding 'baga.Structure.CheckerInfo-<sample_name>__<genome_name>.baga' available in current folder.",
action = 'store_true')
parser_Structure.add_argument('-C', "--collect",
help = "extract short reads aligned to regions of putative rearrangements found using '--find', write to a .fastq file, assemble each of them using SPAdes and align contigs back to reference chromosome. Requires --reads_name, optionally with --include_samples to limit to specific samples and --collect_range to specify a range to collect reads for if not those already found by '--find'. Alternatively use --checkinfos_path to specify samples with corresponding 'baga.Structure.CheckerInfo-<sample_name>__<genome_name>.baga'.",
action = 'store_true')
parser_Structure.add_argument('-R', "--collect_ranges",
help = "extract short reads aligned at specified region or regions, write to a .fastq file, assemble de novo using SPAdes and align contigs back to reference chromosome. --num_padding_positions will be set to zero. If more than one region set e.g., --collect_ranges 10000 20000 80000 90000, the reads in range 10000-20000 bp and 80000-90000 bp along with unmapped and poorly mapped reads will be assembled together.",
type = int,
nargs = '+',
metavar = 'CHROMOSOME_POSITION')
parser_Structure.add_argument('-F', "--force",
help = "overwrite existing assemblies when using --collect/-C.",
action = 'store_true',
default = False)
parser_Structure.add_argument('-P', "--num_padding_positions",
help = "for the de novo assembly of collected reads that were aligned/mapped to the reference genome at regions of putative rearrangements, optionally specify additional padding (bp) around each region to collect more reads and assmeble longer contigs for pairwise alignment back to the reference",
type = int,
default = 5000,
metavar = 'NUM_BASEPAIRS')
parser_Structure.add_argument('-m', "--max_memory",
help = "maximum memory to use in gigabytes for each assembly. If not specified, total available at launch time will be used.",
type = int)
parser_Structure.add_argument('-l', "--min_align_region",
help = "when using --collect, set minimum region to align among those reported as potentially rearranged (by --check).",
type = int,
default = 200)
# --- 'CallVariants' subcommand ----------------------------------------------
# GATK joint-calling pipeline (per-sample calls, joint genotyping, hard
# filtering, base quality recalibration) plus de novo calling via DiscoSNP++.
parser_CallVariants = subparser_adder.add_parser(
'CallVariants',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill(
'Call variants with Genome Analysis Tool Kit (GATK) from each '\
'of a group of read sets previously aligned to a genome via the '\
'AlignReads option.',
text_width,
replace_whitespace = False),
epilog = textwrap.fill('Groups of read sets from the AlignReads option '\
'are loaded by providing the name supplied previously.\n\n'\
'Example usage: %(prog)s --reads_name ERR953490plus5others '\
'--calleach --calljoint --hardfilter\n',
text_width, replace_whitespace = False))
parser_CallVariants.add_argument('-r', "--reads_name",
help = "name of read datasets group processed by PrepareReads and "\
"AlignReads options. For GATK a single group can be processed. For "\
"DiscoSNP++, one or more groups can be processed",
type = str,
required = False,
nargs = '+')
parser_CallVariants.add_argument('-g', "--genome_name",
help = "name of genome obtained by the CollectData option",
type = str,
required = False)
parser_CallVariants.add_argument('-F', "--force",
help = "overwrite existing per isolate data: required when repeating an "\
"analysis else previous versions retained. Retention of previous versions "\
"is convenient for resuming an interrupted analysis in which only some "\
"read sets were processed.",
action = 'store_true',
default = False)
parser_CallVariants.add_argument('-C', "--check",
help = "check previously called variants in a VCF by de novo assembly of "\
"relevant short reads and aligning the contigs to each variant part of "\
"the reference genome.",
action = 'store_true',
default = False)
parser_CallVariants.add_argument('-p', "--vcfs_paths",
help = "path to vcf files. If a directory path(s) is provided, all *.VCF "\
"and *.vcf files will be included. A list of filenames with full path or "\
"with *, ? or [] characters can also be provided (with unix-style pathname "\
"pattern expansion for the latter)",
type = str,
nargs = '+')
parser_CallVariants.add_argument('-a', "--alignments_paths",
help = "path to paired-end short read alignments to a reference genome. If a "\
"directory path(s) is provided, all *.BAM and *.bam files will be included. A "\
"list of filenames with full path or with *, ? or [] characters can also be "\
"provided (with unix-style pathname pattern expansion for the latter)",
type = str,
nargs = '+',
metavar = 'PATH_TO_BAMs')
parser_CallVariants.add_argument('-N', "--new",
help = "start new variant calling: required when starting whole \
CallVariants process again from prepared read alignments.",
action = 'store_true',
default = False)
parser_CallVariants.add_argument('-n', "--max_cpus",
help = "maximum number of cpu cores used when parallel processing",
type = int,
default = -1)
parser_CallVariants.add_argument('-m', "--max_memory",
help = "maximum memory to use in gigabytes. If not "\
"specified, GATK will use 8GB, SPAdes will use total "\
"available at launch time.",
type = int)
parser_CallVariants.add_argument('-s', "--callsingles",
help = "call variants in each alignment on a per sample basis (not for a joint "\
"analysis, see --calleach and --calljoint for that). "\
"Called 1st time on uncalibrated alignments, called 2nd after base quality score "\
"recalibration.",
action = 'store_true')
parser_CallVariants.add_argument('-c', "--calleach",
help = "call variants in each alignment in preparation for a joint analysis. "\
"Called 1st time on uncalibrated alignments, called 2nd after base quality score "\
"recalibration.",
action = 'store_true')
parser_CallVariants.add_argument('-j', "--calljoint",
help = "call variants in all alignments in a joint analysis. Called 1st time "\
"on uncalibrated alignments, called 2nd after base quality score recalibration.",
action = 'store_true')
parser_CallVariants.add_argument('-f', "--hardfilter",
help = "apply 'hard filtering' thresholds on called variants to decrease "\
"false positives using GATK",
action = 'store_true')
parser_CallVariants.add_argument('-R', "--recalibrate",
help = "apply read base quality score recalibration using GATK",
action = 'store_true')
parser_CallVariants.add_argument('-P', "--GATK_jar_path",
help = "path to Genome Analysis Toolkit (GATK) jar file. See also "\
"--JRE_1_7_path if system JAVA is version 1.8",
type = str)
parser_CallVariants.add_argument('-J', "--JRE_1_7_path",
help = "path to JAVA runtime environment version 1.7 binary file for use "\
"with GATK versions 3.3 or 3.4 (not compatible with JRE 1.8!)",
type = str)
parser_CallVariants.add_argument('-d', "--calldisco",
help = "call variants de novo from short reads using DiscoSNP++.",
action = 'store_true')
parser_CallVariants.add_argument('-e', "--use_existing_graph",
help = "Use previously generated DiscoSNP++ graph. Be sure the last graph generated matches the specified reads!",
action = 'store_true')
parser_CallVariants.add_argument('-A', "--arguments",
help = "Send direct arguments to a wrapped tool. E.g. --arguments DiscoSNP++ '_k 41'. The leading dashes (-) must be replaced with underscores (_) and the arguments must be in quotations",
nargs = '*')
# --- 'FilterVariants' subcommand --------------------------------------------
# Marks variants in VCFs for exclusion using region filters produced by the
# Repeats and Structure options, and reports per-filter effects.
parser_FilterVariants = subparser_adder.add_parser('FilterVariants',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill('Apply filters determined by \
the Repeats and Structure options on variant calls and report tables of \
effects of filters on different classes of variants. VCF files will be \
copied with updated information marking certain variants to be excluded. \
Variants can be inferred using the CallVariants option.',
text_width,
replace_whitespace = False),
epilog = textwrap.fill('Filter regions and VCF files are \
loaded by providing the read set names or VCF filenames.\n\n\
Example usage: %(prog)s --reads_name ERR953490plus5others \
--genome_name FM209186 --filters genome_repeats rearrangements\n',
text_width, replace_whitespace = False))
# input VCFs come either from a named BAGA reads group or explicit paths
mutually_exclusive_group = parser_FilterVariants.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument('-n', "--reads_name",
help = "name of read datasets group if processed by PrepareReads and AlignReads options. Should match --reads_name option used previously",
type = str,
nargs = '+')
mutually_exclusive_group.add_argument('-p', "--vcfs_paths",
help = "path to vcf files. If a directory path(s) is provided, all *.VCF "\
"and *.vcf files will be included. A list of filenames with full path or "\
"with *, ? or [] characters can also be provided (with unix-style pathname "\
"pattern expansion for the latter)",
type = str,
nargs = '+')
parser_FilterVariants.add_argument('-g', "--genome_name",
help = "name of genome obtained by the CollectData option",
type = str,
required = True)
parser_FilterVariants.add_argument('-f', "--filters",
help = "names of filters to apply. One or more of: genome_repeats provided by the Repeats option; rearrangements provided by the Structure option.",
type = str,
nargs = '+',
# these choices must correspond to known_filters in CallVariants.Filter.__init__()
# or to other_filters which are listed by variant calling program, currently just GATK
choices = ['genome_repeats','rearrangements','GATK'],
metavar = 'FILTER_NAME',
required = True)
parser_FilterVariants.add_argument('-s', "--include_samples",
help = "restrict filtering to these samples. If omitted, plots for all samples are produced.",
type = str,
nargs = '+')
parser_FilterVariants.add_argument('-i', "--path_to_rearrangements_info",
help = "optionally supply path to where rearrangement filter information is for all samples (if not in current directory; they look like baga.Structure.CheckerInfo-<samplename>__<genomename>.baga). These must be generated using the 'Structure --check' option and are each generated from the same .bam alignment file as the corresponding, supplied VCF files.",
type = str)
# --- 'SummariseVariants' subcommand -----------------------------------------
# Writes .csv summaries of VCF variant calls: per-row tables (--simple),
# allele frequency lists (--lists) and cumulative filter effects (--cumulative).
parser_SummariseVariants = subparser_adder.add_parser('SummariseVariants',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill('Generate various .csv text files for '\
'convenient viewing and downstream analysis.', text_width,
replace_whitespace = False),
epilog = textwrap.fill('Example usage: %(prog)s --simple --vcfs_paths '\
'path/to/*.vcf\n', text_width, replace_whitespace = False))
mutually_exclusive_group = parser_SummariseVariants.add_mutually_exclusive_group(required=True)
# either: reads_name and genome_name to infer CallVariants.Caller object path
mutually_exclusive_group.add_argument('-n', "--reads_name",
help = "name of read datasets group if processed by CallVariants options. "\
"Should match --reads_name option used previously",
type = str,
nargs = '+')
mutually_exclusive_group.add_argument('-p', "--vcfs_paths",
help = "path to vcf files. If a directory path(s) is provided, all *.VCF "\
"and *.vcf files will be included. A list of filenames with full path or "\
"with *, ? or [] characters can also be provided (with unix-style pathname "\
"pattern expansion for the latter)",
type = str,
nargs = '+')
# even without reads_name, genome is still useful to provide additional annotations
parser_SummariseVariants.add_argument('-g', "--genome_names",
help = "name of genome obtained by the CollectData option",
type = str,
nargs = '+')
parser_SummariseVariants.add_argument('-f', "--filters",
help = "names of filters to apply. One or more of: genome_repeats provided "\
"by the Repeats option; rearrangements provided by the Structure option.",
type = str,
nargs = '+',
# these choices must correspond to known_filters in CallVariants.Filter.__init__()
# or to other_filters which are listed by variant calling program, currently just GATK
choices = ['genome_repeats','rearrangements','GATK'],
metavar = 'FILTER_NAME')
parser_SummariseVariants.add_argument('-S', "--simple",
help = "generate a simple .csv table corresponding to the VCF file rows.",
action = 'store_true')
parser_SummariseVariants.add_argument('-L', "--lists",
help = "generate a .csv table listing allele frequencies in whole dataset, "\
"between samples and reference and among samples excluding reference.",
action = 'store_true')
parser_SummariseVariants.add_argument('-C', "--cumulative",
help = "summarise the cumulative effect of filters in .csv table.",
action = 'store_true')
parser_SummariseVariants.add_argument('-s', "--include_samples",
help = "restrict filtering to these samples. If omitted, plots for all "\
"samples are produced.",
type = str,
nargs = '+')
# --- 'CheckLinkage' subcommand ----------------------------------------------
# Tests whether variant alleles co-occur on the same read/fragment in pooled
# samples ("multidiverse" signature of within-host lineage diversity).
parser_CheckLinkage = subparser_adder.add_parser('CheckLinkage',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill('Check for alleles on the same \
read or read pair (fragment; and therefore the same chromosomes) called at \
polymorphisms in a sample of pooled genomic DNA. Infrequent co-incidence of \
variants on the same read in nearby polymorphisms implies variants occurring in \
different genomes in the sample (separate lineages) and has been described as a \
"multidiverse" signature in:\n\n\
Lieberman, T. D., Flett, K. B., Yelin, I., Martin, T. R., McAdam, A. J., Priebe, G. P. & Kishony, R. \n\
Genetic variation of a bacterial pathogen within individuals with cystic fibrosis provides a record of selective pressures.\n\
Nature Genetics, 2013, 46, 82-87.',
text_width,
replace_whitespace = False),
epilog = textwrap.fill('The linkage checker is expected to \
operate on short reads aligned to a genome and the corresponding variants. \
These can be provided by a previous BAGA analysis or by providing paths to BAM \
and VCF files. \
\n\n\
Example usages:\n\
%(prog)s --genome_name FM209186.1 --reads_name Liverpool --check\n\
%(prog)s --alignments_paths path/to/my/bams --vcfs_paths path/to/my/vcfs --check --genome_name FM209186.1\n',
text_width, replace_whitespace = False))
# alignments come either from a named BAGA reads group or explicit BAM paths
mutually_exclusive_group = parser_CheckLinkage.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument('-n', "--reads_name",
help = "name of read datasets group if processed by PrepareReads and AlignReads options. Should match --reads_name option used previously",
type = str)
mutually_exclusive_group.add_argument('-a', "--alignment_paths",
help = "path to paired-end short read alignments to a reference genome. If a directory path(s) is provided, all *.BAM and *.bam files will be included. A list of filenames with full path or with *, ? or [] characters can also be provided (with unix-style pathname pattern expansion for the latter)",
type = str,
nargs = '+',
metavar = 'PATH_TO_BAMs')
parser_CheckLinkage.add_argument('-g', "--genome_name",
help = "Name of genome obtained by the CollectData option and if the BAGA variant calling pipeline was used, the same genome used with AlignReads and CallVariants options",
type = str,
required=True)
parser_CheckLinkage.add_argument('-p', "--vcfs_paths",
help = "path to vcf files. If a directory path(s) is provided, all *.VCF and *.vcf files will be included. A list of filenames with full path or with *, ? or [] characters can also be provided (with unix-style pathname pattern expansion for the latter)",
type = str,
nargs = '+',
metavar = 'PATH_TO_VCFs')
parser_CheckLinkage.add_argument('-c', "--check",
help = "check variant linkage on paired reads (fragments) in a pooled dataset",
action = 'store_true')
parser_CheckLinkage.add_argument('-S', "--include_samples",
help = "Restrict checking to these samples else if omitted, all samples are checked.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_CheckLinkage.add_argument('-F', "--force",
help = "overwrite existing files when using --collect/-C.",
action = 'store_true',
default = False)
# --- 'ComparativeAnalysis' subcommand ---------------------------------------
# Builds SNP-based multiple sequence alignments, infers phylogenies (phyML),
# infers recombination (ClonalFrameML) and plots annotated trees.
parser_ComparativeAnalysis = subparser_adder.add_parser('ComparativeAnalysis',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill('Build multiple sequence alignments \
from SNPs in VCFs with a reference genome, infer phylogenies and homologous \
recombination events.',
text_width,
replace_whitespace = False),
epilog = textwrap.fill('Example usage: %(prog)s --build_MSA \
--reads_name Liverpool --genome_name FM209186.1 --include_invariants\n',
text_width, replace_whitespace = False))
# exactly one analysis stage per invocation
mutually_exclusive_group1 = parser_ComparativeAnalysis.add_mutually_exclusive_group(required=True)
mutually_exclusive_group1.add_argument('-m', "--build_MSA",
help = "build a multiple sequence alignment from a reference genome and SNPs listed in VCF files",
action = 'store_true')
mutually_exclusive_group1.add_argument('-i', "--infer_phylogeny",
help = "infer a phylogeny from a multiple sequence alignment",
action = 'store_true')
mutually_exclusive_group1.add_argument('-r', "--infer_recombination",
help = "infer recombination from a phylogeny and multiple sequence alignment",
action = 'store_true')
mutually_exclusive_group1.add_argument('-p', "--plot_phylogeny",
help = "plot a phylogeny, possibly including recombination inferences",
action = 'store_true')
# for build_MSA
parser_ComparativeAnalysis.add_argument('-g', "--genome_name",
help = "name of genome obtained by the CollectData option on which to base a MSA along with SNPs",
type = str)
mutually_exclusive_group2 = parser_ComparativeAnalysis.add_mutually_exclusive_group(required=False)
mutually_exclusive_group2.add_argument('-n', "--reads_name",
help = "name of read datasets groups to include in an MSA, if processed by PrepareReads and AlignReads options. Should match --reads_name option used previously",
type = str,
nargs = '+')
mutually_exclusive_group2.add_argument('-v', "--vcfs_paths",
help = "path to vcf files. If a directory path(s) is provided, all *.VCF and *.vcf files will be included. A list of filenames with full path or with *, ? or [] characters can also be provided (with unix-style pathname pattern expansion for the latter)",
type = str,
nargs = '+')
parser_ComparativeAnalysis.add_argument('-s', "--include_samples",
help = "restrict MSA to these samples. If omitted, all samples are included.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_ComparativeAnalysis.add_argument('-e', "--exclude_samples",
help = "exclude these samples from MSA. If omitted, no samples are excluded.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_ComparativeAnalysis.add_argument('-l', "--include_invariants",
help = "include invariant sites to make a Long alignment. Invariant sites are required to accurately estimate some parameters",
action = 'store_true')
parser_ComparativeAnalysis.add_argument('-c', "--core_only",
help = "only include sites present in all samples",
action = 'store_true')
parser_ComparativeAnalysis.add_argument('-B', "--sample_bams",
help = "path to file linking sample names to BAM files to collect reference genome coverage information. Format: per line <sample name><tab><path to BAM>",
type = str)
# for infer_phylo
parser_ComparativeAnalysis.add_argument('-M', "--path_to_MSA",
help = "path to multiple nucleotide sequence alignment (e.g. as generated by --build_MSA). Amino acid sequences will be implemented in the future.",
type = str)
parser_ComparativeAnalysis.add_argument('-P', "--program",
help = "software to use for inference: currently only phyML!",
type = str,
choices = ['phyML'],
default = 'phyML')
parser_ComparativeAnalysis.add_argument('-o', "--out_group",
help = "outgroup for rooting. Can be single sample name or several but must be monophyletic: not checked!",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
# other parameters for phyML to follow
# infer phylogeny and recombination
parser_ComparativeAnalysis.add_argument('-b', "--num_bootstraps",
help = "number of bootstrap replicates for --infer_phylo or --infer_recombination.",
type = int,
default = 0)
# infer recombination
parser_ComparativeAnalysis.add_argument('-T', "--plot_transfers",
help = "plot a phylogeny, also plot transfers inferred by ClonalFrame (in <treename>.importation_status.txt)",
action = 'store_true')
parser_ComparativeAnalysis.add_argument('-t', "--path_to_tree",
help = "path to starting phylogeny for inferring recombination over and correct.",
type = str)
# plot phylogeny
# parser_ComparativeAnalysis.add_argument('-t', "--path_to_CFML_tree",
# help = "For plotting phylogeny with transfers, path to phylogeny updated by ClonalFrameML when inferring recombination.",
# type = str)
parser_ComparativeAnalysis.add_argument('-N', "--use_names",
help = "For plotting phylogeny, path to file with tab delimited list of actual tip label and desired label, for translating labels.",
type = str)
parser_ComparativeAnalysis.add_argument('-G', "--genome_length",
help = "For plotting transfers on phylogeny, reference genome length (alternatively supply --genome_name of CollectData saved object).",
type = int)
# --- 'AssembleReads' subcommand ---------------------------------------------
# De novo assembly (SPAdes) of read groups previously collected and prepared
# by the CollectData/PrepareReads options.
parser_AssembleReads = subparser_adder.add_parser('AssembleReads',
formatter_class = argparse.RawDescriptionHelpFormatter,
description = textwrap.fill('Assemble reads into contiguous chromosome sequences.',
text_width,
replace_whitespace = False),
epilog = textwrap.fill('Example usage: %(prog)s --denovo spades '\
'--reads_name Liverpool\n',
text_width, replace_whitespace = False))
parser_AssembleReads.add_argument('-n', "--reads_name",
help = "name of read datasets groups on which to perform assembly. Reads "\
"should have been collected and prepared with BAGA's CollectData and "\
"PrepareReads options. Should match --reads_name option used previously",
required = True,
type = str,
nargs = '+')
parser_AssembleReads.add_argument('-s', "--include_samples",
help = "Implement Me! restrict assembly to these samples within the read "\
"datasets groups provided by --reads_name/-n. If omitted, all samples are "\
"included.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_AssembleReads.add_argument('-e', "--exclude_samples",
help = "Implement Me! exclude these samples from assembly from among samples "\
"within the read datasets groups provided by --reads_name/-n. If omitted, no "\
"samples are excluded.",
type = str,
nargs = '+',
metavar = 'SAMPLE_NAME')
parser_AssembleReads.add_argument('-P', "--program",
help = "software to use for assembly: currently only spades!",
type = str,
choices = ['spades'],
default = 'spades')
parser_AssembleReads.add_argument('-m', "--max_memory",
help = "maximum memory to use in gigabytes for each assembly. If not "\
"specified, total available at launch time will be used.",
type = int)
# Show the ASCII splash banner unless the caller suppressed it, then parse
# the command line into `args`.
if '--nosplash' not in sys.argv:
    print(splash)
args = parser.parse_args()
# NOTE(review): --verbose/--quiet handling is currently disabled.
if hasattr(args, 'GATK_jar_path') and args.GATK_jar_path:
    # GATK v3.3/3.4 require a Java 1.7 JRE: verify java is runnable (system
    # java or the binary given via --JRE_1_7_path) and reports version 1.7.x,
    # then verify the supplied jar is GATK and its version is acceptable.
    use_java = 'java'
    if hasattr(args, 'JRE_1_7_path') and args.JRE_1_7_path:
        use_java = args.JRE_1_7_path
        print('Using provided JAVA JRE at {}'.format(use_java))
    else:
        print('Using system JAVA JRE')
    try:
        p = subprocess.Popen([use_java, '-version'], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    except OSError:
        sys.exit('Could not find the Java runtime environment, needed for GATK and Picard. Please ensure you have Java installed and that the "java" executable is in the system path')
    # capture the whole "java -version" report up front so it can be echoed
    # if parsing fails (previously `print(output)` in the except clause raised
    # NameError because `output` was only assigned later, for the GATK check)
    output = p.stdout.read()
    try:
        java_version_string = output.split('\n')[0]
        maj, min1, min2 = re.findall(r'([0-9])\.([0-9])\.([0-9_]+)', java_version_string)[0]
        maj, min1 = map(int, [maj, min1])
        if maj == 1 and min1 == 7:
            print('Java 1.7: found!')
        else:
            sys.exit('GATK v3.3 requires Java v1.7 but your version is {}.{}. Please install Java v1.7 to continue or use --JRE_1_7_path to specify Java v1.7 binary to use.'.format(maj,min1))
    except IndexError:
        print(output)
        sys.exit('There was a problem checking your Java version. Please report this as a baga bug at https://github.com/daveuu/baga/issues.')
    # check the GATK jar file itself: ask it for --help and parse its version
    p = subprocess.Popen([use_java, '-Xmx8g', '-jar', args.GATK_jar_path, '--help'], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    output = p.stdout.read()
    if 'Error: Unable to access jarfile' in output:
        sys.exit('Java could not find jar file for GATK at: {}\nPlease provide path to GATK v3.3 jar file with --GATK_jar_path.'.format(args.GATK_jar_path))
    else:
        try:
            maj, min1, min2, min3 = re.findall(r'\(GATK\) v([0-9])\.([0-9]+)-([0-9]+)-([a-z0-9]+),', output.split('\n')[1])[0]
            maj, min1, min2 = map(int, [maj, min1, min2])
            if maj == 3 and min1 == 3:
                print('GATK v3.3: found!')
            elif maj == 3 and min1 > 3:
                print('Found GATK v{}.{}. Should be OK but this version of baga was tested with v3.3 . . .'.format(maj,min1))
            elif maj == 3 and min1 < 3:
                # fixed: the format arguments were missing here, so the
                # message printed a literal '{}.{}'
                print('WARNING: Found GATK v{}.{}. Not tested in this baga version (v3.3 was) . . . use at your own risk!'.format(maj,min1))
            else:
                sys.exit('Expecting GATK version 3.3 but found {}.{}\nPlease provide path to GATK v3.3 jar file with --GATK_jar_path.'.format(maj,min1))
        except IndexError:
            print(output)
            sys.exit('There was a problem checking your --GATK_jar_path argument. Please report this as a baga bug.')
### Check Dependencies ###
if args.subparser == 'Dependencies':
    print('\n-- Dependencies check/get module --')
    if args.versions_file:
        # Prefer the user-supplied versions file; fall back to the
        # 'versions.yaml' shipped next to this script; give up quietly if
        # neither can be read.
        try:
            use_versions_file = args.versions_file
            versions = open(use_versions_file).read()
        except IOError:
            use_versions_file = os.path.sep.join(sys.argv[0].split(os.path.sep)[:-1] + ['versions.yaml'])
            try:
                versions = open(use_versions_file).read()
            except IOError:
                use_versions_file = False
        updated = False
        if use_versions_file:
            # Minimal two-level parse of the YAML-like file: a non-indented
            # "name:" line opens a section; indented "key: value" lines
            # belong to the most recent section.
            use_versions = {}
            main_key = None
            for line in versions.split('\n'):
                if not line.startswith('#') and len(line.rstrip('\n')) > 0:
                    if not line.startswith(' '):
                        main_key,value = line.rstrip('\n').split(':',1)
                    else:
                        this_key,value = line.rstrip('\n').lstrip(' ').split(':',1)
                        try:
                            use_versions[main_key][this_key] = value.strip(' ')
                        except KeyError:
                            use_versions[main_key] = {this_key : value.strip(' ')}
            # Apply parsed entries over the built-in dependency metadata.
            for external_name, info in use_versions.items():
                for key, new in info.items():
                    if new == 'None':
                        new = None
                    try:
                        current = Dependencies.dependencies[external_name][key]
                        if current != new:
                            Dependencies.dependencies[external_name][key] = new
                            # fixed: only flag 'updated' when a value actually
                            # changed (previously set for every recognised key,
                            # so the message below always printed)
                            updated = True
                            print('{} for {} updated from {} to {}'.format(key,
                                    external_name, current, new))
                    except KeyError:
                        print('Warning: ignoring unrecognised entry in {}: '\
                                '{} => {}'.format(use_versions_file,external_name,key))
            if updated:
                print('Used {} for version information'.format(os.path.abspath(use_versions_file)))
def get(name):
    '''Fetch dependency *name* using the retrieval method named in its metadata.'''
    info = Dependencies.dependencies[name]
    fetchers = {
            'git': Dependencies.get_git,
            'download': Dependencies.get_download,
            }
    # unknown 'source' values are silently ignored, as before
    if info['source'] in fetchers:
        fetchers[info['source']](**info)
def check(name):
    '''Run the configured checker for dependency *name* and return its result.

    Local python packages get their package name passed to the checker;
    external programs are checked with their stored arguments only. Exits
    via sys.exit() for an unrecognised destination.
    '''
    # this would need changing with the dependencies dict in Dependencies
    info = Dependencies.dependencies[name]
    destination = info['destination']
    if 'local_packages' in destination:
        checker_info = info['checker']
        return checker_info['function'](info['name'], **checker_info['arguments'])
    if 'external_programs' in destination:
        checker_info = info['checker']
        return checker_info['function'](**checker_info['arguments'])
    sys.exit('The destination for this package or program is unknown: {}\n'.format(
            destination)
            )
def checkpackage(name):
    '''Check a python package via a fresh "baga Dependencies --check" subprocess.

    A separate interpreter is used so that a package (re)installed during this
    run is imported fresh rather than from a stale cached module.

    Returns True when the sub-process check succeeded (exit code 0), else
    False. (Previously any exit code other than 0 or 1 fell through and
    returned None implicitly; any non-zero code is now reported as failure.)
    '''
    cmd = [sys.argv[0], '--nosplash', 'Dependencies', '--check', name]
    proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0
def checkget(checkgetthese):
    '''Check each named dependency and attempt to install any that are missing.

    Returns a tuple (summary messages, per-dependency check results) in the
    same order as *checkgetthese*.
    '''
    summaries = []
    results = []
    for name in checkgetthese:
        print('\nChecking for {}:\n'.format(name))
        found = check(name)
        if found:
            summaries.append('\n{}: found!'.format(name))
            results.append(found)
            continue
        summaries.append('\n{0}: not found . . . \nAttempting to install.\n'.format(name, sys.argv[0]))
        get(name)
        if 'local_packages' in Dependencies.dependencies[name]['destination']:
            # python package: forget any previously imported (older) version
            # so the re-check imports the freshly installed one
            try:
                del sys.modules[name]
            except KeyError:
                pass
        # packages must be re-checked in a fresh interpreter via checkpackage();
        # external programs can be re-checked in-process
        if Dependencies.dependencies[name]['checker']['function'] == Dependencies.check_python_package:
            gotnow = checkpackage(name)
        else:
            gotnow = check(name)
        if gotnow:
            summaries[-1] += "Installed successfully: found!"
        else:
            summaries[-1] += "Failed to install . . . there may be dependencies missing or some other problem . . ."
        results.append(gotnow)
    return(summaries,results)
# dispatch the Dependencies sub-command actions from the parsed arguments
if args.get:
    # install each requested dependency unconditionally (no prior check)
    for name in args.get:
        get(name)
if args.check:
    # report availability of each requested dependency; exit non-zero if any missing
    check_summary = []
    check_results = []
    for name in args.check:
        print('\nChecking for {}:\n'.format(name))
        check_results += [check(name)]
        if check_results[-1]:
            check_summary += ['\n{}: found!'.format(name)]
        else:
            check_summary += ['\n{0}: not found . . . \nTry "{1} Dependencies --get {0}"'.format(name, sys.argv[0])]
        print(check_summary[-1])
    # only print a combined summary when more than one dependency was checked
    if len(check_summary) > 1:
        print(''.join(['\n\nSummary:\n'] + sorted(check_summary))+'\n')
    if not all(check_results):
        sys.exit('\n\nOne or more dependencies are unavailable . . . \n')
if args.checkpackage:
    # call baga to do a conventional check on a python package
    result = checkpackage(args.checkpackage)
    if result:
        print('\n{}: found!'.format(args.checkpackage))
    else:
        print('\n{0}: not found . . . \nTry "{1} Dependencies --get {0}"'.format(args.checkpackage, sys.argv[0]))
if args.checkget:
    # check each named dependency and attempt installation of any missing ones
    check_summary,check_results = checkget(args.checkget)
    if len(check_summary):
        print(''.join(['\n\nSummary:\n'] + sorted(check_summary))+'\n')
    if not all(check_results):
        sys.exit('\n\nOne or more baga dependencies are unavailable and could not be installed . . . \n')
if args.checkgetfor:
    # as --checkget, but resolving the dependency list from a named task
    for task in args.checkgetfor:
        checkthese = sorted(Dependencies.dependencies_by_task[task])
        print('Checking on dependencies for {} ({})'.format(task, ', '.join(checkthese)))
        check_summary,check_results = checkget(checkthese)
        if len(check_summary):
            print(''.join(['\n\nSummary for {}:\n'.format(task)] + sorted(check_summary))+'\n')
        if not all(check_results):
            sys.exit('\n\nOne or more baga dependencies are unavailable and could not be installed . . . \n')
### Download Genomes ###
if args.subparser == 'CollectData':
    print('\n-- Data collection module --')
    if args.genomes is not None:
        from baga import CollectData
        # work out what accessions and/or paths to genbank files were provided
        if ',' in args.genomes:
            # allow for single string with ',' delimiter
            use_genome = args.genomes.split(',')
        else:
            use_genome = args.genomes
        load_gbks = []
        load_bagas = []
        collect_accessions = []
        for g in use_genome:
            genome_no_quotes = g.strip('"').strip("'")
            if re.search('\.gbk|\.GBK', genome_no_quotes):
                load_gbks += [genome_no_quotes]
            elif re.search('\.baga|\.BAGA', genome_no_quotes):
                # fixed pattern: was '\.baga|\.baga' (duplicated alternative),
                # so upper-case .BAGA paths fell through to the accession list
                load_bagas += [genome_no_quotes]
            else:
                # anything that isn't .gbk or .baga is assumed to be an accession number
                collect_accessions += [genome_no_quotes]
        # check email address provided for Entrez, if any accession found
        if args.email_address is None and len(collect_accessions) > 0:
            print(textwrap.fill('User email address is required for downloading from NCBI \
with accession numbers. Detected %s items without ".gbk" or ".GBK" and assumed to be \
accession numbers' % (len(collect_accessions)), text_width))
            sys.exit(1)
        loaded_genomes = {}
        # load local GenBank files and store in baga format
        for gbk in load_gbks:
            print('Loading {}'.format(gbk))
            genome = CollectData.Genome(local_path = gbk, format = 'genbank')
            print('Storing for future use . . .')
            genome.saveLocal()
            #loaded_genomes[genome.id] = genome
        # re-save previously stored baga genome files
        for baga_path in load_bagas:
            print('Loading {}'.format(baga_path))
            # bug fix: this loop previously passed "gbk" (the last GenBank
            # path, or an unbound name) instead of the .baga path itself
            genome = CollectData.Genome(local_path = baga_path, format = 'baga')
            print('Storing for future use . . .')
            genome.saveLocal()
            #loaded_genomes[genome.id] = genome
        # fetch remaining items from NCBI by accession via Entrez
        for genome_accession in collect_accessions:
            print('Fetching {}'.format(genome_accession))
            genome = CollectData.Genome(accession = genome_accession, user_email = args.email_address)
            print('Storing for future use . . .')
            genome.saveLocal()
            #loaded_genomes[genome.id] = genome
        # # download genomes from NCBI
        # if len(collect_accessions) > 0:
            # print('Found accessions for collection from NCBI:\n%s' % '\n'.join(collect_accessions))
            # for genome_accession in collect_accessions:
                # genome = CollectData.Genome()
                # print('Downloading: %s from NCBI' % genome_accession)
                # genome.getFromNCBI(genome_accession, args.email_address)
                # loaded_genomes[genome.genome_genbank_record.name.replace(' ','_')] = genome
        # # extract ORF IDs and chromosome ranges
        # for name, loaded_genome in loaded_genomes.items():
            # print('Extracting loci from %s' % name)
            # loaded_genome.extractLoci()
            # loaded_genome.saveLocal(name)
        # no option to not extractLoci for now
def sanitize_filename(proposed_name):
    '''Make a proposed name safe for use in file names.

    Each character from a fixed set of shell/filesystem-unsafe characters
    (including space) is replaced by an underscore.

    Returns a tuple: (sanitised name, set of characters that were replaced).
    '''
    invalid_chars = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~ '
    # record which offending characters actually occurred, for reporting
    sanitised = {c for c in proposed_name if c in invalid_chars}
    use_name = ''.join('_' if c in invalid_chars else c for c in proposed_name)
    return(use_name, sanitised)
### (down)load Reads ###
if args.subparser == 'CollectData':
    if args.reads_download is not None:
        from baga import CollectData
        # only download so only accession numbers?
        if ',' in args.reads_download:
            # allow for single string with ',' delimiter
            use_reads = args.reads_download.split(',')
        else:
            use_reads = args.reads_download
        # strip any shell quoting left on the accession strings
        read_accessions = [r.strip('"').strip("'") for r in use_reads]
        read_accessions.sort()
        reads = CollectData.Reads()
        reads.getFromENA(read_accessions)
        # make a name from run accessions . . .
        if args.reads_group_name is None:
            # default group name: first accession plus a count of the rest
            use_reads_group_name = read_accessions[0]+'plus%sothers' % (len(read_accessions) - 1)
            print('No reads group name provided. Using: {}'.format(use_reads_group_name))
        else:
            # sanitise the user-supplied group name for use in filenames
            use_reads_group_name, sanitised = sanitize_filename(args.reads_group_name)
            if len(sanitised) > 0:
                print('\nWarning: replaced {} with _ in provided reads group name.'.format(', '.join(sanitised)))
                print('Provided: {}'.format(args.reads_group_name))
                print('Using: {}\n\n'.format(use_reads_group_name))
        reads.saveLocal(use_reads_group_name)
### Load Reads from path ###
if args.subparser == 'CollectData':
    if args.reads_path is not None:
        # check read group name provided
        if args.reads_group_name is None:
            print(textwrap.fill('--reads_group_name is required for loading reads from a local path', text_width))
            sys.exit(1)
        # strip any shell quoting from the provided path(s)
        use_reads_path = []
        for path in args.reads_path:
            use_reads_path += [path.strip('"').strip("'")]
        use_reads_group_name, sanitised = sanitize_filename(args.reads_group_name)
        if len(sanitised) > 0:
            print('\nWarning: replaced {} with _ in provided reads group name.'.format(', '.join(sanitised)))
            print('Provided: {}'.format(args.reads_group_name))
            print('Using: {}\n\n'.format(use_reads_group_name))
        from baga import CollectData
        reads = CollectData.Reads()
        reads.getFromPath(use_reads_path)
        reads.saveLocal(use_reads_group_name)
### Prepare Reads ###
if args.subparser == 'PrepareReads':
    print('\n-- Short Reads Preparation module --')
    import baga
    if args.reads_name is not None:
        # resolve the saved reads group produced by the CollectData module
        use_path_reads,use_name_reads = check_baga_path('baga.CollectData.Reads', args.reads_name)
        e = 'Could not locate a saved baga.CollectData.Reads-<reads_name>.baga for reads group given: {}'.format(args.reads_name)
        assert all([use_path_reads,use_name_reads]), e
        from baga import PrepareReads
        if args.subsample_to_cov is not None:
            # make a new CallVariants.Reads object
            print('Loading reads group %s' % use_name_reads)
            downloaded_reads = baga.bagaload('baga.CollectData.Reads-{}'.format(use_name_reads))
            reads = PrepareReads.Reads(downloaded_reads)
            read_cov_depth, genome_size = args.subsample_to_cov
            print('Subsampling reads group {} to {}x coverage for a {:,} bp genome'.format(use_name_reads, read_cov_depth, genome_size))
            reads.subsample(genome_size, read_cov_depth, force = args.force)
            reads.saveLocal(use_name_reads)
        if args.adaptors is not None:
            # need to test parallelism with lots of stdout reports
            if args.subsample_to_cov is None:
                # didn't subsample in current analysis
                if args.adaptors == 'fullsample':
                    print('Not using previously subsampled reads because "--adaptors fullsample" supplied.')
                    print('Loading collected reads group %s' % use_name_reads)
                    downloaded_reads = baga.bagaload('baga.CollectData.Reads-%s' % use_name_reads)
                    reads = PrepareReads.Reads(downloaded_reads)
                else:
                    print('Loading subsampled reads group %s' % use_name_reads)
                    reads = baga.bagaload('baga.PrepareReads.Reads-%s' % use_name_reads)
            try:
                reads.cutAdaptors(force = args.force, max_cpus = args.max_cpus)
            except OSError:
                # cutadapt executable missing or not runnable
                exe_fail('cutadapt')
            reads.saveLocal(use_name_reads)
        if args.trim:
            # could check whether adaptor cut read files exist
            # need to test parallelism with lots of stdout reports
            if not args.adaptors:
                # load a previously adaptor cut reads set
                print('Loading processed reads group %s' % use_name_reads)
                reads = baga.bagaload('baga.PrepareReads.Reads-%s' % use_name_reads)
            print('\nTrimming reads . . .')
            try:
                reads.trim(force = args.force, max_cpus = args.max_cpus)
            except OSError:
                # sickle executable missing or not runnable
                exe_fail('sickle')
            reads.saveLocal(use_name_reads)
        if args.delete_intermediates:
            # tidy up: remove per-stage fastq files once the next stage exists
            print('Checking on intermediate fastq files to delete . . .')
            if not args.adaptors and not args.trim:
                # load a previously adaptor cut reads set
                print('Loading processed reads group %s' % use_name_reads)
                reads = baga.bagaload('baga.PrepareReads.Reads-%s' % use_name_reads)
            total_size = 0
            # check stage 1 files and stage 2 files
            if hasattr(reads,'read_files') and hasattr(reads,'adaptorcut_read_files'):
                stage1s = check_files(reads.read_files)
                stage2s = check_files(reads.adaptorcut_read_files)
                if stage2s:
                    if stage1s:
                        # delete stage 1 files if have all <== not for now . . .
                        # print('Deleting original or subsampled fastq files . . .')
                        # total_size += delete_files(reads.read_files, extra = '_subsmp')
                        print('Retaining original fastq files even though processed versions exist because re-downloading is time consuming!')
                    else:
                        print('Some or all of original or subsampled fastq files seem to have been deleted')
                else:
                    print('Missing some cutadapt-processed files: not deleting originals or subsampled')
            if hasattr(reads,'adaptorcut_read_files') and hasattr(reads,'trimmed_read_files'):
                stage2s = check_files(reads.adaptorcut_read_files)
                stage3s = check_files(reads.trimmed_read_files)
                if stage3s:
                    if stage2s:
                        # delete stage 2 files if have all
                        print('Deleting cutadapt-processed fastq files . . .')
                        total_size += delete_files(reads.adaptorcut_read_files)
                    else:
                        print('Some or all of cutadapt-processed fastq files seem to have been deleted')
                else:
                    print('Missing some sickle-processed files: not deleting cutadapt-processed')
            if total_size:
                print('Saved {:.2f} Gb by deleting intermediate files'.format(total_size/1000000000.0))
            else:
                print('Nothing deleted.')
### Align Reads ###
if args.subparser == 'AlignReads':
    print('\n-- Read Aligning module --')
    if args.reads_name is not None and args.genome_name is not None:
        # first check whether GATK path is needed
        if args.indelrealign and not args.GATK_jar_path:
            print('''Please supply:
--GATK_jar_path
if using:
--indelrealign
''')
            sys.exit(1)
        # ensure upstream files are available: genome
        use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
        e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
        assert all([use_path_genome,use_name_genome]), e
        import baga
        from baga import AlignReads
        from baga import CollectData
        # ensure upstream files are available: reads
        if args.prepared:
            # not prepared by BAGA, load direct from a CollectData.Reads file
            use_path_reads,use_name_reads = check_baga_path(
                    'baga.CollectData.Reads', args.reads_name)
            e = 'Could not locate a saved baga.CollectData.Reads-<reads_name>.baga '\
                    'for reads group given: {}'.format(args.reads_name)
            assert all([use_path_reads,use_name_reads]), e
            from baga import PrepareReads
            print('Loading reads group %s' % use_name_reads)
            downloaded_reads = baga.bagaload('baga.CollectData.Reads-{}'\
                    ''.format(use_name_reads))
            reads = PrepareReads.Reads(downloaded_reads)
            # generate a PreparedReads.Reads file for use below
            # reads already trimmed etc
            reads.trimmed_read_files = reads.read_files
            reads.saveLocal(use_name_reads)
        # BAGA prepared or just generated
        use_path_reads,use_name_reads = check_baga_path(
                'baga.PrepareReads.Reads', args.reads_name)
        e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga '\
                'for reads group given: {}'.format(args.reads_name)
        assert all([use_path_reads,use_name_reads]), e
        # alignments are named <reads group>__<genome> throughout
        alns_name = '__'.join([use_name_reads, use_name_genome])
        if args.align:
            print('Loading processed reads group %s' % use_name_reads)
            #prepared_reads = baga.bagaload('baga.PrepareReads.Reads-%s' % use_name_reads)
            prepared_reads = baga.bagaload(use_path_reads)
            print('Loading genome %s' % use_name_genome)
            genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
            # create alignment object
            alignments = AlignReads.SAMs(reads = prepared_reads, genome = genome)
            print('\nAligning reads . . .')
            # let BWA estimate insert size and assign proper_pairs
            try:
                alignments.align(max_cpus = args.max_cpus, force = args.force)
            except OSError:
                # bwa executable missing or not runnable
                exe_fail('bwa')
            try:
                alignments.toBAMs(force = args.force, max_cpus = args.max_cpus)
            except OSError:
                exe_fail('samtools')
            # need to include the genome name for aligning a group of reads sets to more than one genome
            alignments.saveLocal(alns_name)
        if args.deduplicate:
            if not args.align:
                # add an exception here and inform to use --align first
                print('Loading previously processed read alignments: %s' % alns_name)
                alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}.baga'.format(alns_name))
            try:
                alignments.removeDuplicates(force = args.force, max_cpus = args.max_cpus)
            except OSError:
                exe_fail('picard')
            try:
                alignments.sortIndexBAMs(force = args.force, max_cpus = args.max_cpus)
            except OSError:
                exe_fail('samtools')
            alignments.saveLocal(alns_name)
        if args.indelrealign:
            if not args.deduplicate:
                # add an exception here and inform to use --align first
                print('Loading previously processed read alignments: %s' % alns_name)
                alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}.baga'.format(alns_name))
            try:
                os.makedirs('genome_sequences')
            except OSError:
                # directory already exists
                pass
            genome_fna = 'genome_sequences/%s.fna' % alignments.genome_id
            if not os.path.exists(genome_fna):
                # GATK needs the reference as a fasta file on disk
                from Bio import SeqIO
                from Bio.Seq import Seq
                from Bio.SeqRecord import SeqRecord
                SeqIO.write(SeqRecord(Seq(alignments.genome_sequence.tostring()), id = alignments.genome_id),
                        genome_fna,
                        'fasta')
            alignments.IndelRealignGATK(
                    jar = args.GATK_jar_path.split(os.path.sep),
                    use_java = use_java,
                    force = args.force,
                    max_cpus = args.max_cpus)
            alignments.saveLocal(alns_name)
        if args.delete_intermediates:
            # remove each pipeline stage's files once the next stage's exist
            print('Checking on intermediate alignment files to delete . . .')
            if not any([args.indelrealign, args.deduplicate, args.align]):
                # load a previously adaptor cut reads set
                print('Loading previously processed read alignments: %s' % alns_name)
                alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}.baga'.format(alns_name))
            # pipeline stage attributes, in processing order
            attrs = ('aligned_read_files', 'paths_to_BAMs', 'paths_to_BAMs_dd', 'paths_to_BAMs_dd_si', 'ready_BAMs')
            descr = { 'aligned_read_files':'initial SAMs',
                    'paths_to_BAMs':'initial BAMs',
                    'paths_to_BAMs_dd':'deduplicated BAMs',
                    'paths_to_BAMs_dd_si':'sorted, deduplicated BAMs',
                    'ready_BAMs':'indel-realigned BAMs'}
            total_size = 0
            # walk consecutive stage pairs: delete stage N if stage N+1 is complete
            for attr1,attr2 in zip(attrs[:-1],attrs[1:]):
                if hasattr(alignments,attr1) and hasattr(alignments,attr2):
                    have1, have2 = False, False
                    if attr1 == 'aligned_read_files':
                        # stored as a dict keyed by sample; check the paths
                        files_to_check1 = alignments.__dict__[attr1].values()
                    else:
                        files_to_check1 = alignments.__dict__[attr1]
                    if attr2 == 'ready_BAMs':
                        # ready_BAMs is a list of lists; first entry is current
                        files_to_check2 = alignments.__dict__[attr2][0]
                    else:
                        files_to_check2 = alignments.__dict__[attr2]
                    have1 = check_files(files_to_check1)
                    have2 = check_files(files_to_check2)
                    if have2:
                        if have1:
                            # delete stage 1 files if have all
                            print('Deleting {} . . .'.format(descr[attr1]))
                            total_size += delete_files(files_to_check1)
                        else:
                            print('Some or all of {} seem to have been deleted'.format(descr[attr1]))
                    else:
                        print('Missing some {}: not deleting {}'.format(descr[attr2],descr[attr1]))
            if total_size:
                print('Saved {:.2f} Gb by deleting intermediate files'.format(total_size/1000000000.0))
            else:
                print('Nothing deleted.')
if args.subparser == 'SimulateReads':
    print('\n-- Read Simulation module --')
    # resolve the saved baga genome to simulate reads from
    use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
    e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
    # bug fix: this message was built but never checked (every other module
    # asserts here), so a bad --genome_name failed later with an obscure error
    assert all([use_path_genome,use_name_genome]), e
    import baga
    from baga import CollectData
    from baga import SimulateReads
    print('Loading genome %s' % use_name_genome)
    genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
    # pair up the flat --large_deletions values as numbered (start, end) ranges
    large_deletions = {}
    for x, i in enumerate(range(0, len(args.large_deletions), 2), start = 1):
        large_deletions['Deletion_{}'.format(x)] = tuple(args.large_deletions[i:i+2])
    simulator = SimulateReads.Simulator(genome = genome,
            num_individuals = args.num_individuals,
            large_deletions = large_deletions,
            random_seed = args.random_seed)
    # introduce the requested variants into the simulated individuals
    simulator.do(num_SNPs = args.num_SNPs, num_deletions = args.num_deletions,
            num_insertions = args.num_insertions)
    if args.gemsim:
        # optionally generate fastq reads with GemSIM
        simulator.generateReads(max_cpus = args.max_cpus)
### Repeats ###
if args.subparser == 'Repeats':
    print('\n-- Chromosome repeats detection module --')
    import baga
    from baga import Repeats
    from baga import CollectData
    # validate identity threshold before any expensive work
    e = '-i/--minimum_percent_identity must be between 0 and 100 percent (low values not recommended!)'
    assert 0 < args.minimum_percent_identity <= 100, e
    use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
    e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
    assert all([use_path_genome,use_name_genome]), e
    if args.find:
        print('Loading genome %s' % use_name_genome)
        genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
        finder = Repeats.Finder(genome)
        if args.method == 'baga':
            # minimum_percent_identity defaults to 98%, argument takes 0.98 so *0.01
            # minimum_repeat_length defaults to 400
            finder.findRepeats(minimum_percent_identity = args.minimum_percent_identity * 0.01,
                    minimum_repeat_length = args.minimum_repeat_length,
                    max_extensions = 25)
            finder.saveLocal(use_name_genome)
            # also save just the ranges for filtering
            baga.bagasave(finder.ambiguous_ranges, 'baga.Repeats.filter_regions-{}'.format(use_name_genome))
        elif args.method == 'nucmer_check':
            # alternative repeat detection via MUMmer's nucmer, then compare
            finder.findRepeatsNucmer(minimum_percent_identity = args.minimum_percent_identity * 0.01,
                    minimum_repeat_length = args.minimum_repeat_length)
            finder.compareRepeatRegions()
    if args.plot:
        # if not args.find:
            # print('Loading repeats and genome: %s' % use_name_genome)
            # finder = baga.bagaload('baga.Repeats.Finder-%s' % use_name_genome)
        Repeats.plotRepeats(use_name_genome, outdir = ['plots_repeats'])
    if args.summarise:
        Repeats.summariseRepeats(use_name_genome)
### Structure ###
if args.subparser == 'Structure':
print('\n-- Chromosome sequence rearrangement detection module --\n')
import baga
from baga import Structure
from baga import CollectData
if not(args.check or args.plot or args.plot_range or args.summarise or args.collect):
parser_Structure.error('Need at least one of --check/-c, --plot/-p or --plot_range/-r or --summarise/-s or --collect/-C')
if args.check or args.plot or args.plot_range or args.collect or args.collect_ranges:
# collect BAMs
if args.reads_name:
# baga pipeline information provided
if not args.genome_name:
parser.error('--genome_name/-g is required with --reads_name/-n. (The baga CollectData-processed genome used with the AlignReads option)')
use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
assert all([use_path_genome,use_name_genome]), e
# in case full filename provided
use_name_group = args.reads_name.replace('baga.AlignReads.SAMs-', '' , 1).replace('.p.gz', '').replace('.baga', '')
print('Loading alignments information for: {}__{} from AlignReads output'.format(use_name_group, use_name_genome))
from baga import AlignReads
alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}__{}.baga'.format(use_name_group, use_name_genome))
e = 'the reads for "--reads_name/-n {}" seem to not have been fully processed by the AlignReads module: they are missing the "ready_BAMs" attribute. Please ensure the AlignReads commands "--align --deduplicate --indelrealign" have been performed.'.format(args.reads_name)
assert hasattr(alignments, 'ready_BAMs'), e
### shouldn't all BAMs have headers parsed and stored in dict with (sample,genome) from the start?
BAMs = alignments.ready_BAMs[-1]
sample_names = sorted(alignments.read_files)
elif args.alignments_paths:
# list of folders or files provided
BAMs = []
for path in args.alignments_paths:
if os.path.isdir(path):
path_contents = os.listdir(path)
theseBAMs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('BAM', 'bam')]
e = 'No BAM files (*.bam or *.BAM) found in:\n{}'.format(args.alignments_paths)
assert len(theseBAMs), e
BAMs += theseBAMs
else:
# add file
BAMs += [path]
# check on requested samples: crop BAMs accordingly
if args.include_samples or args.exclude_samples:
if not args.reads_name:
# need sample names so will parse BAMs
import pysam
sample_names = {}
for BAM in BAMs:
sample_names[pysam.Samfile(BAM, 'rb').header['RG'][0]['ID']] = BAM
found_labels = sorted(sample_names)
if args.include_samples:
missing_labels = sorted(set(args.include_samples) - set(sample_names))
found_labels = sorted(set(args.include_samples) & set(sample_names))
e = ['None of the requested sample labels were found among the '\
'previously checked reads.']
e += ['Requested: {}'.format(', '.join(args.include_samples))]
e += ['Available: {}'.format(', '.join(sorted(sample_names)))]
assert len(found_labels), '\n'.join(e)
print('Found {} samples to use after --include_samples: {}'\
''.format(len(found_labels), ', '.join(found_labels)))
if len(missing_labels):
print('WARNING: could not find the following requested '\
'samples among previously checked reads.')
print(', '.join(missing_labels))
if args.exclude_samples:
print('Found {} samples to use, --exclude_samples provides {} '\
'to remove.'.format(len(found_labels),
len(args.exclude_samples)))
found_labels = sorted(set(found_labels) - set(args.exclude_samples))
print('{} samples remain to analyse: {}'.format(len(found_labels),
', '.join(found_labels)))
# update BAMs for args.check
#print(BAMs)
if not args.reads_name:
BAMs = [sample_names[sample] for sample in found_labels]
else:
BAMs = [BAM for BAM in BAMs if \
BAM.split(os.path.sep)[-1].split('__')[0] in found_labels]
#print(BAMs)
# update sample_names for arg.plot
sample_names = sorted(found_labels)
if args.check:
# check these genome-aligned read sets
checkers = Structure.checkStructure(BAMs, min_mapping_quality = 5,
smoothed_resolution = 10,
ratio_threshold = args.ratio_threshold)
if args.plot or args.plot_range:
if args.plot_range:
if args.plot_range[0] > args.plot_range[1]:
sys.exit('--plot_range values must be ascending!')
# need genome for plotting
if not args.genome_name:
parser.error('--genome_name/-g is required for plotting. (A baga CollectData-processed genome)')
else:
use_name_genome = args.genome_name.replace('baga.CollectData.Genome-', '' , 1).replace('.baga', '')
print('Loading genome %s' % use_name_genome)
genome = CollectData.Genome(local_path = 'baga.CollectData.Genome-{}.baga'.format(use_name_genome), format = 'baga')
if not args.include_samples and not args.reads_name:
# need to get sample names from BAMs because not supplied
# and didn't collect yet because args.include_samples not provided
try:
import pysam
except ImportError:
sys.exit('Need pysam to get sample names if not provided for plotting. Use Dependencies --get pysam to install locally')
sample_names = []
for BAM in BAMs:
sample_names += [pysam.Samfile(BAM, 'rb').header['RG'][0]['ID']]
sample_names.sort()
plot_folder = 'plots_structure'
filter_name = 'high non-proper pairs'
for sample in sample_names:
# get information for plotting
filein = 'baga.Structure.CheckerInfo-{}__{}.baga'.format(sample, use_name_genome)
try:
checker_info = Structure.loadCheckerInfo(filein)
except IOError:
print('Could not find: {}'.format(filein))
e = 'Genome name for checker {} ({}) does not match name of supplied genome ({})'.format(
filein,
checker_info['genome_name'],
genome.id)
assert checker_info['genome_name'] == genome.id, e
outdir = os.path.sep.join([plot_folder, checker_info['genome_name']])
if not os.path.exists(outdir):
os.makedirs(outdir)
print('Plotting filter regions for {} reads aligned to {}'.format(sample, genome.id))
if args.plot_range:
# just the requested range
do_ranges = [args.plot_range]
elif args.plot:
# all ranges for filtering for this sample
# currently range selection is rearrangements filter, not including the extensions
# do_ranges = checker_info['suspect_region']['rearrangements_extended']
do_ranges = []
for s,e in checker_info['suspect_regions']['rearrangements']:
# select consistant plotting region for comparison between samples
plot_chrom_start = int(round(s - 500 - 100, -3))
if s + 2500 > e:
plot_chrom_end = plot_chrom_start + 2500
else:
plot_chrom_end = int(round(e + 500 + 100, -3))
do_ranges += [(plot_chrom_start,plot_chrom_end)]
for plot_chrom_start,plot_chrom_end in do_ranges:
if args.reads_name:
plot_filename = '{:07d}_{:07d}_{}__{}__{}.svg'.format( plot_chrom_start,
plot_chrom_end,
checker_info['genome_name'],
use_name_group,
sample)
else:
plot_filename = '{:07d}_{:07d}_{}__{}.svg'.format( plot_chrom_start,
plot_chrom_end,
checker_info['genome_name'],
sample)
plot_output_path = [outdir, plot_filename]
plot_output_path = os.path.sep.join(plot_output_path)
print(plot_output_path)
plotter = Structure.Plotter(checker_info, genome, plot_output_path)
plotter.doPlot(plot_chrom_start, plot_chrom_end, panel = ((1,1),(1,1)), label = sample)
## check for allowed combinations for --summarise
if args.checkinfos_path:
if not args.summarise:
parser.error('--summarise/-s is required with --checkinfos_path/-b.')
if args.genome_name or args.reads_name:
parser.error('--genome_name/-g and --reads_name/-n cannot be used with --checkinfos_path/-b.')
elif args.reads_name and not args.genome_name:
parser.error('--genome_name/-g is required with --reads_name/-n. (The baga CollectData-processed genome used with the AlignReads option)')
if args.summarise or args.collect or args.collect_ranges:
# both tasks share some requirements: deal with these first
if args.reads_name:
# baga pipeline information provided
if not args.genome_name:
parser.error('--genome_name/-g is required with --reads_name/-n. (The baga CollectData-processed genome used with the AlignReads option)')
use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
assert all([use_path_genome,use_name_genome]), 'Could not locate genome given: {}'.format(args.genome_name)
# in case full filename provided
use_name_group = args.reads_name.replace('baga.AlignReads.SAMs-', '' , 1).replace('.p.gz', '').replace('.baga', '')
baga_file = 'baga.AlignReads.SAMs-{}__{}.baga'.format(use_name_group, use_name_genome)
print('Loading alignments information for: {} aligned to {} from {} output'.format(use_name_group, use_name_genome, baga_file))
from baga import AlignReads
alignments = AlignReads.SAMs(baga = baga_file)
sample_names = sorted(alignments.read_files)
if args.include_samples and args.reads_name:
missing_labels = sorted(set(args.include_samples) - set(sample_names))
found_labels = sorted(set(args.include_samples) & set(sample_names))
e = ['None of the requested sample labels were found among the rearrangements filter-checked reads.']
e += ['Requested: {}'.format(', '.join(args.include_samples))]
e += ['Available: {}'.format(', '.join(sorted(sample_names)))]
assert len(found_labels), '\n'.join(e)
if len(missing_labels):
print('WARNING: could not find the following requested samples among previously checked reads.')
print(', '.join(missing_labels))
# update sample_names
sample_names = sorted(found_labels)
if args.checkinfos_path:
# treat include_samples as list of files to deal with
# list of folders or files provided
print(args.checkinfos_path)
baga_filenames = {}
for path in args.checkinfos_path:
if os.path.isdir(path):
path_contents = os.listdir(path)
for f in path_contents:
if f[-5:] == '.baga' and f[:27] == 'baga.Structure.CheckerInfo-' and '__' in f[27:-5]:
baga_filenames[tuple(f[27:-5].split('__'))] = f
e = 'No baga.Structure.CheckerInfo-*__*.baga files found in:\n{}'.format(args.checkinfos_path)
assert len(baga_filenames), e
else:
# add file
f = path.split(os.path.sep)[-1]
if f[-5:] == '.baga' and f[:27] == 'baga.Structure.CheckerInfo-' and '__' in f[27:-5]:
baga_filenames[tuple(f[27:-5].split('__'))] = f
e = 'Could not find valid baga.Structure.CheckerInfo files at {}'.format(', '.join(args.checkinfos_path))
assert len(baga_filenames) > 0, e
else:
try:
baga_filenames = dict([(tuple([sample, use_name_genome]),'baga.Structure.CheckerInfo-{}__{}.baga'.format(sample, use_name_genome)) for sample in sample_names])
except NameError:
print('need --checkinfos_path or --reads_name')
sys.exit(1)
checker_info = {}
for (sample, genome_name), filein in sorted(baga_filenames.items()):
try:
print('Loading from {}'.format(filein))
filein.replace('baga.Structure.CheckerInfo-','')
this_checker_info = Structure.loadCheckerInfo(filein)
except IOError as e:
print('Cannot access provided baga.Structure.CheckerInfo file: {}'.format(filein))
print(e)
e = 'Genome name for checker {} ({}) does not match supplied genome name ({})'.format(
filein,
this_checker_info['genome_name'],
genome_name)
assert this_checker_info['genome_name'] == genome_name, e
# else proceed
checker_info[sample, genome_name] = this_checker_info
if args.summarise:
#### summarise only
if args.reads_name:
if args.genome_name:
foutname = 'rearrangements_regions_{}__{}.csv'.format(use_name_group,use_name_genome)
else:
foutname = 'rearrangements_regions_{}.csv'.format(use_name_group)
else:
foutname = 'rearrangements_regions.csv'
print('Writing to {}'.format(foutname))
with open(foutname, 'w') as fout:
fout.write('"chromosome","sample","filter","start","end"\n')
for (sample, genome_name), info in sorted(checker_info.items()):
print('Writing out {} regions'.format(sample))
for start, end in info['suspect_regions']['rearrangements']:
fout.write('"{}","{}","rearrangements1",{},{}\n'.format(genome_name, sample, start, end))
for start, end in info['suspect_regions']['rearrangements_extended']:
fout.write('"{}","{}","rearrangements2",{},{}\n'.format(genome_name, sample, start, end))
if args.collect or args.collect_ranges:
    # load the reference genome saved by the CollectData module
    use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
    assert all([use_path_genome,use_name_genome]), 'Could not locate genome given: {}'.format(args.genome_name)
    genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
    ## which BAMs?
    ## this is a bit clunky . . probably best to parse BAM headers once, above
    ## move into baga function from cli?
    import pysam
    # index each BAM by (sample ID, reference sequence name) taken from its
    # header: first read group (RG) and first reference sequence (SQ)
    BAMs_by_ids = {}
    for BAM in BAMs:
        header = pysam.Samfile(BAM, 'rb').header
        BAMs_by_ids[(header['RG'][0]['ID'],header['SQ'][0]['SN'])] = BAM
    from baga import AssembleReads
    # memory budget for SPAdes assemblies below
    if args.max_memory:
        use_mem_gigs = args.max_memory
    else:
        # round down available GBs
        use_mem_gigs = int(baga.get_available_memory())
# For each sample: extract reads near putative rearrangements (or user-given
# ranges), assemble them with SPAdes together with the unmapped/poorly-mapped
# reads, then align the resulting contigs back to the reference.
for (sample, genome_name), info in sorted(checker_info.items()):
    # extracted reads go in a per-genome folder
    path_to_fastq_folder = os.path.sep.join(['read_collections', genome_name])
    if not os.path.exists(path_to_fastq_folder):
        os.makedirs(path_to_fastq_folder)
    print('Extracting reads aligned near rearrangements between {} and genome {} . . .'.format(sample, genome_name))
    collector = Structure.Collector(BAMs_by_ids[(sample, genome_name)])
    e = 'mismatch between BAM genome ({}) and genome used by BAGA ({})'.format(
            collector.reads.references[0],
            genome_name)
    assert genome_name == collector.reads.references[0], e
    if args.collect_ranges:
        # single assembly of reads aligned to one or more ranges in reference
        # versus separate assemblies of multiple ranges i.e., those with putative rearrangements
        single_assembly = True
        # ensure pairs of start-end ranges given
        e = 'Odd number of ranges provided. Required: start-end, '\
                'start-end integers as --collect_ranges start end start end'
        assert len(args.collect_ranges) % 2 == 0, e
        e = 'Ranges must be non-overlapping and in ascending order'
        assert sorted(args.collect_ranges) == args.collect_ranges, e
        # pair consecutive integers up as (start, end) tuples
        use_regions = zip(args.collect_ranges[::2],args.collect_ranges[1::2])
        use_num_padding_positions = 0
    else:
        single_assembly = False
        # join main rearrangement zones with extended regions if found
        # to get contiguous blocks for investigation
        all_regions = sorted(
                info['suspect_regions']['rearrangements'] + \
                info['suspect_regions']['rearrangements_extended'])
        from collections import Counter
        # flatten to boundary positions; a boundary shared by two adjacent
        # regions appears twice and is dropped, which merges those regions
        use_regions = [a for b in all_regions for a in b]
        c = Counter(use_regions)
        use_regions = [a for a in use_regions if c[a] == 1]
        use_regions = zip(use_regions[::2],use_regions[1::2])
        use_num_padding_positions = args.num_padding_positions
    # collect reads that did not map (well) anywhere
    collector.getUnmapped()
    r1_out_path_um, r2_out_path_um, rS_out_path_um = collector.writeUnmapped(path_to_fastq_folder)
    # assemble poorly/unmapped alone first
    reads_path_unmapped = {}
    output_folder_um = '_'.join(r1_out_path_um.split('_')[:-1]).split(os.path.sep)[-1]
    reads_path_unmapped[output_folder_um] = r1_out_path_um, r2_out_path_um, rS_out_path_um
    path_to_bad_unmapped_contigs = os.path.sep.join(['read_collections',
            genome_name,
            output_folder_um,
            'contigs.fasta'])
    # skip if a non-empty assembly already exists, unless --force given
    if os.path.exists(path_to_bad_unmapped_contigs) and \
            os.path.getsize(path_to_bad_unmapped_contigs) > 0 and \
            not args.force:
        print('Found assembly at {}\nUse --force/-F to overwrite. Skipping . . .'.format(path_to_bad_unmapped_contigs))
    else:
        if not args.force:
            print('Nothing found at {}. Doing assembly.'.format(path_to_bad_unmapped_contigs))
        reads = AssembleReads.DeNovo(paths_to_reads = reads_path_unmapped)
        reads.SPAdes(output_folder = ['read_collections', genome_name], mem_num_gigs = use_mem_gigs,
                only_assembler = True, careful = False)
    # assemble read from each region with poorly/unmapped
    reads_paths = {}
    # make a second dict of reads for assembly, all values for unmapped reads
    # that need to be included in each assembly
    reads_path_unmapped = {}
    assemblies_by_region = {}
    for (s,e) in use_regions:
        collector.makeCollection(s, e, use_num_padding_positions)
        r1_out_path, r2_out_path, rS_out_path = collector.writeCollection(path_to_fastq_folder)
        if not r1_out_path:
            # if no reads found, False returned
            # do not add to reads_paths dict for assembly
            print('debug: no reads found by collector')
            continue
        # put assembly in folder with same name as read files
        output_folder = '_'.join(r1_out_path.split('_')[:-1]).split(os.path.sep)[-1]
        path_to_contigs = os.path.sep.join(['read_collections',
                genome_name,
                output_folder,
                'contigs.fasta'])
        assemblies_by_region[s,e] = path_to_contigs
        if os.path.exists(path_to_contigs) and \
                os.path.getsize(path_to_contigs) > 0 and \
                not args.force:
            print('Found assembly at {}\nUse --force/-F to overwrite. Skipping . . .'.format(path_to_contigs))
        else:
            reads_paths[output_folder] = (r1_out_path, r2_out_path, rS_out_path)
            reads_path_unmapped[output_folder] = r1_out_path_um, r2_out_path_um, rS_out_path_um
    print('debug: len(assemblies_by_region) == {}'.format(len(assemblies_by_region)))
    reads = AssembleReads.DeNovo(paths_to_reads = reads_paths,
            paths_to_reads2 = reads_path_unmapped)
    reads.SPAdes(output_folder = ['read_collections', genome_name],
            mem_num_gigs = use_mem_gigs, single_assembly = single_assembly,
            only_assembler = True, careful = False)
    # a dict of paths to contigs per region
    aligner = Structure.Aligner(genome)
    unmappedfasta = os.path.sep.join(['read_collections',
            genome_name,
            output_folder_um,
            'contigs.fasta'])
    if os.path.exists(unmappedfasta) and os.path.getsize(unmappedfasta) > 0:
        if len(assemblies_by_region) > 0:
            # provide dict of range tuples
            # unmapped-read contigs are supplied so sequence present in them
            # can be omitted from the per-region alignments
            aligner.alignRegions(assemblies_by_region,
                    use_num_padding_positions,
                    path_to_omit_sequences = unmappedfasta,
                    single_assembly = single_assembly,
                    min_region_length = args.min_align_region)
            aligner.reportAlignments()
        else:
            print('WARNING: no assembled regions found. Either there are '\
                    'none which is fine, or SPAdes assemblies failed '\
                    'to finish. You could check SPAdes log files in '\
                    'folders in {}'.format(os.path.sep.join([
                    'read_collections', genome_name])))
    else:
        # no unmapped-read contigs: diagnose why, then align regions anyway
        print('WARNING: no assembled unmapped and poorly mapped reads found at:\n{}'.format(unmappedfasta))
        try:
            r1_size = os.path.getsize(r1_out_path_um)
            r2_size = os.path.getsize(r2_out_path_um)
            print('but reads, {} ({:,} bytes) and {} ({:,} bytes), exist . . check SPAdes assembly log in {}'.format(
                    r1_out_path_um,
                    r1_size,
                    r2_out_path_um,
                    r2_size,
                    unmappedfasta.replace('contigs.fasta','')))
        except IOError:
            print('WARNING: could not find unmapped and poorly '\
                    'aligned reads at:\n{}\n{}\nthis is unexpected but '\
                    'conceivable (if ALL reads really did map to reference!).'.format(
                    r1_out_path_um,r2_out_path_um))
        print('proceeding with alignment of assembled putatively rearranged regions to reference nonetheless')
        if len(assemblies_by_region) > 0:
            aligner.alignRegions(assemblies_by_region, use_num_padding_positions, single_assembly = single_assembly)
            aligner.reportAlignments()
        else:
            print('WARNING: no assembled regions found. Either there are '\
                    'none which is fine, or SPAdes assemblies failed '\
                    'to finish. You could check SPAdes log files in '\
                    'folders in {}'.format(os.path.sep.join([
                    'read_collections', genome_name])))
### Call Variants ###
if args.subparser == 'CallVariants':
    print('\n-- Variant Calling module --\n')
    # check whether GATK path is needed
    if any([args.callsingles,
            args.calleach,
            args.calljoint,
            args.hardfilter,
            args.recalibrate
            ]):
        # all of the above options invoke GATK and require a single reads group
        assert args.reads_name, '--reads_name is required for calling with GATK'
        if len(args.reads_name) != 1:
            sys.exit('Only one reads group can be processed by GATK per '\
                    'analysis (supplied: {})'.format(', '.join(args.reads_name)))
        if args.calldisco:
            sys.exit('--calldisco cannot be used with any GATK options!')
        elif not args.GATK_jar_path:
            sys.exit('''Please supply:
--GATK_jar_path
if using any of:
--callsingles
--calleach
--calljoint
--hardfilter
--recalibrate
''')
        elif not args.genome_name:
            sys.exit('--genome_name is required for GATK')
    # load the reference genome if one was named (needed by GATK and --check)
    if args.genome_name:
        use_path_genome,use_name_genome = check_baga_path(
                'baga.CollectData.Genome', args.genome_name)
        assert all([use_path_genome,use_name_genome]), 'Could not locate a '\
                'saved baga.CollectData.Genome-<genome_name>.baga for name '\
                'given: {}'.format(args.genome_name)
        from baga import CollectData
        genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
    import baga
    from baga import CallVariants
    if args.check:
        # --check validates called variants (VCFs) against the read
        # alignments (BAMs) and the reference genome
        assert (args.vcfs_paths and args.genome_name and args.alignments_paths), ''\
                '--check needs --vcfs_paths to know which variants to check for and '\
                '--genome_name to align contigs and --alignments_paths for the BAM files '\
                'against.'
def collectfiles(paths, file_extensions):
    """Collect files whose extension is in file_extensions from paths.

    Each entry in paths may be a directory (matching files inside it are
    gathered, non-recursively) or a single file path (added as-is with no
    extension check). Raises AssertionError if a directory contains no
    matching files.
    """
    files = []
    for path in paths:
        if os.path.isdir(path):
            path_contents = os.listdir(path)
            # fixed: was _os.path.extsep which raised NameError (this
            # module imports the os module as "os")
            thesefiles = [os.path.sep.join([path,f]) for f in path_contents if \
                    f.split(os.path.extsep)[-1] in file_extensions]
            glob_extensions = ['*.{}'.format(e) for e in file_extensions]
            # report the folder actually searched (previously this message
            # always interpolated args.alignments_paths, which was wrong
            # when collecting VCFs)
            assert len(thesefiles), 'No files ({}) found in:\n{}'\
                    ''.format(' or '.join(glob_extensions), path)
            files += thesefiles
        else:
            # add file
            files += [path]
    return files
# gather BAM and VCF inputs (files or folders of files) then run the
# variants-vs-alignments check
BAMs = collectfiles(args.alignments_paths, ('BAM', 'bam'))
VCFs = collectfiles(args.vcfs_paths, ('VCF', 'vcf'))
checker = CallVariants.Checker(VCFs, BAMs, genome)
checker.doCheck(num_padding = 1000, max_memory = args.max_memory,
        force = args.force)
else:
    if args.calldisco:
        # DiscoSNP++ calling (can run with or without a reference genome)
        assert args.reads_name, '--reads_name is required for calling with DiscoSNP++'
        # check if direct commands make sense
        direct_arguments = check_direct_arguments(args.arguments,
                wrapped_tools = ['DiscoSNP++'])
        # allows for multiple tools to have direct arguments processed together
        try:
            use_arguments = direct_arguments['DiscoSNP++']
        except KeyError:
            use_arguments = False
        # load baga reads
        from baga import PrepareReads
        use_these = []
        use_these_names = []
        for reads_group in args.reads_name:
            use_path_reads,use_name_reads = check_baga_path(
                    'baga.PrepareReads.Reads', reads_group)
            use_these_names += [use_name_reads]
            assert all([use_path_reads,use_name_reads]), 'Could not locate a saved '\
                    'baga.PrepareReads.Reads-<reads_name>.baga for reads group '\
                    'given: {}'.format(reads_group)
            use_these += [PrepareReads.Reads(path_to_baga = use_path_reads)]
            print('Loaded: {}'.format(use_path_reads))
        if args.genome_name:
            caller = CallVariants.CallerDiscoSNP(reads = use_these, genome = genome)
        else:
            caller = CallVariants.CallerDiscoSNP(reads = use_these)
        # prefix output with the genome name when calling against a
        # reference, otherwise mark it as reference-free
        if args.genome_name:
            add_prefix = use_name_genome + '_' + '+'.join(use_these_names)
        else:
            add_prefix = 'noref_' + '+'.join(use_these_names)
        caller.call(use_existing_graph = args.use_existing_graph, add_prefix = add_prefix, arguments = use_arguments)
elif any([args.callsingles,
        args.calleach,
        args.calljoint,
        args.hardfilter,
        args.recalibrate
        ]):
    # check if direct commands make sense
    direct_arguments = check_direct_arguments(args.arguments,
            wrapped_tools = ['HaplotypeCaller', 'GenotypeGVCFs'])
    # GATK only other option implemented.
    # check_baga_path() can handle full object names
    # e.g. baga.CollectData.Genome-mygenome.baga as well as actual names
    # i.e. mygenome . . . but not for this reads__genome compound name
    # so args.reads must be given correctly. Feedback given is a file can't
    # be found so should still be fine for ease-of-use.
    reads_genome_name = '__'.join([args.reads_name[0], use_name_genome])
    # see what is already available (existing analysis of data from
    # previous baga stage or data from this stage)
    use_path_alns,use_name_alns = check_baga_path(
            'baga.AlignReads.SAMs', reads_genome_name)
    use_path_caller,use_name_caller = check_baga_path(
            'baga.CallVariants.CallerGATK', reads_genome_name)
    if args.max_memory:
        max_memory = args.max_memory
    else:
        # fall back to 8 GB for the GATK JVM
        max_memory = 8
    if args.new:
        # --new: discard any previous caller state and start from alignments
        print('Starting new variant calling analysis because --new '\
                'requested. Will overwrite any previous analyses.')
        from baga import AlignReads
        assert all([use_path_alns,use_name_alns]), 'Could not locate a saved '\
                'baga.AlignReads.SAMs-<reads_name>__<genome_name>.baga for '\
                'reads group and genome combination given: {}'.format(
                use_name_alns)
        print('Loading alignments information for: {} from AlignReads output'.format(use_name_alns))
        alignments = AlignReads.SAMs(baga = use_path_alns)
        caller = CallVariants.CallerGATK(alignments = alignments)
    elif use_path_caller:
        # attempt to resume (if use_path_caller not False)
        print('Loading existing variants call analysis for: {}'.format(use_name_alns))
        print('(use --new to start variant calling again)')
        caller = CallVariants.CallerGATK(
                baga = use_path_caller)
    else:
        # start new as previous not found
        assert all([use_path_alns,use_name_alns]), 'Could not locate a saved '\
                'baga.AlignReads.SAMs-<reads_name>__<genome_name>.baga for '\
                'reads group and genome combination given: {}'.format(
                use_name_alns)
        print('Starting new variant calling analysis (could not find '\
                'previous baga.CallVariants.CallerGATK-{}.baga)'.format(
                reads_genome_name))
        from baga import AlignReads
        print('Loading alignments information for: {} from AlignReads '\
                'output'.format(use_name_alns))
        alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}.baga'\
                ''.format(use_name_alns))
        caller = CallVariants.CallerGATK(alignments = alignments)
    # because --new, setting --force to true to overwrite each output file
    if args.new:
        print('Because --new, also setting --force to overwrite each output file')
        args.force = True
    if args.callsingles and (args.calleach or args.calljoint):
        print('--callsingles for calling variants in individual samples '\
                'cannot be used with the --calleach plus --calljoint '\
                'combination that is used for joint variant calling for a cohort')
        sys.exit(1)
    if args.callsingles:
        # per-sample VCF calling; pass through any direct HaplotypeCaller args
        try:
            use_arguments = direct_arguments['HaplotypeCaller']
        except KeyError:
            use_arguments = False
        caller.CallVCFsGATK(
                mem_num_gigs = max_memory,
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                force = args.force,
                max_cpus = args.max_cpus,
                arguments = use_arguments)
        # save progress after each stage so the analysis can be resumed
        caller.saveLocal(use_name_alns)
    if args.calleach:
        # per-sample gVCF calling for later joint genotyping
        try:
            use_arguments = direct_arguments['HaplotypeCaller']
        except KeyError:
            use_arguments = False
        caller.CallgVCFsGATK(
                mem_num_gigs = max_memory,
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                force = args.force,
                max_cpus = args.max_cpus,
                arguments = use_arguments)
        caller.saveLocal(use_name_alns)
    if args.calljoint:
        # joint genotyping across the cohort's gVCFs
        try:
            use_arguments = direct_arguments['GenotypeGVCFs']
        except KeyError:
            use_arguments = False
        caller.GenotypeGVCFsGATK(
                reads_genome_name,
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                # ultimately, scale this by the number of samples involved
                # needed >8 for a set of 40
                mem_num_gigs = max_memory,
                force = args.force,
                arguments = use_arguments)
        caller.saveLocal(use_name_alns)
    if args.hardfilter:
        # apply GATK hard filters to SNPs and INDELs separately
        caller.hardfilterSNPsGATK(
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                force = args.force)
        caller.hardfilterINDELsGATK(
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                force = args.force)
        caller.saveLocal(use_name_alns)
    if args.recalibrate:
        # this is slow!
        caller.recalibBaseScoresGATK(
                jar = args.GATK_jar_path.split(os.path.sep),
                use_java = use_java,
                force = args.force,
                mem_num_gigs = max_memory,
                max_cpus = args.max_cpus)
        caller.saveLocal(use_name_alns)
### Filter Variants ###
if args.subparser == 'FilterVariants':
    print('\n-- Filter Variants (part of the Variant Calling module) --\n')
    ## to apply variants, provide one reads group name
    if args.reads_name:
        if len(args.reads_name) > 1:
            sys.exit('Filters can only be applied to one group of reads at a time. Multiple sets can be handled with the --report option, though.')
    ## to report effects of filters, provide one or more read group names along with --report
    from baga import CallVariants
    use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
    e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
    assert all([use_path_genome,use_name_genome]), e
    VCFs_for_report = {}
    # collect VCFs
    if args.reads_name:
        for these_reads in args.reads_name:
            # baga pipeline information provided
            # allow for multiple rounds of recalibration at end of CallVariants i.e., 1 or 2 and select 2 if available
            import baga
            # sometimes the baga from the previous step in the pipeline is not actually needed
            # so this name and file check could be relaxed
            use_path_reads,use_name_reads = check_baga_path('baga.PrepareReads.Reads', these_reads)
            e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga for reads group given: {}'.format(these_reads)
            assert all([use_path_reads,use_name_reads]), e
            alns_name = '__'.join([use_name_reads, use_name_genome])
            filein = 'baga.CallVariants.CallerGATK-{}.baga'.format(alns_name)
            caller = CallVariants.CallerGATK(baga = filein)
            # prefer hardfiltered VCFs, then unfiltered, then raw gVCFs;
            # [-1] selects the most recent round of calling
            if hasattr(caller, 'path_to_hardfiltered_SNPs') and hasattr(caller, 'path_to_hardfiltered_INDELs'):
                # only one for VCFs so no overwriting
                if isinstance(caller.path_to_hardfiltered_SNPs[-1],list):
                    # called with --callsingle: one sample per VCF
                    VCFs = caller.path_to_hardfiltered_SNPs[-1] + caller.path_to_hardfiltered_INDELs[-1]
                else:
                    # called with --calleach --calljoint: multi-sample VCFs
                    VCFs = [caller.path_to_hardfiltered_SNPs[-1], caller.path_to_hardfiltered_INDELs[-1]]
                # more than one can be handled with --report though
                # only implemented for separate VCFs currently
                VCFs_for_report[these_reads] = {'SNPs':caller.path_to_hardfiltered_SNPs[-1], 'InDels':caller.path_to_hardfiltered_INDELs[-1]}
            elif hasattr(caller, 'path_to_unfiltered_VCF'):
                print('WARNING: path to GATK hardfiltered variants not found in {}'.format(filein))
                print('It is recommended to complete the GATK variant calling with the CallVariants module')
                if isinstance(caller.path_to_unfiltered_VCF[-1],list):
                    VCFs = caller.path_to_unfiltered_VCF[-1]
                else:
                    VCFs = [caller.path_to_unfiltered_VCF[-1]]
            elif hasattr(caller, 'paths_to_raw_gVCFs'):
                print('WARNING: path to GATK joint called variants not found in {}'.format(filein))
                print('It is recommended to complete the GATK variant calling with the CallVariants module')
                if isinstance(caller.paths_to_raw_gVCFs[-1],list):
                    VCFs = caller.paths_to_raw_gVCFs[-1]
                else:
                    VCFs = [caller.paths_to_raw_gVCFs[-1]]
            else:
                print('WARNING: path to GATK called variants not found in {}'.format(filein))
                sys.exit('It seems the analysis described in {} is incomplete. Try completing or rerunning using the CallVariants module'.format(filein))
else:
# list of folders or files provided in args.vcfs_paths
VCFs = []
for path in args.vcfs_paths:
if os.path.isdir(path):
path_contents = os.listdir(path)
theseVCFs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('VCF', 'vcf')]
e = 'No VCF files (*.vcf or *.VCF) found in:\n{}'.format(args.alignments_paths)
assert len(theseVCFs), e
VCFs += theseVCFs
else:
# add file
VCFs += [path]
print('Loaded VCF locations:\n{}'.format('\n'.join(VCFs)))
# check accessible and collect sample names with genome accession
sample_names = {}
for VCF in VCFs:
    try:
        with open(VCF, 'r') as filein:
            header, header_section_order, colnames, variants = CallVariants.parseVCF(VCF)
            # parse the ##contig=<...> header line into a dict of its
            # key=value fields (e.g. ID, length)
            bits = header['contig'][0].split('<')[-1].split('>')[0].split(',')
            contiginfo = dict([bit.split('=') for bit in bits])
            # sample columns start at the 10th column of a VCF
            for sample_name in colnames[9:]:
                sample_names[sample_name] = contiginfo
    except IOError as e:
        print(e)
        sys.exit('Failed to open: {}'.format(VCF))
# genome is only needed when generating csv summary with ORFs affected
from baga import CollectData
print('Loading genome %s' % use_name_genome)
genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
print('Loading filter information . . .')
filters = {}
if 'genome_repeats' in args.filters:
    # ambiguous (repeated) regions of the reference genome found by Repeats
    from baga import Repeats
    filein = 'baga.Repeats.FinderInfo-{}.baga'.format(use_name_genome)
    finder_info = Repeats.loadFinderInfo(filein)
    filters['genome_repeats'] = finder_info['ambiguous_ranges']
    # print a summary
    t = len(filters['genome_repeats'])
    s = sum([(e - s) for s,e in filters['genome_repeats']])
    print('Reference genome sequence {} contains {} repeated regions spanning {:,} basepairs'.format(use_name_genome, t, s))
if 'rearrangements' in args.filters:
    # per-sample suspect regions found by the Structure module
    from baga import Structure
    filters['rearrangements'] = {}
    #for sample,checker in checkers.items():
    for sample,genomeinfo in sorted(sample_names.items()):
        # optionally restrict to a subset of samples
        if args.include_samples:
            if sample not in args.include_samples:
                continue
        filein = 'baga.Structure.CheckerInfo-{}__{}.baga'.format(sample, genomeinfo['ID'])
        if args.path_to_rearrangements_info:
            filein = os.path.sep.join([args.path_to_rearrangements_info,filein])
        checker_info = Structure.loadCheckerInfo(filein)
        e = 'Genome name for checker {} ({}) does not match supplied genome name ({})'.format(filein, checker_info['genome_name'], genome.id)
        name_match = checker_info['genome_name'] == genome.id
        #### temporarily force to true: CheckerInfo will now save genome length
        # NOTE(review): this assignment makes length_match below always True,
        # defeating the length sanity check — confirm this is still intended
        checker_info['genome_length'] = len(genome.sequence)
        length_match = checker_info['genome_length'] == len(genome.sequence)
        if not name_match:
            print('WARNING: genome name used for rearrangements filter for {} ({}) does not match requested genome to use ({}).'.format(sample, checker_info['genome_name'], genome.id))
            if length_match:
                print('Genome lengths match so proceeding and assuming alternative names or accession numbers for same genome ({:,} bp).'.format(checker_info['genome_length']))
            else:
                sys.exit('ERROR: genome length mismatch. Different genome used for rearrangemtns filter analysis to one requested to use to apply filter? ({:,} bp vs {:,} bp).'.format(checker_info['genome_length'],len(genome.sequence)))
        # report brief summary for this sample
        print('For sample {}, relative to {}:'.format(sample, genome.id))
        #t = len(checker_info['suspect_regions']['high non-proper pairs'])
        t = len(checker_info['suspect_regions']['rearrangements'])
        s = sum([(e - s) for s,e in checker_info['suspect_regions']['rearrangements']])
        print(' {} regions spanning {:,} basepairs are affected by rearrangements thus having ambiguous 1:1 orthology.'.format(t, s))
        #t = len(checker_info['suspect_regions_extensions']['high non-proper pairs'])
        t = len(checker_info['suspect_regions']['rearrangements_extended'])
        s = sum([(e - s) for s,e in checker_info['suspect_regions']['rearrangements_extended']])
        print(' {} additional regions spanning {:,} basepairs are adjacent to the above regions but have a >50% zero read depth over a moving window (i.e., any aligned reads have a patchy distribution, are usually rare). These are typically large deletions including missing prophage and genomic islands\n'.format(t, s))
        filters['rearrangements'][sample] = checker_info['suspect_regions']
# apply all loaded filters to the collected VCFs
filter_applier = CallVariants.Filter(VCFs, genome) #, use_name_reads)
filter_applier.doFiltering(filters)
### Summarise Variants ###
if args.subparser == 'SummariseVariants':
    print('\n-- Summarise Variants (part of the Variant Calling module) --\n')
    from baga import CallVariants
    ## to apply variants, provide one reads group name
    if args.reads_name:
        assert args.genome_names, "--genome_names is required with --reads_name"
    VCFs_for_report = {}
    # collect genomes
    # plural genomes is a bit of a hack at the moment and only works here with named VCFs . . .
    if args.genome_names:
        from baga import CollectData
        use_genomes = []
        for genome_name in args.genome_names:
            use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', genome_name)
            assert all([use_path_genome,use_name_genome]), 'Could not locate genome: {}'.format(genome_name)
            use_genomes += [CollectData.Genome(local_path = use_path_genome, format = 'baga')]
    else:
        use_genomes = False
    # collect VCFs
    if args.reads_name:
        for these_reads in args.reads_name:
            # baga pipeline information provided
            # allow for multiple rounds of recalibration at end of CallVariants i.e., 1 or 2 and select 2 if available
            import baga
            # sometimes the baga from the previous step in the pipeline is not actually needed
            # so this name and file check could be relaxed
            use_path_reads,use_name_reads = check_baga_path('baga.PrepareReads.Reads', these_reads)
            e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga for reads group given: {}'.format(these_reads)
            assert all([use_path_reads,use_name_reads]), e
            # NOTE(review): use_name_genome here is whichever genome the loop
            # above processed last — confirm the single-genome assumption
            alns_name = '__'.join([use_name_reads, use_name_genome])
            filein = 'baga.CallVariants.CallerGATK-{}.baga'.format(alns_name)
            caller = CallVariants.CallerGATK(baga = filein)
            # prefer hardfiltered VCFs, then unfiltered, then raw gVCFs;
            # [-1] selects the most recent round of calling
            if hasattr(caller, 'path_to_hardfiltered_SNPs') and hasattr(caller, 'path_to_hardfiltered_INDELs'):
                # only one for VCFs so no overwriting
                if isinstance(caller.path_to_hardfiltered_SNPs[-1],list):
                    # called with --callsingle: one sample per VCF
                    VCFs = caller.path_to_hardfiltered_SNPs[-1] + caller.path_to_hardfiltered_INDELs[-1]
                else:
                    # called with --calleach --calljoint: multi-sample VCFs
                    VCFs = [caller.path_to_hardfiltered_SNPs[-1], caller.path_to_hardfiltered_INDELs[-1]]
                # more than one can be handled with --report though
                # only implemented for separate VCFs currently
                VCFs_for_report[these_reads] = {'SNPs':caller.path_to_hardfiltered_SNPs[-1], 'InDels':caller.path_to_hardfiltered_INDELs[-1]}
            elif hasattr(caller, 'path_to_unfiltered_VCF'):
                print('WARNING: path to GATK hardfiltered variants not found in {}'.format(filein))
                print('It is recommended to complete the GATK variant calling with the CallVariants module')
                if isinstance(caller.path_to_unfiltered_VCF[-1],list):
                    # called with --callsingle: one sample per VCF
                    VCFs = caller.path_to_unfiltered_VCF[-1]
                else:
                    # called with --calleach --calljoint: multi-sample VCFs
                    VCFs = [caller.path_to_unfiltered_VCF[-1]]
            elif hasattr(caller, 'paths_to_raw_gVCFs'):
                print('WARNING: path to GATK joint called variants not found in {}'.format(filein))
                print('It is recommended to complete the GATK variant calling with the CallVariants module')
                if isinstance(caller.paths_to_raw_gVCFs[-1],list):
                    # called with --callsingle: one sample per VCF
                    VCFs = caller.paths_to_raw_gVCFs[-1]
                else:
                    # called with --calleach --calljoint: multi-sample VCFs
                    VCFs = [caller.paths_to_raw_gVCFs[-1]]
            else:
                print('WARNING: path to GATK called variants not found in {}'.format(filein))
                sys.exit('It seems the analysis described in {} is incomplete. Try completing or rerunning using the CallVariants module'.format(filein))
else:
# list of folders or files provided in args.vcfs_paths
VCFs = []
for path in args.vcfs_paths:
if os.path.isdir(path):
path_contents = os.listdir(path)
theseVCFs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('VCF', 'vcf')]
e = 'No VCF files (*.vcf or *.VCF) found in:\n{}'.format(args.alignments_paths)
assert len(theseVCFs), e
VCFs += theseVCFs
else:
# add file
VCFs += [path]
print('Loaded VCF locations:\n{}'.format('\n'.join(VCFs)))
# both --cumulative and --lists report types require named filters
message = 'Need --filters FILTER_NAME [FILTER_NAME] for report type "{}"'
if args.cumulative:
    assert args.filters, message.format('cumulative')
    print('Reporting cumulative variant totals by class and group as '\
            'filters applied : {}'.format('+'.join(args.filters)))
    for genome in use_genomes:
        CallVariants.reportCumulative(args.filters, genome.id, VCFs_for_report)
if args.lists:
    assert args.filters, message.format('lists')
    print('Reporting lists of variants by class and group with filters'\
            ': {}'.format('+'.join(args.filters)))
    for genome in use_genomes:
        CallVariants.reportLists(args.filters, genome.id, VCFs_for_report)
if args.simple:
    # plain per-VCF summary; no filter information required
    summariser = CallVariants.Summariser(VCFs, genomes = use_genomes)
    summariser.simple()
### Check Linkage ###
if args.subparser == 'CheckLinkage':
    print('\n-- Check Linkage (part of the Variant Calling module) --\n')
    # required input: paths to corresponding VCFs and BAMs
    # which baga objects contain that information?
    from baga import CollectData
    use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
    from baga import CallVariants
    if args.reads_name:
        # allegedly works in 2 and 3
        raise NotImplementedError('Waiting for pooled samples to be implemented in baga.CallVariants. Use --vcfs_paths --alignments_paths instead')
        # NOTE(review): everything below in this branch is unreachable due to
        # the raise above; it also references an undefined name (these_reads,
        # a few lines down) that would need fixing before re-enabling
        # part of baga pipeline
        if not args.genome_name:
            parser.error('--genome_name/-g is required with --reads_name/-n. (The baga CollectData-processed genome used with the AlignReads option)')
        # in this case, don't need to load the genome, just have its sanitised name
        # e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
        # assert all([use_path_genome,use_name_genome]), e
        # allow for multiple rounds of recalibration at end of CallVariants i.e., 1 or 2 and select 2 if available
        # sometimes the baga from the previous step in the pipeline is not actually needed
        # so this name and file check could be relaxed
        use_path_reads,use_name_reads = check_baga_path('baga.PrepareReads.Reads', args.reads_name)
        e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga for reads group given: {}'.format(these_reads)
        assert all([use_path_reads,use_name_reads]), e
        alns_name = '__'.join([use_name_reads, use_name_genome])
        filein = 'baga.CallVariants.CallerGATK-{}.baga'.format(alns_name)
        caller = CallVariants.CallerGATK(baga = filein)
        # prefer hardfiltered VCFs, then unfiltered, then raw gVCFs
        if hasattr(caller, 'path_to_hardfiltered_SNPs') and hasattr(caller, 'path_to_hardfiltered_INDELs'):
            # only one for VCFs so no overwriting
            VCFs = [caller.path_to_hardfiltered_SNPs[-1], caller.path_to_hardfiltered_INDELs[-1]]
        elif hasattr(caller, 'path_to_unfiltered_VCF'):
            print('WARNING: path to GATK hardfiltered variants not found in {}'.format(filein))
            print('It is recommended to complete the GATK variant calling with the CallVariants module')
            VCFs = [caller.path_to_unfiltered_VCF[-1]]
        elif hasattr(caller, 'paths_to_raw_gVCFs'):
            print('WARNING: path to GATK joint called variants not found in {}'.format(filein))
            print('It is recommended to complete the GATK variant calling with the CallVariants module')
            VCFs = caller.paths_to_raw_gVCFs[-1]
        else:
            print('WARNING: path to GATK called variants not found in {}'.format(filein))
            sys.exit('It seems the analysis described in {} is incomplete. Try completing or rerunning using the CallVariants module'.format(filein))
        print('Loading alignments information for: {}__{} from AlignReads output'.format(use_name_group, use_name_genome))
        from baga import AlignReads
        alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}__{}.baga'.format(use_name_group, use_name_genome))
        e = 'the reads for "--reads_name/-n {}" seem to not have been fully processed by the AlignReads module: they are missing the "ready_BAMs" attribute. Please ensure the AlignReads commands "--align --deduplicate --indelrealign" have been performed.'.format(args.reads_name)
        assert hasattr(alignments, 'ready_BAMs'), e
        ### shouldn't all BAMs have headers parsed and stored in dict with (sample,genome) from the start?
        BAMs = alignments.ready_BAMs[-1]
        sample_names = sorted(alignments.read_files)
        # check on requested samples: crop BAMs accordingly <== and VCFs, and what about sample matching . . .
        if args.include_samples:
            if not args.reads_name:
                # need sample names so will parse BAMs
                import pysam
                sample_names = {}
                for BAM in BAMs:
                    sample_names[pysam.Samfile(BAM, 'rb').header['RG'][0]['ID']] = BAM
            # partition requested labels into found / missing sets
            missing_labels = sorted(set(args.include_samples) - set(sample_names))
            found_labels = sorted(set(args.include_samples) & set(sample_names))
            e = ['None of the requested sample labels were found among the previously checked reads.']
            e += ['Requested: {}'.format(', '.join(args.include_samples))]
            e += ['Available: {}'.format(', '.join(sorted(sample_names)))]
            assert len(found_labels), '\n'.join(e)
            if len(missing_labels):
                print('WARNING: could not find the following requested samples among previously checked reads.')
                print(', '.join(missing_labels))
            # update BAMs for args.check
            print(BAMs)
            if not args.reads_name:
                BAMs = [sample_names[sample] for sample in found_labels]
            else:
                BAMs = [BAM for BAM in BAMs if BAM.split(os.path.sep)[-1].split('__')[0] in found_labels]
            print(BAMs)
            # update sample_names for arg.plot
            sample_names = sorted(found_labels)
else:
# list of folders or files provided in args.vcfs_paths
VCFs = []
for path in args.vcfs_paths:
if os.path.isdir(path):
path_contents = os.listdir(path)
theseVCFs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('VCF', 'vcf')]
e = 'No VCF files (*.vcf or *.VCF) found in:\n{}'.format(args.alignments_paths)
assert len(theseVCFs), e
VCFs += theseVCFs
else:
# add file
VCFs += [path]
print('Loaded VCF locations:\n{}'.format('\n'.join(VCFs)))
# gather BAMs the same way: folders are searched by 3-character extension,
# file paths are taken as-is
BAMs = []
for path in args.alignments_paths:
    if os.path.isdir(path):
        path_contents = os.listdir(path)
        theseBAMs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('BAM', 'bam')]
        e = 'No BAM files (*.bam or *.BAM) found in:\n{}'.format(args.alignments_paths)
        assert len(theseBAMs), e
        BAMs += theseBAMs
    else:
        # add file
        BAMs += [path]
print('Loaded BAM locations:\n{}'.format('\n'.join(BAMs)))
print('Loading genome %s' % use_name_genome)
genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
if args.check:
    # run the linkage check across all collected VCF/BAM pairs
    linkage_checker = CallVariants.Linkage(genome = genome, vcf_paths = VCFs, alignment_paths = BAMs)
    linkage_checker.doLinkageCheck()
else:
    # typo fixed in user-facing message: "fo" -> "do"
    print('use --check to actually do the checking . . .')
if args.subparser == 'ComparativeAnalysis':
    print('\n-- Comparative Analyses --\n')
    from baga import AlignReads
    from baga import CallVariants
    from baga import ComparativeAnalysis
    from baga import CollectData
    if args.build_MSA:
        # required: reference genome
        # required: reads name OR path to VCFs (. . optional path to BAMs to properly include gaps)
        # ... need to link VCFs to BAMs . . . .
        # check appropriate combination of options provided
        if args.reads_name:
            if args.sample_bams:
                print('If --reads_name/-n provided, --sample_bams/-B is not necessary: ignoring latter')
        elif args.vcfs_paths:
            # these two added as "add_mutually_exclusive_group"
            if args.include_invariants:
                if not args.sample_bams:
                    print('WARNING: making a full-length multiple-sequence alignment without checking read alignments for missing pieces of chromosome is a risky assumption! Only proceed if you know there are no missing pieces of chromosome among your samples relative to the reference chromosome and/or BAMs are unavailable.')
        # VCFs contain sample names, require file sample_name\tpath_to_bam\n
        use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
        e = 'Could not locate a saved baga.CollectData.Genome-<genome_name>.baga for name given: {}'.format(args.genome_name)
        assert all([use_path_genome,use_name_genome]), e
        if args.reads_name:
            # baga pipeline information provided <== only way currently implemented
            # allow for multiple rounds of recalibration at end of CallVariants i.e., 1 or 2 and select 2 if available
            import baga
            # accumulate SNP/InDel VCFs and BAMs per reads group
            path_to_SNPs_VCFs = []
            path_to_InDels_VCFs = []
            paths_to_BAMs = []
            for these_reads in args.reads_name:
                print('Collecting VCFs for {}'.format(these_reads))
                # sometimes the baga from the previous step in the pipeline is not actually needed
                # so this name and file check could be relaxed
                use_path_reads,use_name_reads = check_baga_path('baga.PrepareReads.Reads', these_reads)
                e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga for reads group given: {}'.format(these_reads)
                assert all([use_path_reads,use_name_reads]), e
alns_name = '__'.join([use_name_reads, use_name_genome])
filein = 'baga.CallVariants.CallerGATK-{}.baga'.format(alns_name)
caller = CallVariants.CallerGATK(baga = filein)
if hasattr(caller, 'path_to_hardfiltered_SNPs') and hasattr(caller, 'path_to_hardfiltered_INDELs'):
if isinstance(caller.path_to_hardfiltered_SNPs[-1],list):
# called with --callsingle: one sample per VCF
VCFs = caller.path_to_hardfiltered_SNPs[-1]
else:
# called with --calleach --calljoint: multi-sample VCFs
VCFs = [caller.path_to_hardfiltered_SNPs[-1]]
for checkthis in VCFs:
try:
with open(checkthis) as fin:
#### list
path_to_SNPs_VCFs += [checkthis]
print('Found: {}'.format(checkthis))
except IOError:
print('Could not find: {}'.format(checkthis))
sys.exit('You may need to rerun the analysis that should have generated that file')
if isinstance(caller.path_to_hardfiltered_SNPs[-1],list):
# called with --callsingle: one sample per VCF
VCFs = caller.path_to_hardfiltered_INDELs[-1]
else:
# called with --calleach --calljoint: multi-sample VCFs
VCFs = [caller.path_to_hardfiltered_INDELs[-1]]
for checkthis in VCFs:
try:
with open(checkthis) as fin:
path_to_InDels_VCFs += [checkthis]
print('Found: {}'.format(checkthis))
except IOError:
print('Could not find: {}'.format(checkthis))
sys.exit('You may need to rerun the analysis that should have generated that file')
else:
print('ERROR: path to GATK hardfiltered variants not found in {}'.format(filein))
print('It seems the analysis described in {} is incomplete. Try completing or rerunning using the CallVariants module'.format(filein))
NotImplementedError('Building multiple alignments from one VCF per sample is not yet implemented: Coming soon!')
# see code in ApplyFilters section to collect other per sample VCFs
# greedily collect version of each vcf with the most filters applied
import re
from glob import glob
patt = re.compile('(__F_)')
path_to_SNPs_VCFs_use = []
for path in path_to_SNPs_VCFs:
numfilters = {}
for path2 in glob(path.replace('.vcf','*.vcf').replace('.VCF','*.VCF')):
numfilters[path2] = len(re.findall(patt,path2))
path_to_SNPs_VCFs_use += [sorted(numfilters, key = numfilters.get)[-1]]
print('Using:\n{}'.format('\n'.join(path_to_SNPs_VCFs_use)))
path_to_SNPs_VCFs = path_to_SNPs_VCFs_use
print('Collecting BAMs for {}'.format(these_reads))
alignments = AlignReads.SAMs(baga = 'baga.AlignReads.SAMs-{}.baga'.format(alns_name))
for BAM in alignments.ready_BAMs[-1]:
checkthis = BAM
try:
with open(checkthis) as fin:
paths_to_BAMs += [checkthis]
print('Found: {}'.format(checkthis))
except IOError:
print('Could not find: {}'.format(checkthis))
sys.exit('You may need to rerun the analysis that should have generated that file')
MSA_filename = '{}__{}_SNPs'.format(use_name_genome,'_'.join(args.reads_name))
paths_to_VCFs = path_to_SNPs_VCFs + path_to_InDels_VCFs
else:
# list of folders or files provided in args.vcfs_paths
# not part of a baga pipeline, so need BAMs linked to samples separately in --sample_bams
paths_to_VCFs = []
paths_to_BAMs = []
for path in args.vcfs_paths:
if os.path.isdir(path):
path_contents = os.listdir(path)
theseVCFs = [os.path.sep.join([path,f]) for f in path_contents if f[-3:] in ('VCF', 'vcf')]
e = 'No VCF files (*.vcf or *.VCF) found in:\n{}'.format(args.alignments_paths)
assert len(theseVCFs), e
paths_to_VCFs += theseVCFs
else:
# add file
paths_to_VCFs += [path]
# could forbid full size alignments without BAMs to inform missing regions
# if any([args.include_invariants, args.sample_bams]):
# e = '--include_invariants and --sample_bams must be used together.'
# assert all([args.include_invariants, args.sample_bams]), e
# but will allow it with a warning for flexibility in case BAMs unavailable etc
if args.sample_bams:
# path_to_VCFs and file linking samples to supplied instead of full baga pipeline info
try:
BAMs = dict([line.rstrip().split('\t') for line in open(args.sample_bams).readlines()])
except IOError:
print('there was a problem reading file: {}'.format(args.sample_bams))
except ValueError:
print('there was a problem parsing file: {}'.format(args.sample_bams))
# make a list for .getCoverageRanges()
paths_to_BAMs = sorted(BAMs.values())
path_to_InDels_VCFs = False
# could generate better name here?
if len(paths_to_VCFs) == 1:
vcf_name = '1_VCF'
else:
vcf_name = '{}_VCFs'.format(len(paths_to_VCFs))
MSA_filename = '{}__{}_SNPs'.format(use_name_genome,vcf_name)
print('Loaded VCF locations:\n{}'.format('\n'.join(paths_to_VCFs)))
### now collected required info: build MSA
print('Loading genome %s' % use_name_genome)
genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
MSA_builder = ComparativeAnalysis.MultipleSequenceAlignment(paths_to_VCFs)
MSA_builder.collectVariants(samples_to_include = args.include_samples,
samples_to_exclude = args.exclude_samples)
if len(paths_to_BAMs):
print('Loaded BAM locations:\n{}'.format('\n'.join(paths_to_BAMs)))
MSA_builder.getCoverageRanges(paths_to_BAMs)
MSA_builder.writeMSA( MSA_filename,
strict_core = args.core_only,
include_invariants = args.include_invariants,
genome = genome)
if args.infer_phylogeny:
# args
assert args.path_to_MSA is not None, '--path_to_MSA is required for --infer_phylogeny'
phylo_analyser = ComparativeAnalysis.Phylogenetics(args.path_to_MSA)
if args.program == 'phyML':
#currently only and default choice!
phylo_analyser.estimate_phylogeny_PhyML(num_bootstraps = args.num_bootstraps, collect_previous = False)
phylo_analyser.load_tree()
phylo_analyser.reroot_to_outgroup(args.out_group)
phylo_analyser.write_tree()
if args.infer_recombination:
assert args.path_to_MSA is not None, '--path_to_MSA is required for --infer_recombination (should be that used to estimate tree at --path_to_tree)'
assert args.path_to_tree is not None, '--path_to_tree is required for --infer_recombination (should be that estimated from alignment at --path_to_MSA)'
phylo_analyser = ComparativeAnalysis.Phylogenetics(args.path_to_MSA, path_to_tree = args.path_to_tree)
# bit of a fudge dealing with rooted tree . . . to be improved
phylo_analyser.collectPhyMLstats(path_to_stats_file = args.path_to_tree.replace('_rooted','').replace('_phyml_tree','') + '_phyml_stats')
phylo_analyser.infer_recombination(bootstraps = args.num_bootstraps) #, output_suffix = '_rooted')
if args.plot_phylogeny:
# should check either or etc here
if args.genome_name:
use_path_genome,use_name_genome = check_baga_path('baga.CollectData.Genome', args.genome_name)
print('Loading genome %s' % use_name_genome)
genome = CollectData.Genome(local_path = use_path_genome, format = 'baga')
genome_length = len(genome.sequence)
elif args.genome_length:
genome_length = args.genome_length
else:
print('Provide --genome_name or --genome_length for a scale bar unit of actual substitutions')
genome_length = False
if args.plot_transfers:
bits = args.path_to_tree.split(os.path.extsep)
bits[-2] = bits[-2]+'_transfers'
plot_output_path = os.path.extsep.join(bits[:-1] + ['svg'])
else:
plot_output_path = os.path.extsep.join(args.path_to_tree.split(os.path.extsep)[:-1] + ['svg'])
print('Plotting to {}'.format(plot_output_path))
phylo_plotter = ComparativeAnalysis.Plotter(
plot_output_path,
args.path_to_tree.replace('_rooted',''), # deal with rootedness at some point
# smaller values keeps edges in smaller central
# zone allowing for longer tip labels
plot_width_prop = 0.75,
plot_height_prop = 0.75)
# for translating labels: supply tab delimited list of tip label and desired label
# args.use_names
## either provide string that is path to raw ClonalFrameML output
#plot_transfers = 'Both_all_core_positions_rooted.importation_status.txt'
## or provide a SNPs_by_homoplasies processed dictionary from a .summarise_recombined_variants() analysis
#plot_transfers = phylo_analyser.SNPs_by_homoplasies
if args.plot_transfers:
plot_transfers = args.path_to_tree.replace('labelled_tree.newick','importation_status.txt').replace('_rooted','') # deal with rootedness at some point
else:
plot_transfers = False
if args.out_group:
outgroup_label_list = args.out_group
else:
outgroup_label_list = []
phylo_plotter.doPlot(outgroup_label_list = outgroup_label_list,
stroke_width = 3,
label_size = 15,
plotinnerlabels = False,
plottiplabels = True,
plot_extra_lines = False,
direction = 'right',
plot_transfers = plot_transfers,
use_names = args.use_names,
scale_bar = True,
genome_length = genome_length)
### Assemble Reads ###
if args.subparser == 'AssembleReads':
print('\n-- Reads Assembly module --')
import baga
from baga import AssembleReads
for this_reads_name in args.reads_name:
use_path_reads,use_name_reads = check_baga_path('baga.PrepareReads.Reads', this_reads_name)
e = 'Could not locate a saved baga.PrepareReads.Reads-<reads_name>.baga '\
'for reads group given: {}'.format(this_reads_name)
assert all([use_path_reads,use_name_reads]), e
print('Loading processed reads group %s' % use_name_reads)
prepared_reads = baga.bagaload(use_path_reads)
if args.program == 'spades':
reads = AssembleReads.DeNovo(baga = prepared_reads)
if args.max_memory:
use_mem_gigs = args.max_memory
else:
# round down available GBs
use_mem_gigs = int(baga.get_available_memory())
# for more reliable: only_assembler = True, careful = False
reads.SPAdes(mem_num_gigs = use_mem_gigs, only_assembler = False,
careful = True)
# if args.delete_intermediates:
# print('Checking on intermediate fastq files to delete . . .')
# if not args.adaptors and not args.trim:
# # load a previously adaptor cut reads set
# print('Loading processed reads group %s' % use_name_reads)
# reads = baga.bagaload('baga.PrepareReads.Reads-%s' % use_name_reads)
# total_size = 0
# # check stage 1 files and stage 2 files
# if hasattr(reads,'read_files') and hasattr(reads,'adaptorcut_read_files'):
# stage1s = check_files(reads.read_files)
# stage2s = check_files(reads.adaptorcut_read_files)
# if stage2s:
# if stage1s:
# # delete stage 1 files if have all <== not for now . . .
# # print('Deleting original or subsampled fastq files . . .')
# # total_size += delete_files(reads.read_files, extra = '_subsmp')
# print('Retaining original fastq files even though processed versions exist because re-downloading is time consuming!')
# else:
# print('Some or all of original or subsampled fastq files seem to have been deleted')
# else:
# print('Missing some cutadapt-processed files: not deleting originals or subsampled')
# if hasattr(reads,'adaptorcut_read_files') and hasattr(reads,'trimmed_read_files'):
# stage2s = check_files(reads.adaptorcut_read_files)
# stage3s = check_files(reads.trimmed_read_files)
# if stage3s:
# if stage2s:
# # delete stage 2 files if have all
# print('Deleting cutadapt-processed fastq files . . .')
# total_size += delete_files(reads.adaptorcut_read_files)
# else:
# print('Some or all of cutadapt-processed fastq files seem to have been deleted')
# else:
# print('Missing some sickle-processed files: not deleting cutadapt-processed')
# if total_size:
# print('Saved {:.2f} Gb by deleting intermediate files'.format(total_size/1000000000.0))
# else:
# print('Nothing deleted.')
| daveuu/baga | baga_cli.py | Python | gpl-3.0 | 167,531 | [
"BWA",
"pysam"
] | 721ba3dc265c99cb6a15a8f242fa01d65d2d0711a2b0f8e134644b1c54bd58a6 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import subprocess
import os
import shutil
from rmgpy import getPath
from rmgpy.qm.main import QMSettings, QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.gaussian import Gaussian
from rmgpy.qm.mopac import Mopac
class TestQMSettings(unittest.TestCase):
    """Unit tests for the QMSettings class."""

    def setUp(self):
        """Create one fully-populated and one empty QMSettings before each test."""
        rmgpy_root = os.path.normpath(os.path.join(getPath(), '..'))
        qm_file_store = os.path.join(rmgpy_root, 'testing', 'qm', 'QMfiles')
        self.settings1 = QMSettings(
            software='mopac',
            method='pm3',
            fileStore=qm_file_store,
            scratchDirectory=None,
            onlyCyclics=False,
            maxRadicalNumber=0,
        )
        self.settings2 = QMSettings()

    def testCheckAllSet(self):
        """
        Test that checkAllSet() works correctly.

        A complete settings object must pass silently; an empty one must
        raise AssertionError.
        """
        try:
            self.settings1.checkAllSet()
        except AssertionError:
            self.fail("checkAllSet() raised unexpected AssertionError.")
        with self.assertRaises(AssertionError):
            self.settings2.checkAllSet()
class TestQMCalculator(unittest.TestCase):
    """
    Contains unit tests for the QMCalculator class.
    """
    # Probe for the external QM executables once, at class-creation time, so
    # the skipIf decorators below can consult the results.
    mopExecutablePath = Mopac.executablePath
    if not os.path.exists(mopExecutablePath):
        NO_MOPAC = NO_LICENCE = True
    else:
        NO_MOPAC = False
        process = subprocess.Popen(mopExecutablePath,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate("\n")
        # An unlicensed MOPAC prints installation instructions on stderr.
        NO_LICENCE = 'To install the MOPAC license' in stderr

    gaussExecutablePath = Gaussian.executablePath
    NO_GAUSSIAN = not os.path.exists(gaussExecutablePath)

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        RMGpy_path = os.path.normpath(os.path.join(getPath(), '..'))
        fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles')

        # MOPAC calculators; pm8 is deliberately unsupported (see
        # testGetThermoData).
        self.mop1 = QMCalculator(software='mopac', method='pm3', fileStore=fileStore)
        self.mop2 = QMCalculator(software='mopac', method='pm6')
        self.mop3 = QMCalculator(software='mopac', method='pm7', fileStore=fileStore)
        self.mop4 = QMCalculator(software='mopac', method='pm8', fileStore=fileStore)

        # Gaussian calculators; pm7 is deliberately unsupported.
        self.gauss1 = QMCalculator(software='gaussian', method='pm3')
        self.gauss2 = QMCalculator(software='gaussian', method='pm6', fileStore=fileStore)
        self.gauss3 = QMCalculator(software='gaussian', method='pm7', fileStore=fileStore)

        # Molpro is not a supported thermo backend.
        self.molpro1 = QMCalculator(software='molpro', method='mp2', fileStore=fileStore)

        self.qmmol1 = QMCalculator(fileStore=fileStore)
        self.qmmol2 = QMCalculator(fileStore=fileStore)

    def testSetDefaultOutputDirectory(self):
        """
        Test that setDefaultOutputDirectory() works correctly.
        """
        self.assertIsNotNone(self.mop1.settings.fileStore)
        self.assertIsNotNone(self.mop3.settings.fileStore)
        self.assertIsNotNone(self.gauss2.settings.fileStore)
        self.assertIsNone(self.mop2.settings.fileStore)
        self.assertIsNone(self.gauss1.settings.fileStore)
        self.assertIsNone(self.mop1.settings.scratchDirectory)
        self.assertIsNone(self.mop2.settings.scratchDirectory)
        self.assertIsNone(self.mop3.settings.scratchDirectory)
        self.assertIsNone(self.gauss1.settings.scratchDirectory)
        self.assertIsNone(self.gauss2.settings.scratchDirectory)

        # Now set the default directories for those not set
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)

        # After the call every calculator must have both directories set.
        self.assertIsNotNone(self.mop1.settings.fileStore)
        self.assertIsNotNone(self.mop2.settings.fileStore)
        self.assertIsNotNone(self.mop3.settings.fileStore)
        self.assertIsNotNone(self.gauss1.settings.fileStore)
        self.assertIsNotNone(self.gauss2.settings.fileStore)
        self.assertIsNotNone(self.mop1.settings.scratchDirectory)
        self.assertIsNotNone(self.mop2.settings.scratchDirectory)
        self.assertIsNotNone(self.mop3.settings.scratchDirectory)
        self.assertIsNotNone(self.gauss1.settings.scratchDirectory)
        self.assertIsNotNone(self.gauss2.settings.scratchDirectory)

    def testInitialize(self):
        """
        Test that initialize() works correctly.
        """
        # Now set the default directories for those not set
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)
        try:
            self.mop1.initialize()
            self.mop2.initialize()
            self.mop3.initialize()
            self.gauss1.initialize()
            self.gauss2.initialize()
        except AssertionError:
            self.fail("initialize() raised unexpected AssertionError.")
        except Exception:
            self.fail("initialize() raised Exception. Output file paths not correctly set.")

    def testGetThermoData(self):
        """
        Test that getThermoData() fails when expected.
        """
        outputDirectory = os.path.join(self.mop4.settings.fileStore, '..', '..')
        self.mop4.setDefaultOutputDirectory(outputDirectory)
        self.gauss3.setDefaultOutputDirectory(outputDirectory)
        self.molpro1.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # Each unsupported method/software must raise on its own; with a single
        # assertRaises block only the first call would ever execute.
        with self.assertRaises(Exception):
            self.mop4.getThermoData(mol)
        with self.assertRaises(Exception):
            self.gauss3.getThermoData(mol)
        with self.assertRaises(Exception):
            self.molpro1.getThermoData(mol)

    @unittest.skipIf(NO_MOPAC, "MOPAC not found. Try resetting your environment variables if you want to use it.")
    @unittest.skipIf(NO_LICENCE, "MOPAC license not installed. Run mopac for instructions")
    def testGetThermoDataMopac(self):
        """
        Test that Mopac getThermoData() works correctly.
        """
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # Wipe any previous results so the calculations actually run.
        for directory in (self.mop1.settings.fileStore, self.mop1.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)
        for directory in (self.mop2.settings.fileStore, self.mop2.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)
        for directory in (self.mop3.settings.fileStore, self.mop3.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        thermo1 = self.mop1.getThermoData(mol)
        thermo2 = self.mop2.getThermoData(mol)
        thermo3 = self.mop3.getThermoData(mol)
        self.assertTrue(thermo1.comment.startswith('QM MopacMolPM3'))
        self.assertTrue(thermo2.comment.startswith('QM MopacMolPM6'))
        self.assertTrue(thermo3.comment.startswith('QM MopacMolPM7'))
        self.assertAlmostEqual(thermo1.H298.value_si, 169708.0608, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo1.S298.value_si, 334.5007584, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo2.H298.value_si, 167704.4270, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo2.S298.value_si, 338.0999241, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo3.H298.value_si, 166168.8571, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo3.S298.value_si, 336.3330406, 1)  # to 1 decimal place

    @unittest.skipIf(NO_GAUSSIAN, "Gaussian not found. Try resetting your environment variables if you want to use it.")
    def testGetThermoDataGaussian(self):
        """
        Test that Gaussian getThermoData() works correctly.
        """
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # Wipe any previous results for BOTH calculators (the original cleaned
        # gauss1's fileStore twice and never gauss2's).
        for directory in (self.gauss1.settings.fileStore, self.gauss1.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)
        for directory in (self.gauss2.settings.fileStore, self.gauss2.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        thermo1 = self.gauss1.getThermoData(mol)
        thermo2 = self.gauss2.getThermoData(mol)
        self.assertTrue(thermo1.comment.startswith('QM GaussianMolPM3'))
        self.assertTrue(thermo2.comment.startswith('QM GaussianMolPM6'))
        self.assertAlmostEqual(thermo1.H298.value_si, 169908.3376, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo1.S298.value_si, 335.5438748, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo2.H298.value_si, 169326.2504, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo2.S298.value_si, 338.2696063, 0)  # to 0 decimal places
################################################################################

if __name__ == '__main__':
    # Run this module's tests with a verbose runner when executed directly.
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
| nyee/RMG-Py | rmgpy/qm/mainTest.py | Python | mit | 9,396 | [
"Gaussian",
"MOPAC",
"Molpro"
] | aeed0a6bdb350d299708dd787ef8e3c6b6e69c79d372b74a91b36e887b2f9778 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBrian(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""

    # Upstream project page and canonical PyPI source tarball.
    homepage = "https://www.briansimulator.org"
    pypi = "brian/brian-1.4.3.tar.gz"

    version('1.4.3', sha256='c881dcfcd1a21990f9cb3cca76cdd868111cfd9e227ef5c1b13bb372d2efeaa4')

    # Minimum versions presumably mirror Brian 1.4.x's setup requirements —
    # TODO confirm against the upstream setup.py.
    depends_on('py-matplotlib@0.90.1:', type=('build', 'run'))
    depends_on('py-numpy@1.4.1:', type=('build', 'run'))
    depends_on('py-scipy@0.7.0:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-brian/package.py | Python | lgpl-2.1 | 687 | [
"Brian"
] | 322a7fcba447f96a7b6bef9608505844d3cecdbca598b8dd703283aad573db55 |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
def disable_GUI(code):
    """Strip the GUI event loop from the sample script source.

    Replaces the first occurrence of ``visualizer.run(1)`` with a headless
    ``system.integrator.run(steps)`` call so the sample can be integrated
    under the test runner without opening a window.

    Raises AssertionError if the expected call is not present.
    """
    # Renamed from 'breakpoint', which shadows the Python 3.7+ builtin.
    marker = "visualizer.run(1)"
    assert marker in code, "expected 'visualizer.run(1)' in sample script"
    return code.replace(marker, "steps=1\nsystem.integrator.run(steps)", 1)
# Import and execute the sample in-process with the GUI stripped out and the
# integration shortened to 100 steps. "@SAMPLES_DIR@" is presumably substituted
# by the build system before this test runs — TODO confirm.
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@SAMPLES_DIR@/visualization_cellsystem.py",
    substitutions=disable_GUI, steps=100)
@skipIfMissingFeatures
class Sample(ut.TestCase):
    # No explicit assertions: importing the sample module above already ran
    # it, so a successful import (and an accessible system handle) is the test.
    system = sample.system


if __name__ == "__main__":
    ut.main()
| espressomd/espresso | testsuite/scripts/samples/test_visualization_cellsystem.py | Python | gpl-3.0 | 1,263 | [
"ESPResSo"
] | e7d65457b3a28587a39b18fa00c584b9d06105b3632f908fedcec6d7b4009277 |
#!/usr/bin/env python
#
# This program straightens the spinal cord of an anatomic image, apply a smoothing in the z dimension and apply
# the inverse warping field to get back the curved spinal cord but smoothed.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Simon Levy
# Modified: 2014-09-01
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: maybe no need to convert RPI at the beginning because strainghten spinal cord already does it!
import getopt
import os
import sys
import time
import sct_utils as sct
from sct_orientation import set_orientation
from numpy import append, insert, nonzero, transpose, array
from nibabel import load, Nifti1Image, save
from scipy import ndimage
from copy import copy
class Param:
    """Default runtime parameters for this script."""

    def __init__(self):
        # 1 = delete the temporary working folder once processing finishes
        self.remove_temp_files = 1
        # verbosity level passed to the sct printing helpers
        self.verbose = 1
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Initialization
fname_anat = ''
fname_centerline = ''
sigma = 3 # default value of the standard deviation for the Gaussian smoothing (in terms of number of voxels)
remove_temp_files = param.remove_temp_files
verbose = param.verbose
start_time = time.time()
# Check input param
try:
opts, args = getopt.getopt(sys.argv[1:], 'hi:c:r:s:v:')
except getopt.GetoptError as err:
print str(err)
usage()
if not opts:
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-c'):
fname_centerline = arg
elif opt in ('-i'):
fname_anat = arg
elif opt in ('-r'):
remove_temp_files = arg
elif opt in ('-s'):
sigma = arg
elif opt in ('-v'):
verbose = int(arg)
# Display usage if a mandatory argument is not provided
if fname_anat == '' or fname_centerline == '':
usage()
# Display arguments
print '\nCheck input arguments...'
print ' Volume to smooth .................. ' + fname_anat
print ' Centerline ........................ ' + fname_centerline
print ' FWHM .............................. '+str(sigma)
print ' Verbose ........................... '+str(verbose)
# Check existence of input files
print('\nCheck existence of input files...')
sct.check_file_exist(fname_anat, verbose)
sct.check_file_exist(fname_centerline, verbose)
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
# create temporary folder
print('\nCreate temporary folder...')
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp)
# copy files to temporary folder
print('\nCopy files...')
sct.run('isct_c3d '+fname_anat+' -o '+path_tmp+'/anat.nii')
sct.run('isct_c3d '+fname_centerline+' -o '+path_tmp+'/centerline.nii')
# go to tmp folder
os.chdir(path_tmp)
# Change orientation of the input image into RPI
print '\nOrient input volume to RPI orientation...'
set_orientation('anat.nii', 'RPI', 'anat_rpi.nii')
# Change orientation of the input image into RPI
print '\nOrient centerline to RPI orientation...'
set_orientation('centerline.nii', 'RPI', 'centerline_rpi.nii')
## new
### Make sure that centerline file does not have halls
file_c = load('centerline_rpi.nii')
data_c = file_c.get_data()
hdr_c = file_c.get_header()
data_temp = copy(data_c)
data_temp *= 0
data_output = copy(data_c)
data_output *= 0
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('centerline_rpi.nii')
## Change seg to centerline if it is a segmentation
sct.printv('\nChange segmentation to centerline if it is a centerline...\n')
z_centerline = [iz for iz in range(0, nz, 1) if data_c[:,:,iz].any() ]
nz_nonz = len(z_centerline)
if nz_nonz==0 :
print '\nERROR: Centerline is empty'
sys.exit()
x_centerline = [0 for iz in range(0, nz_nonz, 1)]
y_centerline = [0 for iz in range(0, nz_nonz, 1)]
#print("z_centerline", z_centerline,nz_nonz,len(x_centerline))
print '\nGet center of mass of the centerline ...'
for iz in xrange(len(z_centerline)):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(array(data_c[:,:,z_centerline[iz]]))
data_temp[x_centerline[iz], y_centerline[iz], z_centerline[iz]] = 1
## Complete centerline
sct.printv('\nComplete the halls of the centerline if there are any...\n')
X,Y,Z = data_temp.nonzero()
x_centerline_extended = [0 for i in range(0, nz, 1)]
y_centerline_extended = [0 for i in range(0, nz, 1)]
for iz in range(len(Z)):
x_centerline_extended[Z[iz]] = X[iz]
y_centerline_extended[Z[iz]] = Y[iz]
X_centerline_extended = nonzero(x_centerline_extended)
X_centerline_extended = transpose(X_centerline_extended)
Y_centerline_extended = nonzero(y_centerline_extended)
Y_centerline_extended = transpose(Y_centerline_extended)
# initialization: we set the extrem values to avoid edge effects
x_centerline_extended[0] = x_centerline_extended[X_centerline_extended[0]]
x_centerline_extended[-1] = x_centerline_extended[X_centerline_extended[-1]]
y_centerline_extended[0] = y_centerline_extended[Y_centerline_extended[0]]
y_centerline_extended[-1] = y_centerline_extended[Y_centerline_extended[-1]]
# Add two rows to the vector X_means_smooth_extended:
# one before as means_smooth_extended[0] is now diff from 0
# one after as means_smooth_extended[-1] is now diff from 0
X_centerline_extended = append(X_centerline_extended, len(x_centerline_extended)-1)
X_centerline_extended = insert(X_centerline_extended, 0, 0)
Y_centerline_extended = append(Y_centerline_extended, len(y_centerline_extended)-1)
Y_centerline_extended = insert(Y_centerline_extended, 0, 0)
#recurrence
count_zeros_x=0
count_zeros_y=0
for i in range(1,nz-1):
if x_centerline_extended[i]==0:
x_centerline_extended[i] = 0.5*(x_centerline_extended[X_centerline_extended[i-1-count_zeros_x]] + x_centerline_extended[X_centerline_extended[i-count_zeros_x]])
count_zeros_x += 1
if y_centerline_extended[i]==0:
y_centerline_extended[i] = 0.5*(y_centerline_extended[Y_centerline_extended[i-1-count_zeros_y]] + y_centerline_extended[Y_centerline_extended[i-count_zeros_y]])
count_zeros_y += 1
# Save image centerline completed to be used after
sct.printv('\nSave image completed: centerline_rpi_completed.nii...\n')
for i in range(nz):
data_output[x_centerline_extended[i],y_centerline_extended[i],i] = 1
img = Nifti1Image(data_output, None, hdr_c)
save(img, 'centerline_rpi_completed.nii')
#end new
# Straighten the spinal cord
print '\nStraighten the spinal cord...'
sct.run('sct_straighten_spinalcord -i anat_rpi.nii -c centerline_rpi_completed.nii -x spline -v '+str(verbose))
# Smooth the straightened image along z
print '\nSmooth the straightened image along z...'
sct.run('isct_c3d anat_rpi_straight.nii -smooth 0x0x'+str(sigma)+'vox -o anat_rpi_straight_smooth.nii', verbose)
# Apply the reversed warping field to get back the curved spinal cord
print '\nApply the reversed warping field to get back the curved spinal cord...'
sct.run('sct_apply_transfo -i anat_rpi_straight_smooth.nii -o anat_rpi_straight_smooth_curved.nii -d anat.nii -w warp_straight2curve.nii.gz -x spline', verbose)
# come back to parent folder
os.chdir('..')
# Generate output file
print '\nGenerate output file...'
sct.generate_output_file(path_tmp+'/anat_rpi_straight_smooth_curved.nii', file_anat+'_smooth'+ext_anat)
# Remove temporary files
if remove_temp_files == 1:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp)
# Display elapsed time
elapsed_time = time.time() - start_time
print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s\n'
# to view results
sct.printv('Done! To view results, type:', verbose)
sct.printv('fslview '+file_anat+' '+file_anat+'_smooth &\n', verbose, 'info')
#=======================================================================================================================
# usage
#=======================================================================================================================
def usage():
print '\n' \
''+os.path.basename(__file__)+'\n' \
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' \
'Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>\n' \
'\n'\
'DESCRIPTION\n' \
' Smooth the spinal cord along its centerline. Steps are: 1) Spinal cord is straightened (using\n' \
' centerline), 2) a Gaussian kernel is applied in the superior-inferior direction, 3) then cord is\n' \
' de-straightened as originally.\n' \
'\n' \
'USAGE\n' \
' sct_smooth_spinalcord -i <image> -c <centerline/segmentation>\n' \
'\n' \
'MANDATORY ARGUMENTS\n' \
' -i <image> input image to smooth.\n' \
' -c <centerline> spinal cord centerline or segmentation.\n' \
'\n' \
'OPTIONAL ARGUMENTS\n' \
' -s sigma of the smoothing Gaussian kernel (in voxel). Default=3.' \
' -r {0,1} remove temporary files. Default='+str(param_default.remove_temp_files)+'\n' \
' -v {0,1,2} verbose. 0: nothing, 1: small, 2: extended, 3: fig. Default='+str(param_default.verbose)+'\n' \
' -h help. Show this message.\n' \
'\n'
sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
    # Instantiate the run-time parameters and their defaults (param_default is
    # read by usage() to print default values) before handing off to main().
    param = Param()
    param_default = Param()
main() | benjamindeleener/scad | scripts/sct_smooth_spinalcord.py | Python | mit | 10,698 | [
"Gaussian"
] | a4b534094c973033f23675863b2a467b394dce7bd0af0396a638892bcf9ff0df |
"""
SIRV_Generate_heatmap.py
This script generates a heatmap of the estimated mean abundance compared to the expected abundance in all SIRVs.
see python path/to/SIRV_Generate_heatmap.py -h
updated: 25 Oct 2016 Patrick Schagerl
(c)2016 Lexogen GmbH
Examples:
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Generate_heatmap.py -count_list input_files/alignments/NGS1.73_star_out/incomplete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -quant cufflinks -o results/SIRV_heatmap_I
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Generate_heatmap.py -count_list input_files/alignments/NGS1.73_star_out/complete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -quant cufflinks -o results/SIRV_heatmap_C
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Generate_heatmap.py -count_list input_files/alignments/NGS1.73_star_out/overannotated/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -quant cufflinks -o results/SIRV_heatmap_O
"""
# can be used to visualize the overannotated SIRVs (value <1/64...green; else...red) marked with _O in png/svg
include_overannotated = False
import sys
# for Galaxy version
import SIRV_links
sys.path.append(SIRV_links.matplotlib)
### imports
import numpy
from numpy import ma
import textwrap
import matplotlib
import matplotlib.pyplot as plt
import math
#import openpyxl
import argparse
from matplotlib.pyplot import *
import csv
import SIRV_preprocessing_inputs
from matplotlib.colors import LinearSegmentedColormap
#from openpyxl.formatting.rule import ColorScaleRule
#from openpyxl.cell import get_column_letter
### function
def heatmap(experiment, sample, replicates, controls, SIRV_mix_files, counts, thres, disable_output, disable_plot,output_heatmap_png, output_heatmap_csv, output_heatmap_svg, summary_exp):
    """Generate a heatmap of log fold changes (LFC) between measured and
    expected SIRV transcript abundances, written as csv, png and svg.

    Parameters (types inferred from usage -- confirm against callers):
        experiment      -- experiment name/id (string)
        sample          -- sample name assigned to every count file (list)
        replicates      -- number of replicates per sample (list of ints)
        controls        -- control mix per sample: 'E0'/'E1'/'E2' (list)
        SIRV_mix_files  -- files describing the expected SIRV abundances
        counts          -- dict: SIRV transcript id -> per-file count array
        thres           -- relative quantity threshold (floor for counts)
        disable_output  -- suppress console output if True
        disable_plot    -- suppress the interactive plot window if True
        output_heatmap_png / output_heatmap_csv / output_heatmap_svg -- paths
        summary_exp     -- optional nested dict; output metadata is recorded
                           under ...['TOOL_RUN'][1]['HEATMAP']

    Returns the (possibly updated) summary_exp.
    """
    if not disable_output:
        print "create heatmap"
    samples = len(controls)
    # repl_incr[i] is the cumulative index of the first replicate of sample i,
    # so slice [repl_incr[i]:repl_incr[i+1]] selects sample i's replicates.
    repl_incr = numpy.zeros(len(replicates) + 1, dtype=int)
    repl_incr[0] = 0
    for i in range(0, len(replicates)):
        repl_incr[i + 1] = sum(replicates[0 : i + 1])
    # experiment_assigned = []
    sample_assigned = []
    for i in repl_incr[ : -1]:
        # experiment_assigned.append(experiment)
        sample_assigned.append(sample[i])
    # obtain the SIRVs version
    SIRV_type=None
    if "SIRV103" not in counts:
        SIRV_type = "I"
    elif "SIRV104" in counts:
        SIRV_type = "O"
    else:
        SIRV_type = "C"
    # obtain SIRV information (sum, abundance, gene, submix)
    SIRV, sum_mix, submixes = SIRV_preprocessing_inputs.get_SIRV_information(SIRV_mix_files,SIRV_type)
    # Python 2: counts.values() is a list of per-transcript arrays; summing on
    # axis 0 gives the total SIRV signal per count file.
    sum_repl_counts = numpy.sum(counts.values(), axis=0)
    mean_sum_repl_counts = numpy.zeros(sum(replicates))
    norm_factor = numpy.zeros(sum(replicates))
    for i in range(0, samples):
        mean_sum_repl_counts[repl_incr[i] : repl_incr[i + 1]] = numpy.mean(sum_repl_counts[repl_incr[i] : repl_incr[i + 1]])
        norm_factor[repl_incr[i] : repl_incr[i + 1]] = float(1)/(numpy.mean(sum_repl_counts[repl_incr[i] : repl_incr[i + 1]]))*sum_mix[controls[i]]
    # Normalize counts and clamp everything below the detection threshold.
    normalized_values = {}
    for key in counts:
        normalized_values[key] = counts[key]*norm_factor
        normalized_values[key][normalized_values[key] < thres] = thres
    # Mean normalized value per sample (averaged over that sample's replicates).
    mean_normalized_values = {}
    for key in counts:
        mean_normalized_values[key] = []
        for i in range(0, samples):
            mean_normalized_values[key].append(numpy.mean(normalized_values[key][repl_incr[i] : repl_incr[i + 1]]))
    #### Calculate the LFC ####
    #add all SIRVs (complete, no overannotated) to LFC
    LFC = {}
    for key in SIRV:
        if SIRV[key]['E0'] != 0:
            LFC[key] = numpy.full(samples, float('nan'))
        # include overannotated just if include_overannotated is True
        else:
            if include_overannotated:
                LFC[key+"_O"] = numpy.full(samples, float('nan'))
            # SIRV[key]['E0'] = float(1)/64
    #calculate LFC for all SIRVs (no overannotated SIRVs --> value/0)
    for key in mean_normalized_values:
        if SIRV[key]['E0'] != 0:
            LFC[key] = numpy.empty(samples)
            for i in range(0, len(controls)):
                LFC[key][i] = math.log(mean_normalized_values[key][i]/SIRV[key][controls[i]], 2)
        # include overannotated just if include_overannotated is True if value below 1/64...green and else...red
        else:
            if include_overannotated:
                LFC[key+"_O"] = numpy.empty(samples)
                for i in range(0, len(controls)):
                    if mean_normalized_values[key][i] < float(1)/64:
                        LFC[key+"_O"][i] = 0 # green
                    else:
                        LFC[key+"_O"][i] = 1 # red
    #### HEATMAP in Excel template ####
    # (Legacy openpyxl export, deliberately kept but disabled.)
    # wb = openpyxl.load_workbook(args.input_heatmap)
    #
    # sheet = wb.get_sheet_by_name('heatmap')
    #
    # #get the next free column
    # column_insert = 8
    # while sheet.cell(row=4, column=column_insert).value is not None:
    #     column_insert += 3
    #
    #insert the values at the right position
    number_SIRVs = 0
    for key in LFC:
        number_SIRVs += 1
    # line = 5
    # while key != sheet.cell(row=line, column=3).value:
    #     line += 1
    # for i in range(0, len(controls)):
    #     sheet.cell(row=line, column=column_insert + i).value=LFC[key][i]
    #     sheet.cell(row=line, column=column_insert + i).font=sheet.cell(row=line, column=8).font
    #     sheet.cell(row=line, column=column_insert + i).border=sheet.cell(row=line, column=8).border
    #     sheet.cell(row=line, column=column_insert + i).number_format=sheet.cell(row=line, column=8).number_format
    #     sheet.cell(row=line, column=column_insert + i).alignment=sheet.cell(row=line, column=8).alignment
    #insert the SIRV control (E0, E1 or E2) in row 4
    # for i in range(0, len(controls)):
    #     sheet.cell(row=4, column=column_insert + i).font=sheet.cell(row=4, column=8).font
    #     sheet.cell(row=3, column=column_insert + i).font=sheet.cell(row=3, column=8).font
    #     sheet.cell(row=2, column=column_insert + i).font=sheet.cell(row=2, column=8).font
    #     sheet.cell(row=4, column=column_insert + i).border=sheet.cell(row=4, column=8).border
    #     sheet.cell(row=3, column=column_insert + i).border=sheet.cell(row=3, column=8).border
    #     sheet.cell(row=2, column=column_insert + i).border=sheet.cell(row=2, column=8).border
    #     sheet.cell(row=4, column=column_insert + i).fill=sheet.cell(row=4, column=8).fill
    #     sheet.cell(row=3, column=column_insert + i).fill=sheet.cell(row=3, column=8).fill
    #     sheet.cell(row=2, column=column_insert + i).fill=sheet.cell(row=2, column=8).fill
    #     sheet.cell(row=4, column=column_insert + i).number_format=sheet.cell(row=4, column=8).number_format
    #     sheet.cell(row=3, column=column_insert + i).number_format=sheet.cell(row=3, column=8).number_format
    #     sheet.cell(row=2, column=column_insert + i).number_format=sheet.cell(row=2, column=8).number_format
    #     sheet.cell(row=4, column=column_insert + i).alignment=sheet.cell(row=4, column=8).alignment
    #     sheet.cell(row=3, column=column_insert + i).alignment=sheet.cell(row=3, column=8).alignment
    #     sheet.cell(row=2, column=column_insert + i).alignment=sheet.cell(row=2, column=8).alignment
    #     sheet.cell(row=4, column=column_insert + i).value=controls[i]
    #     sheet.cell(row=3, column=column_insert + i).value=sample_assigned[i]
    #     sheet.cell(row=2, column=column_insert + i).value=experiment_assigned[i]
    # sheet.conditional_formatting.add('H5:' + get_column_letter(column_insert + i) + '73', ColorScaleRule(start_type='num', start_value=-1, start_color='00425f', mid_type='num', mid_value=0, mid_color='95c00f', end_type='num', end_value=1, end_color='FF0000') )
    # wb.save(args.output_heatmap)
    ### HEATMAP in csv ###
    controls_csv = []
    experiments_csv = []
    for item in controls:
        controls_csv.append(item)
        experiments_csv.append(experiment)
    with open(output_heatmap_csv, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["experiment"] + experiments_csv)
        writer.writerow(["sample"] + sample_assigned)
        writer.writerow(["control"] + controls_csv)
        for key in sorted(LFC):
            s = []
            s.append(key)
            for item in LFC[key]:
                s.append(item)
            writer.writerow(s)
    ### without csv import
    # controls_csv = []
    # for item in controls:
    #     controls_csv.append(item)
    #
    # with open(output_heatmap_csv, 'w') as csvfile:
    #     csvfile.write("experiment")
    #     for i in range(0, len(sample_assigned)):
    #         csvfile.write(","+str(experiment))
    #     csvfile.write("\n")
    #     csvfile.write("sample")
    #     for i in range(0, len(sample_assigned)):
    #         csvfile.write(","+sample_assigned[i])
    #     csvfile.write("\n")
    #     csvfile.write("control")
    #     for i in range(0, len(controls_csv)):
    #         csvfile.write(","+controls_csv[i])
    #     csvfile.write("\n")
    #     for key in sorted(LFC):
    #         s = key
    #         for item in LFC[key]:
    #             s+=","+str(item)
    #         csvfile.write(s+"\n")
    ### GENERATE HEATMAP IN PYPLOT FIGURE ###
    SIRV_list = numpy.chararray(number_SIRVs, itemsize=7)
    #get all SIRV names
    a = 0
    for key in sorted(LFC):
        SIRV_list[a] = key
        a += 1
    #get all SIRV gene names (first 5 characters of the transcript id, e.g. SIRV1)
    SIRV_gene_list={}
    for item in SIRV_list:
        SIRV_gene_list[item[0:5]]=1
    number_SIRV_genes = len(SIRV_gene_list)
    #print number_SIRV_genes
    #allocate variables (one blank separator row between consecutive genes)
    values = numpy.empty([number_SIRVs + number_SIRV_genes -1, len(replicates)]) ###for pyplot
    if include_overannotated == False:
        SIRV_list_axis = numpy.chararray(number_SIRVs + number_SIRV_genes -1, itemsize=7)
    else:
        SIRV_list_axis = numpy.chararray(number_SIRVs + number_SIRV_genes -1, itemsize=9)
    # include_overannotated = False 7 for just SIRVs
    # include_overannotated = True 9 including _O for pyplot
    #assign values
    a = 0
    for key in sorted(LFC):
        if a > 0:
            if key[0:5] != previous:
                # New gene: insert a NaN row so the heatmap shows a gap.
                for i in range(0, len(controls)):
                    values[a, i] = float('nan')
                SIRV_list_axis[a] = ""
                a+=1
        for i in range(0, len(controls)):
            values[a, i] = LFC[key][i]
        previous = key[0:5]
        SIRV_list_axis[a] = key
        a += 1
    # Blue (-1, underestimated) -> green (0) -> red (+1, overestimated);
    # NaN separator rows are drawn white via set_bad.
    colors = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
    n_bins = 100
    cm = LinearSegmentedColormap.from_list("my_color_map", colors, N=n_bins)
    cm.set_bad('w', 1.0)
    #values = ma.masked_where(values == 0, values)
    values = ma.masked_invalid(values)
    #print number_SIRVs+number_SIRV_genes-1
    # NOTE(review): Python 2 integer division in the figsize height -- presumably
    # intended (one inch per 5 rows); confirm if porting to Python 3.
    fig, ax = plt.subplots(1, figsize=(len(replicates) + 2, (number_SIRVs+number_SIRV_genes-1)/5))
    #fig, ax = plt.subplots(1, figsize=(5, number_SIRVs/5))
    ax.set_ylim(0, number_SIRVs+number_SIRV_genes-1)
    ax.set_xlim(0, len(controls))
    p = ax.pcolormesh(values,vmin=-1, vmax=1, edgecolor='w', cmap=cm)
    cbar=fig.colorbar(p, ticks=[-1, 0, 1], shrink=0.5, pad=float(0.1)/len(replicates), fraction=0.577*numpy.power(len(replicates), -0.733))
    cbar.ax.set_yticklabels(['<= -1', '0', '>= 1'])
    fig.subplots_adjust(left=float(1)/(len(replicates) + 2), bottom=0.01, right=1, top=0.96, wspace = 0.01)
    x_axis=numpy.linspace(0.5, len(replicates) - 0.5, len(replicates))
    repl = []
    for i in range(0, len(x_axis)):
        repl.append(sample_assigned[i] + "\n" + controls[i])
    ax.set_xticks(x_axis)
    ax.set_xticklabels(repl, fontsize='small')
    y_axis = numpy.linspace(0.5, number_SIRVs+number_SIRV_genes-1 - 0.5, number_SIRVs+number_SIRV_genes-1)
    ax.set_yticks(y_axis)
    ax.set_yticklabels(SIRV_list_axis)
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    ax.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off')
    #plt.title("heatmap" + "\n" + str(experiment), y=1.035)
    #plt.savefig("test.svg", dpi=fig.dpi)#https://github.com/matplotlib/matplotlib/issues/786/
    #plt.savefig("test.svg", dpi=100)
    figure = plt.gcf() # get current figure
    figure.set_size_inches(len(replicates) + 2, (number_SIRVs+number_SIRV_genes-1)/5)
    # when saving, specify the DPI
    plt.savefig(output_heatmap_png, dpi=figure.dpi, format="png")
    plt.savefig(output_heatmap_svg, dpi=figure.dpi, format="svg")
    #plt.imsave("test_2.png")#, bbox_inches='tight')
    if not disable_plot:
        plt.show()
    else:
        plt.close()
    # Record output file locations in the experiment summary, if one was given.
    if summary_exp is not None:
        summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['HEATMAP'] = {}
        summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['HEATMAP']['OUTPUT_PNG'] = output_heatmap_png
        summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['HEATMAP']['OUTPUT_CSV'] = output_heatmap_csv
        summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['HEATMAP']['OUTPUT_SVG'] = output_heatmap_svg
        summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['HEATMAP']['LEGEND'] = "Figure: Heatmap of individual SIRV transcripts are visualized comparing measured and expected values (log fold changes)"
    return summary_exp
### calling from the command line
if __name__ == "__main__":
    # create the help and user friendly method for data input/selection
    parser = argparse.ArgumentParser(prog='PROG', usage='%(prog)s [options]')
    # NOTE(review): the parser above is immediately replaced -- the first
    # ArgumentParser is dead code kept from the original.
    parser = argparse.ArgumentParser(prog='python SIRV_Generate_heatmap.py', formatter_class=argparse.RawDescriptionHelpFormatter,description=textwrap.dedent('''This script generates a heatmap of individual SIRV transcripts comparing measured and expected log fold change (LFC) values. blue...understimated abundance in SIRV transcript, red...overestimated abundance in SIRV transcript. Outputs are a csv, svg, and png file.
NOTES:
Normalization and filtering in SIRVs:
1. SIRV reads are normalized using the overall mean and expected sum of the SIRV mix
2. SIRV values below the relative quantity threshold are set to the relative quantity threshold
'''), epilog="""
Example usage:
python path/to/SIRV_Generate_boxplot.py -e "Experiment name" -count_list input_files/alignments/NGS_star_out/*/isoforms.fpkm_tracking -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -quant cufflinks -o boxplot_output
End of help""")
    required = parser.add_argument_group('required arguments')
    required.add_argument('-e', dest='experiment', help='name/id of experiment', type=str, required=True, metavar='name')
    required.add_argument('-count_list', type=str, nargs='+', help='List of files with count values', required=True, metavar='count_file')
    required.add_argument('-s', dest='sample', help='name/id of sample (assign an sample to every count file)', type=str, nargs='+', required=True, metavar='name')
    required.add_argument('-c',dest='control', help='controls spiked in in replicate (assign a control to every count file)', type=str, nargs='+', choices=['E0', 'E1', 'E2'], required=True)
    parser.add_argument('-thres', dest='thres', type=float, help='relative quantity threshold which is the minimum count value, like the minimum detection value (default=1e-6)', metavar='float', default=1e-6)
    #required.add_argument('-ih', dest='input_heatmap', type=str, help='heatmap input', metavar='file', default='input_files/heatmap_template.xlsx')
    required.add_argument('-quant', dest='quant', help='quantification method used for abundance estimation', required=True, type=str, choices=['cufflinks', 'mix2', 'RSEM'])
    # parser.add_argument('-JSON', dest='JSON_file', help='disable the plot output',type=str, metavar='json_file')
    parser.add_argument('-do', dest='disable_output', action='store_true', help='disable console output')
    parser.add_argument('-dp', dest='disable_plot', action='store_true', help='disable plot output')
    required.add_argument('-o', dest='output_heatmap', type=str, help='heatmap output', metavar='file', required=True)
    args = parser.parse_args()
    # Every count file needs exactly one sample name and one control mix.
    if len(args.count_list) != len(args.control) or len(args.count_list) != len(args.sample):
        if not args.disable_output:
            print "please assign a experiment and sample to every dataset"
    else:
        if not args.disable_output:
            print "processing files"
        ### get experiment and samples and sort them, obtain an order for retrieval of the count files
        order,samples_ordered, replicates, controls=SIRV_preprocessing_inputs.obtain_information(args.sample,args.control,args.disable_output)
        ### obtain count values already sorted
        count_SIRVs,_,_,_=SIRV_preprocessing_inputs.counts_converter(order, args.count_list,args.quant)
        heatmap(args.experiment, samples_ordered, replicates, controls, SIRV_links.SIRV_mix_files, count_SIRVs, args.thres, args.disable_output, args.disable_plot, args.output_heatmap + ".png", args.output_heatmap + ".csv", args.output_heatmap + ".svg", None)
| sirvsuite-support/sirvsuite | tools/SIRV_Generate_heatmap.py | Python | gpl-3.0 | 16,115 | [
"Galaxy"
] | a1e9f4a6f56c1187120648d2d07e9d397369a12e29e41399609bc62596f74794 |
# We only import libraries needed for plotting
# Other libraries are imported in the class definition file, G3D_class.py,
# which contains all process and variable function definitions.
import matplotlib
matplotlib.use('pdf')   # non-interactive backend: render to file only
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import G3D_class
# We instantiate an object of the class G3D, just by giving the path to the netcdf file to work with
# Up to now I'm working with 4D netcdf files containing several variables.
# Outputs from different files can be merged easily, as can be seen in other examples
G = G3D_class.G3D('../data/CART1CLIP/1980.nc')
# All loaded variables are attributes of the G3D instance.
# For instance the variable "bat" is defined directly when the object is instantiated.
# Other are loaded only when needed.
# Variables are python Masked_array, so they have an attribute mask which is an array of booleans
# Here we want to define a mask based on bathymetry.
# Masked points are EXCLUDED from averages, so maskDS (masking bat<50) keeps
# the deep open sea, while maskSH (masking bat>=50) keeps the shallow shelf.
maskDS= (G.bat<50 ) & ~(G.bat.mask) # Mask should be True where masked
maskSH= (G.bat>=50) & ~(G.bat.mask) # Mask should be True where masked
# All processing functions are called as function of the G3D instance.
# Variable name is given as an argument. Some functions allows more argument.
# This would give the basin averaged time series of salinity
T1 = G.avgspatial('SAL')
# The avgspatial function enables an optional mask argument
# Note also , that we can use a variable name that is not defined in the netcdf file.
# In this case the toolbox will automatically look for the function "instance_SSS"
sssDS=G.avgspatial('SSS',maskDS)
sssSH=G.avgspatial('SSS',maskSH)
# The following is general python plotting ..
# the "dates" attributes is also loaded automatically
####################
# 1st figure :
####################
locator = mdates.AutoDateLocator()
formator = mdates.AutoDateFormatter(locator)
fig=plt.figure(figsize=(15, 8))
ax=plt.subplot(1, 1, 1)
ax.xaxis_date()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formator)
plt.plot(G.dates,sssSH, label = 'Average surface salinity on the shelf')
plt.plot(G.dates,sssDS, label = 'Average surface salinity in the open sea')
plt.title('Salinity')
plt.ylabel('Salinity - [p.s.u.]')
fig.savefig(G.figoutputdir+'Simple.png')
| acapet/GHER-POSTPROC | Examples/EasyExample.py | Python | gpl-3.0 | 2,826 | [
"NetCDF"
] | cc6f31258ba4f6560cc76c6e3144bb1c689a031b1c81aaaeba513233acf85f66 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module runs a dendritic network in continual learning setting where each task
consists of learning to classify samples drawn from one of two multivariate normal
distributions.
Dendritic weights can either be hardcoded (to induce overlapping or non-overlapping
subnetworks) or learned. All output heads are used for both training and inference.
Usage: adjust the config parameters `kw`, `dendrite_sparsity`, `weight_init`,
`dendrite_init`, and `freeze_dendrites` (all in `model_args`).
"""
import pprint
import time
import numpy as np
import torch
import torch.nn.functional as F
from nupic.research.frameworks.continual_learning.experiments import (
ContinualLearningExperiment,
)
from nupic.research.frameworks.dendrites.modules import DendriticMLP
from nupic.research.frameworks.vernon import mixins
from nupic.torch.duty_cycle_metrics import max_entropy
from projects.dendrites.gaussian_classification.gaussian import GaussianDataset
# ------ Experiment class
class DendritesExperiment(mixins.RezeroWeights,
                          ContinualLearningExperiment):
    """Continual-learning experiment for a dendritic MLP.

    RezeroWeights presumably re-zeroes pruned weights after each optimizer
    step (see `post_optimizer_step` usage in `train_model`) -- confirm against
    the mixin's implementation.
    """

    def setup_experiment(self, config):
        # Standard setup: builds model, data loaders, optimizer, etc.
        super().setup_experiment(config)

        # Manually set dendritic weights to invoke subnetworks; if the user sets
        # `freeze_dendrites=True`, we assume dendritic weights are intended to be
        # hardcoded
        if self.model.freeze_dendrites:
            self.model.hardcode_dendritic_weights(
                context_vectors=self.train_loader.dataset._contexts, init="overlapping"
            )
# ------ Training & evaluation function
def train_model(exp):
    """Run one training epoch over ``exp.train_loader``.

    ``exp`` must provide: ``model`` (called as ``model(data, context)``),
    ``device``, ``optimizer``, ``error_loss`` (an NLL-style loss expecting
    log-probabilities), and ``post_optimizer_step(model)`` (rezeroes weights).

    Each loader item is a 3-item tuple of the form ((data, context), target).
    """
    exp.model.train()
    for (data, context), target in exp.train_loader:
        data = data.to(exp.device)
        context = context.to(exp.device)
        target = target.to(exp.device)

        exp.optimizer.zero_grad()
        output = exp.model(data, context)

        # Outputs are placed through a log softmax since `error_loss` is
        # `F.nll_loss`, which assumes it will receive 'logged' values.
        # FIX: pass `dim=1` explicitly -- the class dimension of the
        # (batch, num_classes) output (see `output.max(1, ...)` in
        # `evaluate_model`); omitting it is deprecated in PyTorch.
        output = F.log_softmax(output, dim=1)
        error_loss = exp.error_loss(output, target)
        error_loss.backward()
        exp.optimizer.step()

        # Rezero weights if necessary
        exp.post_optimizer_step(exp.model)
def evaluate_model(exp):
    """Evaluate ``exp.model`` on ``exp.val_loader`` and return mean accuracy.

    Each loader item is a 3-item tuple of the form ((data, context), target).
    All output units are used to compute accuracy.

    Returns:
        float: fraction of correctly classified samples (0 if the loader is
        empty).

    FIX: the original ended with ``... else 0,`` -- the trailing comma turned
    the return value into a 1-tuple, which callers had to unwrap with an
    ``isinstance(acc_task, tuple)`` check. The comma is removed; existing
    callers still work since they accept either form.
    """
    exp.model.eval()
    total = 0

    loss = torch.tensor(0., device=exp.device)
    correct = torch.tensor(0, device=exp.device)

    with torch.no_grad():
        for (data, context), target in exp.val_loader:
            data = data.to(exp.device)
            context = context.to(exp.device)
            target = target.to(exp.device)

            output = exp.model(data, context)

            # All output units are used to compute loss / accuracy
            loss += exp.error_loss(output, target)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum()
            total += len(data)

    mean_acc = torch.true_divide(correct, total).item() if total > 0 else 0
    return mean_acc
def run_experiment(config):
    """Train and evaluate a continual-learning experiment described by ``config``.

    Trains task-by-task, periodically reporting per-task accuracies, then
    reports aggregate accuracy over all tasks and (if k-Winners is enabled)
    the entropy of the k-Winners layers.

    NOTE(review): reads the module-level globals ``num_tasks`` and
    ``num_epochs``, which are only defined under ``__main__`` below -- this
    function will NameError if imported and called from elsewhere.
    """
    exp_class = config["experiment_class"]
    exp = exp_class()
    exp.setup_experiment(config)
    exp.model = exp.model.to(exp.device)

    # --------------------------- CONTINUAL LEARNING PHASE -------------------------- #
    for task_id in range(num_tasks):

        # Train model on current task
        t1 = time.time()
        exp.train_loader.sampler.set_active_tasks(task_id)
        for _epoch_id in range(num_epochs):
            train_model(exp)
        t2 = time.time()
        print(f"train time [task {task_id}]: {t2 - t1}")

        # Evaluate model accuracy on each task separately
        if task_id in config["epochs_to_validate"]:
            print(f"\n=== AFTER TASK {task_id} ===\n")
            for eval_task_id in range(task_id + 1):
                exp.val_loader.sampler.set_active_tasks(eval_task_id)
                acc_task = evaluate_model(exp)
                # evaluate_model historically returned a 1-tuple; unwrap it.
                if isinstance(acc_task, tuple):
                    acc_task = acc_task[0]
                print(f"task {eval_task_id} accuracy: {acc_task}")
            t3 = time.time()
            print(f"\nevaluation time: {t3 - t2}")
            print(f"====================\n")
    # ------------------------------------------------------------------------------- #

    # Report final aggregate accuracy
    exp.val_loader.sampler.set_active_tasks(range(num_tasks))
    acc_task = evaluate_model(exp)
    if isinstance(acc_task, tuple):
        acc_task = acc_task[0]
    print(f"Final test accuracy: {acc_task}")

    # Print entropy of layers (max_entropy assumes 5% winners per layer)
    max_possible_entropy = max_entropy(exp.model.hidden_size,
                                       int(0.05 * exp.model.hidden_size))
    if exp.model.kw:
        print(f" KW1 entropy: {exp.model.kw1.entropy().item()}")
        print(f" KW2 entropy: {exp.model.kw2.entropy().item()}")
        print(f" max entropy: {max_possible_entropy}")
    print("")
print("")
if __name__ == "__main__":
    num_tasks = 50
    num_epochs = 1  # Number of training epochs per task

    # Experiment configuration: 50 tasks of 2-class Gaussian classification,
    # one dendritic segment per task, with 2048-dimensional inputs/contexts.
    config = dict(
        experiment_class=DendritesExperiment,

        dataset_class=GaussianDataset,
        dataset_args=dict(
            num_classes=2 * num_tasks,
            num_tasks=num_tasks,
            training_examples_per_class=2500,
            validation_examples_per_class=500,
            dim_x=2048,
            dim_context=2048,
            seed=np.random.randint(0, 1000),
        ),

        model_class=DendriticMLP,
        model_args=dict(
            input_size=2048,
            output_size=2 * num_tasks,
            hidden_size=2048,
            num_segments=num_tasks,
            dim_context=2048,
            kw=True,  # Turning on k-Winners when hardcoding dendrites to induce
                      # non-overlapping subnetworks results in 5% winners
            dendrite_sparsity=0.0,  # Irrelevant if `freeze_dendrites=True`
            weight_init="modified",  # Must be one of {"kaiming", "modified"}
            dendrite_init="modified",  # Irrelevant if `freeze_dendrites=True`
            freeze_dendrites=False
        ),

        batch_size=64,
        val_batch_size=512,
        epochs=num_epochs,
        epochs_to_validate=[0, 3, 6, 10, 20, num_tasks - 1],
        num_tasks=num_tasks,
        num_classes=2 * num_tasks,
        distributed=False,
        seed=np.random.randint(0, 10000),

        optimizer_class=torch.optim.SGD,
        optimizer_args=dict(lr=2e-1),
    )

    print("Experiment config: ")
    pprint.pprint(config)
    print("")

    run_experiment(config)
| numenta/nupic.research | projects/dendrites/gaussian_classification/run_dendritic_network.py | Python | agpl-3.0 | 7,724 | [
"Gaussian"
] | 609db8a3d3f9a5fab63ae65fde847d5ffeb67ef4cac6b6dce87d4b60cb790a2c |
# coding: utf-8
"""0MQ Socket pure Python methods."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import random
import codecs
import zmq
from zmq.backend import Socket as SocketBase
from .poll import Poller
from . import constants
from .attrsettr import AttributeSetter
from zmq.error import ZMQError, ZMQBindError
from zmq.utils import jsonapi
from zmq.utils.strtypes import bytes,unicode,basestring
from .constants import (
SNDMORE, ENOTSUP, POLLIN,
int64_sockopt_names,
int_sockopt_names,
bytes_sockopt_names,
)
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Socket(SocketBase, AttributeSetter):
"""The ZMQ socket object
To create a Socket, first create a Context::
ctx = zmq.Context.instance()
then call ``ctx.socket(socket_type)``::
s = ctx.socket(zmq.ROUTER)
"""
#-------------------------------------------------------------------------
# Hooks for sockopt completion
#-------------------------------------------------------------------------
def __dir__(self):
keys = dir(self.__class__)
for collection in (
bytes_sockopt_names,
int_sockopt_names,
int64_sockopt_names,
):
keys.extend(collection)
return keys
#-------------------------------------------------------------------------
# Getting/Setting options
#-------------------------------------------------------------------------
setsockopt = SocketBase.set
getsockopt = SocketBase.get
def set_string(self, option, optval, encoding='utf-8'):
"""set socket options with a unicode object
This is simply a wrapper for setsockopt to protect from encoding ambiguity.
See the 0MQ documentation for details on specific options.
Parameters
----------
option : int
The name of the option to set. Can be any of: SUBSCRIBE,
UNSUBSCRIBE, IDENTITY
optval : unicode string (unicode on py2, str on py3)
The value of the option to set.
encoding : str
The encoding to be used, default is utf8
"""
if not isinstance(optval, unicode):
raise TypeError("unicode strings only")
return self.set(option, optval.encode(encoding))
setsockopt_unicode = setsockopt_string = set_string
def get_string(self, option, encoding='utf-8'):
"""get the value of a socket option
See the 0MQ documentation for details on specific options.
Parameters
----------
option : int
The option to retrieve.
Returns
-------
optval : unicode string (unicode on py2, str on py3)
The value of the option as a unicode string.
"""
if option not in constants.bytes_sockopts:
raise TypeError("option %i will not return a string to be decoded"%option)
return self.getsockopt(option).decode(encoding)
getsockopt_unicode = getsockopt_string = get_string
def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
"""bind this socket to a random port in a range
Parameters
----------
addr : str
The address string without the port to pass to ``Socket.bind()``.
min_port : int, optional
The minimum port in the range of ports to try (inclusive).
max_port : int, optional
The maximum port in the range of ports to try (exclusive).
max_tries : int, optional
The maximum number of bind attempts to make.
Returns
-------
port : int
The port the socket was bound to.
Raises
------
ZMQBindError
if `max_tries` reached before successful bind
"""
for i in range(max_tries):
try:
port = random.randrange(min_port, max_port)
self.bind('%s:%s' % (addr, port))
except ZMQError as exception:
if not exception.errno == zmq.EADDRINUSE:
raise
else:
return port
raise ZMQBindError("Could not bind socket to random port.")
def get_hwm(self):
"""get the High Water Mark
On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM
"""
major = zmq.zmq_version_info()[0]
if major >= 3:
# return sndhwm, fallback on rcvhwm
try:
return self.getsockopt(zmq.SNDHWM)
except zmq.ZMQError as e:
pass
return self.getsockopt(zmq.RCVHWM)
else:
return self.getsockopt(zmq.HWM)
def set_hwm(self, value):
"""set the High Water Mark
On libzmq ≥ 3, this sets both SNDHWM and RCVHWM
"""
major = zmq.zmq_version_info()[0]
if major >= 3:
raised = None
try:
self.sndhwm = value
except Exception as e:
raised = e
try:
self.rcvhwm = value
except Exception:
raised = e
if raised:
raise raised
else:
return self.setsockopt(zmq.HWM, value)
hwm = property(get_hwm, set_hwm,
"""property for High Water Mark
Setting hwm sets both SNDHWM and RCVHWM as appropriate.
It gets SNDHWM if available, otherwise RCVHWM.
"""
)
#-------------------------------------------------------------------------
# Sending and receiving messages
#-------------------------------------------------------------------------
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
    """Send a sequence of buffers as one multipart message.

    zmq.SNDMORE is OR'd into `flags` for every part before the last.

    Parameters
    ----------
    msg_parts : iterable
        Sendable objects (Frame, bytes, buffer-providers), one per frame.
    flags : int, optional
        Send flags; SNDMORE is handled automatically.
    copy : bool, optional
        Copying vs. non-copying send.
    track : bool, optional
        Track completion of non-copying sends (ignored if copy=True).

    Returns
    -------
    None if copy or not track, else a MessageTracker whose ``pending``
    stays True until the final send completes.
    """
    last = len(msg_parts) - 1
    for i in range(last):
        self.send(msg_parts[i], SNDMORE | flags, copy=copy, track=track)
    # The final frame goes out without SNDMORE, closing the message.
    return self.send(msg_parts[last], flags, copy=copy, track=track)
def recv_multipart(self, flags=0, copy=True, track=False):
    """Receive a complete multipart message as a list.

    Parameters
    ----------
    flags : int, optional
        Any supported recv flag (e.g. NOBLOCK, which raises ZMQError with
        EAGAIN when no message is ready).
    copy : bool, optional
        If True each part is returned as bytes; if False as a Frame.
    track : bool, optional
        Track non-copying receives (ignored if copy=True).

    Returns
    -------
    msg_parts : list
        All frames of the message, as bytes or Frames per `copy`.
    """
    frames = [self.recv(flags, copy=copy, track=track)]
    # RCVMORE stays truthy while further frames of this message remain.
    while self.getsockopt(zmq.RCVMORE):
        frames.append(self.recv(flags, copy=copy, track=track))
    return frames
def send_string(self, u, flags=0, copy=False, encoding='utf-8'):
    """Encode a unicode string and send it as one message.

    0MQ carries raw bytes, so text must be encoded/decoded at the socket
    boundary.

    Parameters
    ----------
    u : unicode string (unicode on py2, str on py3)
    flags : int, optional
        Any valid send flag.
    encoding : str [default: 'utf-8']
        Codec used to encode `u`.
    """
    if isinstance(u, basestring):
        return self.send(u.encode(encoding), flags=flags, copy=copy)
    raise TypeError("unicode/str objects only")
send_unicode = send_string
def recv_string(self, flags=0, encoding='utf-8'):
    """Receive a unicode string, as sent by send_string.

    Parameters
    ----------
    flags : int
        Any valid recv flag.
    encoding : str [default: 'utf-8']
        Codec used to decode the incoming bytes.

    Returns
    -------
    s : unicode string (unicode on py2, str on py3)
    """
    # Receive without copying and decode the frame's bytes directly.
    frame = self.recv(flags=flags, copy=False)
    return codecs.decode(frame.bytes, encoding)
recv_unicode = recv_string
def send_pyobj(self, obj, flags=0, protocol=-1):
    """Pickle a Python object and send it as one message.

    Parameters
    ----------
    obj : Python object
        Object to serialize and send.
    flags : int
        Any valid send flag.
    protocol : int
        Pickle protocol; -1 selects the highest available, 0 is the most
        portable across platforms.
    """
    return self.send(pickle.dumps(obj, protocol), flags)
def recv_pyobj(self, flags=0):
    """Receive one message and unpickle it into a Python object.

    Parameters
    ----------
    flags : int
        Any valid recv flag.

    Returns
    -------
    obj : Python object
        The deserialized object.
    """
    return pickle.loads(self.recv(flags))
def send_json(self, obj, flags=0):
    """JSON-serialize a Python object and send it as one message.

    Parameters
    ----------
    obj : Python object
        Object to serialize and send.
    flags : int
        Any valid send flag.
    """
    if jsonapi.jsonmod is None:
        raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
    return self.send(jsonapi.dumps(obj), flags)
def recv_json(self, flags=0):
    """Receive one message and JSON-deserialize it.

    Parameters
    ----------
    flags : int
        Any valid recv flag.

    Returns
    -------
    obj : Python object
        The deserialized object.
    """
    if jsonapi.jsonmod is None:
        raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
    return jsonapi.loads(self.recv(flags))
_poller_class = Poller
def poll(self, timeout=None, flags=POLLIN):
    """Poll this socket for events.

    Parameters
    ----------
    timeout : int [default: None]
        Milliseconds to wait; None waits forever.
    flags : bitfield (int) [default: POLLIN]
        Events to poll for (combination of POLLIN|POLLOUT).

    Returns
    -------
    events : bitfield (int)
        Events ready at return; 0 if the timeout elapsed with none ready.
    """
    if self.closed:
        raise ZMQError(ENOTSUP)
    poller = self._poller_class()
    poller.register(self, flags)
    ready = dict(poller.poll(timeout))
    # Absent key means no event fired before the timeout.
    return ready.get(self, 0)
def get_monitor_socket(self, events=None, addr=None):
    """Return a connected PAIR socket that receives this socket's monitor events.

    .. versionadded:: libzmq-4.0
    .. versionadded:: 14.0

    Parameters
    ----------
    events : bitfield (int) [default: zmq.EVENT_ALL]
        Bitmask of events to monitor.
    addr : string [default: None]
        Endpoint for the monitoring pair; auto-generated when None.

    Returns
    -------
    socket : (PAIR)
        Already connected and ready to receive event messages.
    """
    # Monitoring requires libzmq >= 4.
    if zmq.zmq_version_info() < (4,):
        raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version())
    if addr is None:
        # Derive a unique inproc endpoint from our file descriptor.
        addr = "inproc://monitor.s-%d" % self.FD
    if events is None:
        events = zmq.EVENT_ALL
    self.monitor(addr, events)
    pair = self.context.socket(zmq.PAIR)
    pair.connect(addr)
    return pair
__all__ = ['Socket']
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/zmq/sugar/socket.py | Python | gpl-2.0 | 14,978 | [
"Brian"
] | 3719db0a732f12534f95f5612f434b72b648af3a281db890936d39b20b149a80 |
# -*- coding: utf-8 -*-
"""This module contains helper functions for other IO functions."""
from .exc import ImportVersionWarning
from ..config import PYBEL_MINIMUM_IMPORT_VERSION
from ..struct import BELGraph
from ..utils import tokenize_version
def raise_for_old_graph(graph):
    """Raise an ImportVersionWarning if the BEL graph was produced by a legacy version of PyBEL.

    :param graph: A BEL graph
    :raises ImportVersionWarning: If the BEL graph was produced by a legacy version of PyBEL
    """
    version = tokenize_version(graph.pybel_version)
    if version < PYBEL_MINIMUM_IMPORT_VERSION:
        raise ImportVersionWarning(version, PYBEL_MINIMUM_IMPORT_VERSION)
def raise_for_not_bel(graph):
    """Raise a TypeError if the argument is not a BEL graph.

    :param graph: A BEL graph
    :raises TypeError: If the argument is not a BEL graph
    """
    if isinstance(graph, BELGraph):
        return
    raise TypeError("Not a BELGraph: {}".format(graph))
def ensure_version(graph: BELGraph, check_version: bool = True) -> BELGraph:
    """Ensure the graph was produced by at least PyBEL v:data:`PYBEL_MINIMUM_IMPORT_VERSION`.

    That constant tracks the last release that changed the graph data definition.

    :param graph: A BEL Graph
    :param check_version: When False, skip the inspection and return the graph unchanged
    :raises ImportVersionWarning: If the BEL graph was produced by a legacy version of PyBEL
    """
    if not check_version:
        return graph
    raise_for_old_graph(graph)
    return graph
| pybel/pybel | src/pybel/io/utils.py | Python | mit | 1,585 | [
"Pybel"
] | 9d2255790d9ab3b98395ca0a681eeccabb229d6f00f2cbba64c503efba6f70fc |
"""
==========================================
Affine Registration in 3D
==========================================
This example explains how to compute an affine transformation to register two
3D volumes by maximization of their Mutual Information [Mattes03]_. The
optimization strategy is similar to that implemented in ANTS [Avants11]_.
"""
import numpy as np
from dipy.viz import regtools
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.data.fetcher import fetch_syn_data, read_syn_data
from dipy.align.imaffine import (transform_centers_of_mass,
AffineMap,
MutualInformationMetric,
AffineRegistration)
from dipy.align.transforms import (TranslationTransform3D,
RigidTransform3D,
AffineTransform3D)
"""
Let's fetch two b0 volumes, the static image will be the b0 from the Stanford
HARDI dataset
"""
fetch_stanford_hardi()
nib_stanford, gtab_stanford = read_stanford_hardi()
# Use only the first volume (a b0) as the static image.
# NOTE(review): get_data()/get_affine() are deprecated in newer nibabel
# (get_fdata()/.affine); kept here for compatibility with the pinned version.
static = np.squeeze(nib_stanford.get_data())[..., 0]
static_grid2world = nib_stanford.get_affine()
"""
Now the moving image
"""
fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
moving = np.array(nib_syn_b0.get_data())
moving_grid2world = nib_syn_b0.get_affine()
"""
We can see that the images are far from aligned by drawing one on top of
the other. The images don't even have the same number of voxels, so in order
to draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image, we can do this by "transforming"
the moving image using an identity transform
"""
identity = np.eye(4)
# Resample `moving` onto the static grid with the identity transform so the
# two volumes can be overlaid voxel-for-voxel despite differing shapes.
affine_map = AffineMap(identity,
                       static.shape, static_grid2world,
                       moving.shape, moving_grid2world)
resampled = affine_map.transform(moving)
# One overlay image per anatomical axis (mid-slices along axes 0, 1, 2).
regtools.overlay_slices(static, resampled, None, 0,
                        "Static", "Moving", "resampled_0.png")
regtools.overlay_slices(static, resampled, None, 1,
                        "Static", "Moving", "resampled_1.png")
regtools.overlay_slices(static, resampled, None, 2,
                        "Static", "Moving", "resampled_2.png")
"""
.. figure:: resampled_0.png
:align: center
.. figure:: resampled_1.png
:align: center
.. figure:: resampled_2.png
:align: center
**Input images before alignment**.
"""
"""
We can obtain a very rough (and fast) registration by just aligning the centers
of mass of the two images
"""
# Rough, fast initial alignment: match the centers of mass of both volumes.
c_of_mass = transform_centers_of_mass(static, static_grid2world,
                                      moving, moving_grid2world)
"""
We can now transform the moving image and draw it on top of the static image,
registration is not likely to be good, but at least they will occupy roughly
the same space
"""
transformed = c_of_mass.transform(moving)
regtools.overlay_slices(static, transformed, None, 0,
                        "Static", "Transformed", "transformed_com_0.png")
regtools.overlay_slices(static, transformed, None, 1,
                        "Static", "Transformed", "transformed_com_1.png")
regtools.overlay_slices(static, transformed, None, 2,
                        "Static", "Transformed", "transformed_com_2.png")
"""
.. figure:: transformed_com_0.png
:align: center
.. figure:: transformed_com_1.png
:align: center
.. figure:: transformed_com_2.png
:align: center
**Registration result by aligning the centers of mass of the images**.
"""
"""
This was just a translation of the moving image towards the static image, now
we will refine it by looking for an affine transform. We first create the
similarity metric (Mutual Information) to be used. We need to specify the
number of bins to be used to discretize the joint and marginal probability
distribution functions (PDF), a typical value is 32. We also need to specify
the percentage (an integer in (0, 100]) of voxels to be used for computing the
PDFs, the most accurate registration will be obtained by using all voxels, but
it is also the most time-consuming choice. We specify full sampling by passing
None instead of an integer
"""
nbins = 32  # histogram bins for the joint/marginal PDFs
sampling_prop = None  # None = use every voxel when estimating the PDFs
metric = MutualInformationMetric(nbins, sampling_prop)
"""
To avoid getting stuck at local optima, and to accelerate convergence, we use a
multi-resolution strategy (similar to ANTS [Avants11]_) by building a Gaussian
Pyramid. To have as much flexibility as possible, the user can specify how this
Gaussian Pyramid is built. First of all, we need to specify how many
resolutions we want to use. This is indirectly specified by just providing a
list of the number of iterations we want to perform at each resolution. Here we
will just specify 3 resolutions and a large number of iterations, 10000 at the
coarsest resolution, 1000 at the medium resolution and 100 at the finest. These
are the default settings
"""
level_iters = [10000, 1000, 100]  # optimizer iterations per pyramid level, coarsest first
"""
To compute the Gaussian pyramid, the original image is first smoothed at each
level of the pyramid using a Gaussian kernel with the requested sigma. A good
initial choice is [3.0, 1.0, 0.0], this is the default
"""
sigmas = [3.0, 1.0, 0.0]  # Gaussian smoothing per level; 0.0 = no smoothing at full resolution
"""
Now we specify the sub-sampling factors. A good configuration is [4, 2, 1],
which means that, if the original image shape was (nx, ny, nz) voxels, then the
shape of the coarsest image will be about (nx//4, ny//4, nz//4), the shape in
the middle resolution will be about (nx//2, ny//2, nz//2) and the image at the
finest scale has the same size as the original image. This set of factors is
the default
"""
factors = [4, 2, 1]  # sub-sampling factor per pyramid level
"""
Now we go ahead and instantiate the registration class with the configuration
we just prepared
"""
# All registration stages below reuse this configured optimizer.
affreg = AffineRegistration(metric=metric,
                            level_iters=level_iters,
                            sigmas=sigmas,
                            factors=factors)
"""
Using AffineRegistration we can register our images in as many stages as we
want, providing previous results as initialization for the next (the same logic
as in ANTS). The reason why it is useful is that registration is a non-convex
optimization problem (it may have more than one local optima), which means that
it is very important to initialize as close to the solution as possible. For
example, lets start with our (previously computed) rough transformation
aligning the centers of mass of our images, and then refine it in three stages.
First look for an optimal translation. The dictionary regtransforms contains
all available transforms, we obtain one of them by providing its name and the
dimension (either 2 or 3) of the image we are working with (since we are
aligning volumes, the dimension is 3)
"""
transform = TranslationTransform3D()
params0 = None  # None = start from the transform's identity parameters
# Initialize from the center-of-mass alignment computed above.
starting_affine = c_of_mass.affine
translation = affreg.optimize(static, moving, transform, params0,
                              static_grid2world, moving_grid2world,
                              starting_affine=starting_affine)
"""
If we look at the result, we can see that this translation is much better than
simply aligning the centers of mass
"""
transformed = translation.transform(moving)
regtools.overlay_slices(static, transformed, None, 0,
                        "Static", "Transformed", "transformed_trans_0.png")
regtools.overlay_slices(static, transformed, None, 1,
                        "Static", "Transformed", "transformed_trans_1.png")
regtools.overlay_slices(static, transformed, None, 2,
                        "Static", "Transformed", "transformed_trans_2.png")
"""
.. figure:: transformed_trans_0.png
:align: center
.. figure:: transformed_trans_1.png
:align: center
.. figure:: transformed_trans_2.png
:align: center
**Registration result by translating the moving image, using MI**.
"""
"""
Now lets refine with a rigid transform (this may even modify our previously
found optimal translation)
"""
transform = RigidTransform3D()
params0 = None
# Refine the optimal translation with a rigid (rotation + translation) fit.
starting_affine = translation.affine
rigid = affreg.optimize(static, moving, transform, params0,
                        static_grid2world, moving_grid2world,
                        starting_affine=starting_affine)
"""
This produces a slight rotation, and the images are now better aligned
"""
transformed = rigid.transform(moving)
regtools.overlay_slices(static, transformed, None, 0,
                        "Static", "Transformed", "transformed_rigid_0.png")
regtools.overlay_slices(static, transformed, None, 1,
                        "Static", "Transformed", "transformed_rigid_1.png")
regtools.overlay_slices(static, transformed, None, 2,
                        "Static", "Transformed", "transformed_rigid_2.png")
"""
.. figure:: transformed_rigid_0.png
:align: center
.. figure:: transformed_rigid_1.png
:align: center
.. figure:: transformed_rigid_2.png
:align: center
**Registration result with a rigid transform, using Mutual Information**.
"""
"""
Finally, lets refine with a full affine transform (translation, rotation, scale
and shear), it is safer to fit more degrees of freedom now, since we must be
very close to the optimal transform
"""
transform = AffineTransform3D()
params0 = None
# Final refinement with full affine degrees of freedom (adds scale + shear),
# safe now that we are close to the optimum.
starting_affine = rigid.affine
affine = affreg.optimize(static, moving, transform, params0,
                         static_grid2world, moving_grid2world,
                         starting_affine=starting_affine)
"""
This results in a slight shear and scale
"""
transformed = affine.transform(moving)
regtools.overlay_slices(static, transformed, None, 0,
                        "Static", "Transformed", "transformed_affine_0.png")
regtools.overlay_slices(static, transformed, None, 1,
                        "Static", "Transformed", "transformed_affine_1.png")
regtools.overlay_slices(static, transformed, None, 2,
                        "Static", "Transformed", "transformed_affine_2.png")
"""
.. figure:: transformed_affine_0.png
:align: center
.. figure:: transformed_affine_1.png
:align: center
.. figure:: transformed_affine_2.png
:align: center
**Registration result with an affine transform, using Mutual Information**.
.. [Mattes03] Mattes, D., Haynor, D. R., Vesselle, H., Lewellen, T. K.,
Eubank, W. (2003). PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical Imaging,
22(1), 120-8.
.. [Avants11] Avants, B. B., Tustison, N., & Song, G. (2011). Advanced
Normalization Tools ( ANTS ), 1-35.
.. include:: ../links_names.inc
"""
| StongeEtienne/dipy | doc/examples/affine_registration_3d.py | Python | bsd-3-clause | 10,585 | [
"Gaussian"
] | e8932a2aad821a823ace2548b56666cff73a1aecf05f3ae0610524daa7a70de6 |
# vi:sts=4:sw=4:et
"""Code for parsing OpenEmbedded license strings"""
import ast
import re
from fnmatch import fnmatchcase as fnmatch
# NOTE: StandardError exists only on Python 2 — this module targets py2.
class LicenseError(StandardError):
    """Base class for all OpenEmbedded license-string errors."""
    pass
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed as an expression."""
    def __init__(self, licensestr, exc):
        LicenseError.__init__(self)
        self.licensestr = licensestr
        self.exc = exc
    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
class InvalidLicense(LicenseError):
    """Raised when a license string contains a disallowed token."""
    def __init__(self, license):
        LicenseError.__init__(self)
        self.license = license
    def __str__(self):
        return "invalid characters in license '%s'" % self.license
# Splits a license string on operators/parens/space, keeping the delimiters.
license_operator = re.compile('([&|() ])')
# A bare license identifier: alphanumerics plus '.', '+', '_', '-'.
license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
    """Syntax tree visitor which can accept OpenEmbedded license strings"""
    def visit_string(self, licensestr):
        # Rewrite the OE license string into a valid Python expression:
        # bare license names become string literals, and two adjacent names
        # with no explicit operator get an implicit '&' between them.
        new_elements = []
        # NOTE: Python 2 — filter() returns a list, so elements[pos-1] works.
        elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                # Implicit AND between two consecutive license names.
                if pos > 0 and license_pattern.match(elements[pos-1]):
                    new_elements.append('&')
                element = '"' + element + '"'
            elif not license_operator.match(element):
                raise InvalidLicense(element)
            new_elements.append(element)
        # Parse the rewritten expression and dispatch to visit_* methods.
        self.visit(ast.parse(' '.join(new_elements)))
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        # choose_licenses(left, right) decides which side of an '|' to keep.
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)
    def visit_Str(self, node):
        # A quoted license name produced by LicenseVisitor.visit_string.
        self.licenses.append(node.s)
    def visit_BinOp(self, node):
        if isinstance(node.op, ast.BitOr):
            # '|' alternative: flatten each side independently, then let the
            # caller-supplied policy pick which side's licenses to keep.
            left = FlattenVisitor(self.choose_licenses)
            left.visit(node.left)
            right = FlattenVisitor(self.choose_licenses)
            right.visit(node.right)
            selected = self.choose_licenses(left.licenses, right.licenses)
            self.licenses.extend(selected)
        else:
            # '&' (BitAnd): both sides are required; keep descending.
            self.generic_visit(node)
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Surface parse failures with the offending string attached.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
def is_included(licensestr, whitelist=None, blacklist=None):
    """Given a license string and whitelist and blacklist, determine if the
    license string matches the whitelist and does not match the blacklist.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses which were excluded (or the included ones when the state is True).
    """
    def include_license(license):
        # True when any whitelist glob matches this license name.
        return any(fnmatch(license, pattern) for pattern in whitelist)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in blacklist)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses)."""
        # Count with a generator instead of len(filter(...)) — equivalent on
        # Python 2 and also valid on Python 3.
        alpha_weight = sum(1 for lic in alpha if include_license(lic))
        beta_weight = sum(1 for lic in beta if include_license(lic))
        if alpha_weight > beta_weight:
            return alpha
        else:
            return beta

    if not whitelist:
        whitelist = ['*']
    if not blacklist:
        blacklist = []

    licenses = flattened_licenses(licensestr, choose_licenses)
    # List comprehensions replace lambda-wrapped filter() calls; on Python 2
    # filter() already returned lists, so behavior is unchanged.
    excluded = [lic for lic in licenses if exclude_license(lic)]
    included = [lic for lic in licenses if include_license(lic)]
    if excluded:
        return False, excluded
    else:
        return True, included
| PhiInnovations/mdp28-linux-bsp | openembedded-core/meta/lib/oe/license.py | Python | mit | 3,982 | [
"VisIt"
] | 6087b0d3160598305d153cf8f2534a61a7959f636858bb480cefad9a689204b4 |
import discord
import asyncio
import wolframalpha
import brainfuck
import youtube_dl
import subprocess
import os
import urbandict
import xmltodict
import urllib.request
from ctypes.util import find_library
from discord.ext import commands
from discord.ext.commands import formatter
from unidecode import unidecode
import re
import markovify
from translate import translate
import threading
from shelve import DbfilenameShelf
import atexit
import execjs
import js2py
import json
import multiprocessing
import builtins
import pickle
import shelve
def nonAsyncRun(function, args):
    """Schedule the coroutine `function(*args)` on the event loop from
    non-async (possibly other-thread) code."""
    loop = asyncio.get_event_loop()
    # BUG FIX: asyncio.async was deprecated in 3.4.4 and removed in 3.10;
    # asyncio.ensure_future is the drop-in replacement.
    loop.call_soon_threadsafe(asyncio.ensure_future, function(*args))
def printToDiscord(clientObj, channel, text):
    """Send `text` to `channel` from synchronous code via the event loop."""
    nonAsyncRun(clientObj.send_message, (channel, text))
async def checkOp(message):
    """Return True when the author may run operator commands; otherwise tell
    them so in-channel and return False.

    Operators are: ids stored in the server's "Operators" slot, members with
    administrator or manage_server permission, and the hard-coded bot owner.
    """
    operators = getShelfSlot(message.server.id, "Operators")
    userPerms = message.author.permissions_in(message.channel)
    # The literal id is the bot owner's account.
    if message.author.id in operators.keys() or userPerms.administrator == True or userPerms.manage_server == True or message.author.id == "129757604506370048":
        return True
    else:
        await client.send_message(message.channel,"You are not a bot operator, so you cannot use this command.")
        return False
def checkOpNonAsync(message):
    """Like checkOp, but synchronous and silent: just return the verdict."""
    ops = getShelfSlot(message.server.id, "Operators")
    perms = message.author.permissions_in(message.channel)
    is_owner = message.author.id == "129757604506370048"
    return (message.author.id in ops.keys()
            or perms.administrator == True
            or perms.manage_server == True
            or is_owner)
def getToken(service, id = None):
    """Return the API token for `service`.

    Tokens live in the JSON file 'botconfig', e.g.:
    {
        "discord": "[TOKEN]",
        "wolframalpha": "[TOKEN]",
        "googleimages": "[TOKEN]"
    }

    When a server id is given (and the service is not "discord"), a
    per-server token stored via settokenforservice takes precedence; the
    global botconfig value is the fallback.
    """
    # BUG FIX: the file handle was previously leaked.
    with open('botconfig', 'r') as tokenFile:
        tokenJSON = json.loads(tokenFile.read())
    if id == None or service == "discord":
        return tokenJSON[service]
    slot = getShelfSlot(id, "Tokens")
    try:
        # BUG FIX: the original read slot[service] again AFTER closing the
        # slot and discarded the value it had already fetched; with a real
        # shelf that read fails and the bare except masked it.
        token = slot[service]
    except KeyError:
        token = tokenJSON[service]
    slot.close()
    return token
class AutoSyncShelf(DbfilenameShelf):
    """A DbfilenameShelf that flushes to disk after every mutation."""
    def __init__(self, filename, protocol=2, writeback=True):
        super().__init__(filename, protocol=protocol, writeback=writeback)
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.sync()
    def __delitem__(self, key):
        super().__delitem__(key)
        self.sync()
class CustomDict(dict):
    """A dict-like store that pickles itself to '<name>.dat' after every mutation.

    Subclasses dict only so isinstance checks pass; all state lives in
    ``self._dict``.
    """
    def __init__(self, name, newData = None):
        self._name = name
        self._dict = {}
        if newData != None:
            # Start from the supplied data and persist it immediately.
            self._dict = newData
            self.sync()
            return
        try:
            with open(self._name + ".dat",'rb') as f:
                self._dict = pickle.load(f)
        except Exception:
            # Best-effort load: a missing or corrupt file means an empty store.
            pass
    def close(self):
        # Kept for API compatibility with shelve objects; nothing to release.
        return
    def sync(self):
        # Persist the whole dict; called after every mutating operation.
        with open(self._name + ".dat",'wb') as f:
            pickle.dump(self._dict, f, protocol=pickle.HIGHEST_PROTOCOL)
    def __setitem__(self, key, item):
        self._dict[key] = item
        self.sync()
    def __getitem__(self, key):
        return self._dict[key]
    def __repr__(self):
        return repr(self._dict)
    def __len__(self):
        return len(self._dict)
    def __delitem__(self, key):
        del self._dict[key]
        self.sync()
    def clear(self):
        # BUG FIX: clear() previously skipped sync(), leaving stale data on disk.
        result = self._dict.clear()
        self.sync()
        return result
    def copy(self):
        return self._dict.copy()
    def has_key(self, k):
        # BUG FIX: dict.has_key() does not exist on Python 3; this crashed.
        return k in self._dict
    def update(self, *args, **kwargs):
        self._dict.update(*args, **kwargs)
        self.sync()
        return self._dict
    def keys(self):
        return self._dict.keys()
    def values(self):
        return self._dict.values()
    def items(self):
        return self._dict.items()
    def pop(self, *args):
        # BUG FIX: pop() was defined twice; only this variant ever took effect,
        # so the shadowed two-argument version has been removed.
        self._dict.pop(*args)
        self.sync()
        return self._dict
    def __eq__(self, other):
        # Replaces the dead Python-2 __cmp__ hook (cmp() no longer exists).
        return self._dict == other
    def __ne__(self, other):
        return self._dict != other
    def __contains__(self, item):
        return item in self._dict
    def __iter__(self):
        return iter(self._dict)
    def __str__(self):
        # Replaces the dead Python-2 __unicode__ hook (unicode() is gone).
        return repr(self._dict)
def getShelfSlot(serverID, name):
    """Return the persistent key/value store `name` for server `serverID`.

    Data lives under ServerData/<serverID>/<name>.dat, backed by CustomDict.
    """
    # exist_ok replaces the previous bare try/except around makedirs, which
    # also hid real failures such as permission errors.  Dead commented-out
    # shelve-based variants have been removed.
    os.makedirs(os.path.join("ServerData/" + serverID), exist_ok=True)
    return CustomDict(os.path.join("ServerData/" + serverID + "/", name))
def getPrefix(bot, message):
    """Command-prefix callback for commands.Bot: per-server prefix, cached in
    prefixDict, defaulting to '$'."""
    try:
        if message.server.id not in prefixDict:
            slot = getShelfSlot(message.server.id, "Prefix")
            prefixDict.update({message.server.id:slot["Prefix"]})
            slot.close()
        return prefixDict[message.server.id]
    except:
        # No stored prefix, or a private message with no server: use default.
        return "$"
#Discord Client
class NewFormatter(formatter.HelpFormatter):
    """Help formatter whose footer also points at the web command reference."""
    def get_ending_note(self):
        command_name = self.context.invoked_with
        return "Type {0}{1} command for more info on a command.\n" \
               "You can also type {0}{1} category for more info on a category.\n" \
               "Visit http://tesseractc.at/discord/help for more command help.".format(self.clean_prefix, command_name)
help_formatter = NewFormatter(width=100)
client = commands.Bot(command_prefix=getPrefix, description='Tesseract Multipurpose Bot', pm_help = True, formatter = help_formatter)
loop = asyncio.get_event_loop()
# Load Discord Opus (voice support).
discord.opus.load_opus(find_library("opus"))
# Extension modules loaded in on_ready.
cogs = ["utilities", "stalk","voting","pastebin","customcommands","customanimations","botactions","musicactions","imageactions","cards","rss","weather","useractions", "ranks"]
# Commands gated by the whitelist() check below.
opCommands = ["setnick", "setavatar", "skip", "setrank", "setrankbyname", "op", "deop", "reloadextension", "giverole", "giveroleatrank", "setevent", "clearqueue"]
adminCommands = ["setavatar"]
# Per-server prefix cache, filled lazily by getPrefix().
prefixDict = {}
@client.check
def whitelist(ctx):
    """Global command gate: owner-only (adminCommands) and operator-only
    (opCommands) lists; everything else is allowed."""
    print("Command run!: " + ctx.command.name)
    name = ctx.command.name
    if name in adminCommands and ctx.message.author.id != "129757604506370048":
        return False
    if name in opCommands:
        return checkOpNonAsync(ctx.message)
    return True
@client.event
async def on_ready():
    """Startup hook: log identity, load all cogs, set the username."""
    print('Logged in!')
    print(client.user.name)
    print(client.user.id)
    print('---')
    print('\nLoading extensions: ')
    for cog in cogs:
        print(cog)
        try:
            client.load_extension(cog)
        except:
            # Best effort: a broken cog should not stop the others loading.
            pass
    print('\n\nChanging username...')
    await client.edit_profile(username="Doggo Bot")
    print('\nDone!')
@client.event
async def on_message(message):
    """Per-message hook: log, show typing for commands, chat on mention."""
    # Ignore other bots to avoid reply loops.
    if message.author.bot == True:
        return
    try:
        print(message.author.name + "@" + message.server.name + "~" + message.channel.name + ": " + message.content)
    except:
        # Private messages have no server/channel name.
        pass
    # Looks like a command (prefix + word): show a typing indicator.
    if bool(re.search("^\\{}[a-zA-Z0-9]+\\b".format(client.command_prefix(client, message)),message.content)):
        print("Sending typing...")
        await client.send_typing(message.channel)
    # Mentioning the bot triggers the Mitsuku chat reply.
    if client.user.id in message.content:
        await bot(message)
    await client.process_commands(message)
#@client.event
#async def on_error(event,*args,**kwargs):
#    print("ERROR " + str(event) + " WITH ARGS: " + args)
#    await client.send_message(args[0].channel,"Error in: " + str(event))
@client.event
async def on_channel_update(oldChannel, channel):
    """Announce channel topic/name changes in the affected channel."""
    if oldChannel.topic != channel.topic:
        await client.send_message(channel, "**{}**'s channel topic changed from **{}** to **{}**".format(channel.name,oldChannel.topic, channel.topic))
    if oldChannel.name != channel.name:
        await client.send_message(channel, "**{}**'s channel name changed to **{}**".format(oldChannel.name,channel.name))
@client.event
async def on_member_ban(member):
    """Announce bans in the configured announcement channel.

    NOTE(review): assumes setannouncementchannel has stored a non-None
    "channel" for this server; otherwise this raises — confirm.
    """
    announceChannel = getShelfSlot(member.server.id, "AnnounceChannel")
    await client.send_message(announceChannel["channel"],"User **{}** has been banned!".format(member.name))
    announceChannel.close()
@client.event
async def on_member_unban(server, member):
    """Announce unbans in the configured announcement channel."""
    announceChannel = getShelfSlot(server.id, "AnnounceChannel")
    await client.send_message(announceChannel["channel"],"User **{}** has been unbanned!".format(member.name))
    announceChannel.close()
@client.command(pass_context = True, aliases = ["sac"])
async def setannouncementchannel(ctx, channel : discord.Channel = None):
    """Set a channel to announce bans and unbans! Run this command with no channel to reset the channel"""
    announceChannel = getShelfSlot(ctx.message.server.id, "AnnounceChannel")
    announceChannel["channel"] = channel
    announceChannel.close()
    if channel is None:
        # BUG FIX: the documented reset path (no channel given) previously
        # crashed with AttributeError on `channel.name`.
        await client.say("The bot's announcement channel has been reset.")
    else:
        await client.say("The bot's announcement channel is now set to **{}**".format(channel.name))
@client.command(pass_context = True, aliases = ["stfs"])
async def settokenforservice(ctx, service : str = None, *, token : str = None):
    """Set a token for apis, run without any parameters for a list"""
    if service == None and token == None:
        await client.say("**--- API Keys/Tokens to set ---**\nDiscord\nWolframAlpha\nImgurId and ImgurSecret\nYoutubeSearch\nGoogleImageSearch")
        return
    # BUG FIX: this previously called getShelfSlot(id, "Tokens"), passing the
    # *builtin* id function instead of the server's id, which crashed when
    # building the storage path.
    slot = getShelfSlot(ctx.message.server.id, "Tokens")
    slot[service.lower()] = token
    slot.close()
@client.command(pass_context = True, aliases = ["spr"])
async def setprefix(ctx, *, prefix : str = "$"):
    """Set this server's command prefix (default '$')."""
    slot = getShelfSlot(ctx.message.server.id, "Prefix")
    slot["Prefix"] = prefix
    # Refresh the in-memory cache so the new prefix applies immediately.
    prefixDict.update({ctx.message.server.id:slot["Prefix"]})
    slot.close()
    await client.say("Prefix set!")
async def bot(message):
    """Commune with the bot!"""
    await client.send_typing(message.channel)
    # Delegate the chat reply to an external node.js Mitsuku client; the text
    # after the first space (i.e. after the mention) is the user's message.
    mitsukuResponse = subprocess.run(["node","mitsuku.js",message.content.split(" ",1)[1]], stdout=subprocess.PIPE).stdout
    # Re-brand "mitsuku" in the reply with the bot's current nick/name.
    if message.server.me.nick != None:
        mitsukuResponse = re.compile(re.escape('mitsuku'), re.IGNORECASE).sub(message.server.me.nick,str(mitsukuResponse))
    else:
        mitsukuResponse = re.compile(re.escape('mitsuku'), re.IGNORECASE).sub(message.server.me.name,str(mitsukuResponse))
    # [2:][:-3] strips the b'...' bytes-repr wrapper plus trailing newline
    # characters — presumably matches mitsuku.js's output format; verify.
    await client.send_message(message.channel, "<@{0}".format(message.author.id) + "> " + str(mitsukuResponse)[2:][:-3])
#Eval Funcs
def safeEval(code, args = {}, pyimports = [], acceptableWaitTime = 1):
manager = multiprocessing.Manager()
ret = manager.dict()
#print("Evaluation code with wait time: {}".format(acceptableWaitTime))
p = multiprocessing.Process(target=doEval, name="doEval", args = (code, ret, args, pyimports))
p.start()
p.join(acceptableWaitTime)
if p.is_alive():
p.terminate()
p.join()
try:
return ret["result"]
except:
return None
def doEval(code, ret, args = {}, pyimports = []):
if pyimports != []:
pyimportcode = "pyimport " + ";\npyimport ".join(pyimports) + ";\n"
else:
pyimportcode = "";
codeToRun = pyimportcode + "function cc() {" + code.replace("pyimport","").replace("__class__","") + "}"
#print("Evaluating: {}".format(codeToRun))
baseDict = {}
baseDict.update(args)
context = js2py.EvalJs(baseDict)
context.execute(codeToRun)
ret["result"] = str(context.cc())
def doUrlopen(url):
    # Strip the surrounding JS-string quote characters, then fetch the page
    # with a browser-like User-Agent so sites don't reject the request.
    target = str(url)[1:-1]
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}
    request = urllib.request.Request(target, data=None, headers=headers)
    return str(urllib.request.urlopen(request).read())
#End of eval funcs
#def run_discord():
# asyncio.set_event_loop(loop)
# loop.run_until_complete(client.start(getToken("discord")))
# loop.close()
if __name__ == "__main__":
    print("Booting up...")
    # Reconnect forever if the client crashes, but let Ctrl-C actually stop us.
    while True:
        try:
            client.run(getToken("discord"))
        except KeyboardInterrupt:
            raise
        except Exception as e:
            # FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid every error silently.
            print("Client crashed, restarting: {}".format(e))
| TesseractCat/TesseractBot | bot.py | Python | mit | 12,941 | [
"VisIt"
] | f303f3bf7c02b132dbaeb8d21b790feed74f67f568fadd175528c846040ec8b8 |
import numpy as np
def create_perturb_params(countsmat, transmat=None):
    '''
    Computes transition probabilities and standard errors of the transition
    probabilities due to finite sampling, using the MSM counts matrix.

    The transition probabilities are computed by dividing each element c_ij by
    the row-summed counts of row i.  The standard errors are then computed by
    first taking the standard deviation of the transition probability, treating
    each count as a Bernoulli process with p = t_ij (std = (t_ij - t_ij**2)**0.5),
    and dividing by the square root of the row-summed counts of row i.

    Parameters
    ----------
    countsmat : np.ndarray
        The MSM counts matrix.
    transmat : np.ndarray, optional
        A transition matrix to use directly (e.g. MLE symmetrized).  Its
        transition probabilities are used for the Bernoulli standard
        deviations, which are divided by the row-summed counts of the
        original supplied counts matrix.

    Returns
    -------
    transmat : np.ndarray
        The MSM transition matrix.
    scale : np.ndarray
        The matrix of standard errors for each transition probability.
    '''
    norm = np.sum(countsmat, axis=1)
    # FIX: `if not transmat` raises "truth value of an array is ambiguous"
    # (ValueError) whenever a multi-element transmat is supplied; the intended
    # check is identity against None.
    if transmat is None:
        transmat = (countsmat.transpose() / norm).transpose()
    counts = (np.ones((len(transmat), len(transmat))) * norm).transpose()
    # Tiny constant keeps every standard error strictly positive so the
    # downstream Gaussian sampling never gets scale == 0.
    scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15
    return transmat, scale
def perturb_tmat(transmat, scale):
    '''
    Perturbs each entry of an MSM transition matrix by treating it as a
    Gaussian random variable with mean t_ij and standard deviation equal to
    the standard error computed with `create_perturb_params`, then
    re-normalizes each row.

    Returns a sampled transition matrix that takes into consideration errors
    due to finite sampling (useful for bootstrapping, etc.).

    Parameters
    ----------
    transmat : np.ndarray
        The transition matrix, whose elements serve as the means of the
        Gaussian random variables.
    scale : np.ndarray
        The matrix of standard errors.  For transition probability t_ij this
        is assumed to be the standard error of the mean of a binomial
        distribution with p = transition probability and number of
        observations equal to the summed counts in row i.
    '''
    # np.random.normal broadcasts loc/scale element-wise; np.vectorize wrapped
    # the same call in a slow Python-level loop for no benefit.
    output = np.random.normal(transmat, scale)
    # Negative samples are unphysical probabilities; clamp them to zero.
    output[output < 0] = 0
    # Re-normalize so every row sums to 1.
    return (output.transpose() / np.sum(output, axis=1)).transpose()
| msmbuilder/msmbuilder | msmbuilder/msm/validation/transmat_errorbar.py | Python | lgpl-2.1 | 2,619 | [
"Gaussian"
] | 0926f9554a41947069213cb0601a799fb5c8c873f1e24a8182ac78935fe0fd7f |
""" This is the RFIO StorageClass
"""
__RCSID__ = "$Id$"
import types
import re
import os
import time
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
class RFIOStorage( StorageBase ):
def __init__( self, storageName, parameters ):
  """ Standard constructor: delegates to StorageBase and records the
      CASTOR space token plus RFIO-specific settings. """
  StorageBase.__init__( self, storageName, parameters )
  self.spaceToken = self.protocolParameters['SpaceToken']
  self.isok = True
  self.pluginName = 'RFIO'
  # shell-command time-outs in seconds: short for metadata, long for transfers
  self.timeout = 100
  self.long_timeout = 600
#############################################################
#
# These are the methods for manipulating the client
#
def getName( self ):
  """ Return the name this storage instance was created with. """
  storageName = self.name
  return S_OK( storageName )
#############################################################
#
# These are the methods for file manipulation
#
def exists( self, path ):
  """ Check if the given path exists. The 'path' variable can be a string or a list of strings.

      :return: S_OK( { 'Successful' : { url : bool }, 'Failed' : { url : errMsg } } )
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.exists: Determining the existance of %s files." % len( urls ) )
  # Build a single 'nsls -d' command listing every url at once.
  comm = "nsls -d"
  for url in urls:
    comm = " %s %s" % ( comm, url )
  res = shellCall( self.timeout, comm )
  successful = {}
  failed = {}
  if res['OK']:
    returncode, stdout, stderr = res['Value']
    # nsls exits 1 when only some paths are missing; output is still parseable.
    if returncode in [0, 1]:
      # Paths printed on stdout exist ...
      for line in stdout.splitlines():
        url = line.strip()
        successful[url] = True
      # ... paths reported on stderr ("<pfn>: <error>") do not.
      for line in stderr.splitlines():
        pfn, _ = line.split( ': ' )
        url = pfn.strip()
        successful[url] = False
    else:
      errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
      gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
      return S_ERROR( errStr )
  else:
    errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
    gLogger.error( errStr, "%s %s" % ( self.name, res['Message'] ) )
    return S_ERROR( errStr )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def isFile( self, path ):
  """Check if the given path exists and it is a file

     :return: S_OK( { 'Successful' : { pfn : bool }, 'Failed' : { pfn : errMsg } } )
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.isFile: Determining whether %s paths are files." % len( urls ) )
  successful = {}
  failed = {}
  # One 'nsls -ld' call for all paths; parse the long-listing output.
  comm = "nsls -ld"
  for url in urls:
    comm = " %s %s" % ( comm, url )
  res = shellCall( self.timeout, comm )
  if not res['OK']:
    return res
  returncode, stdout, stderr = res['Value']
  if returncode in [0, 1]:
    for line in stdout.splitlines():
      permissions, _subdirs, _owner, _group, _size, _month, _date, _timeYear, pfn = line.split()
      # a leading 'd' in the permission string marks a directory
      if permissions[0] != 'd':
        successful[pfn] = True
      else:
        successful[pfn] = False
    for line in stderr.splitlines():
      # stderr lines look like "<pfn>: <error message>"
      pfn, error = line.split( ': ' )
      url = pfn.strip()
      failed[url] = error
  else:
    errStr = "RFIOStorage.isFile: Completely failed to determine whether path is file."
    gLogger.error( errStr, "%s %s" % ( self.name, stderr ) )
    return S_ERROR( errStr )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __getPathMetadata( self, urls ):
  # Internal helper: run a single 'nsls -ld' over all urls and parse the
  # long-listing into per-path metadata dictionaries.
  gLogger.debug( "RFIOStorage.__getPathMetadata: Attempting to get metadata for %s paths." % ( len( urls ) ) )
  comm = "nsls -ld"
  for url in urls:
    comm = " %s %s" % ( comm, url )
  res = shellCall( self.timeout, comm )
  successful = {}
  failed = {}
  if not res['OK']:
    errStr = "RFIOStorage.__getPathMetadata: Completely failed to get path metadata."
    gLogger.error( errStr, res['Message'] )
    return S_ERROR( errStr )
  else:
    returncode, stdout, stderr = res['Value']
    # nsls exits 1 when only some paths failed; stdout is still parseable.
    if not returncode in [0, 1]:
      errStr = "RFIOStorage.__getPathMetadata: failed to perform nsls."
      gLogger.error( errStr, stderr )
    else:
      for line in stdout.splitlines():
        permissions, subdirs, owner, group, size, month, date, timeYear, pfn = line.split()
        successful[pfn] = {}
        if permissions[0] == 'd':
          successful[pfn]['Type'] = 'Directory'
        else:
          successful[pfn]['Type'] = 'File'
        successful[pfn]['Mode'] = self.__permissionsToInt( permissions )
        successful[pfn]['NbSubDirs'] = subdirs
        successful[pfn]['Owner'] = owner
        successful[pfn]['Group'] = group
        successful[pfn]['Size'] = int( size )
        successful[pfn]['Month'] = month
        successful[pfn]['Date'] = date
        successful[pfn]['Year'] = timeYear
      for line in stderr.splitlines():
        # stderr lines look like "<pfn>: <error message>"
        pfn, error = line.split( ': ' )
        url = pfn.strip()
        failed[url] = error
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __permissionsToInt( self, permissions ):
  # Translate an 'nsls -l' permission string (e.g. "-rwxr-xr--") into the
  # equivalent numeric mode: each non-'-' character sets its bit.
  mode = permissions[1:]
  modeInt = 0
  for bit in range( 9 ):
    if mode[bit] != '-':
      modeInt += 2 ** ( 8 - bit )
  return modeInt
def __getFileMetadata( self, urls ):
  # Internal helper: augment file metadata with staging ('Cached'),
  # tape-migration ('Migrated') status and the tape-segment checksum.
  gLogger.debug( "RFIOStorage.__getPathMetadata: Attempting to get additional metadata for %s files." % ( len( urls ) ) )
  # Check whether the files that exist are staged
  comm = "stager_qry -S %s" % self.spaceToken
  successful = {}
  for pfn in urls:
    successful[pfn] = {}
    comm = "%s -M %s" % ( comm, pfn )
  res = shellCall( self.timeout, comm )
  if not res['OK']:
    errStr = "RFIOStorage.__getFileMetadata: Completely failed to get cached status."
    gLogger.error( errStr, res['Message'] )
    return S_ERROR( errStr )
  else:
    _returncode, stdout, _stderr = res['Value']
    for line in stdout.splitlines():
      pfn = line.split()[0]
      status = line.split()[-1]
      # STAGED/CANBEMIGR both mean a disk copy is available
      if status in ['STAGED', 'CANBEMIGR']:
        successful[pfn]['Cached'] = True
  # Anything the stager did not report as staged is marked not cached.
  for pfn in urls:
    if not successful[pfn].has_key( 'Cached' ):
      successful[pfn]['Cached'] = False
  # Now for the files that exist get the tape segment (i.e. whether they have been migrated) and related checksum
  comm = "nsls -lT --checksum"
  for pfn in urls:
    comm = "%s %s" % ( comm, pfn )
  res = shellCall( self.timeout, comm )
  if not res['OK']:
    errStr = "RFIOStorage.__getFileMetadata: Completely failed to get migration status."
    gLogger.error( errStr, res['Message'] )
    return S_ERROR( errStr )
  else:
    _returncode, stdout, _stderr = res['Value']
    for line in stdout.splitlines():
      pfn = line.split()[-1]
      checksum = line.split()[-2]
      # an 'nsls -lT' line implies a tape segment exists for the file
      successful[pfn]['Migrated'] = True
      successful[pfn]['Checksum'] = checksum
  for pfn in urls:
    if not successful[pfn].has_key( 'Migrated' ):
      successful[pfn]['Migrated'] = False
  resDict = {'Failed':{}, 'Successful':successful}
  return S_OK( resDict )
def getFile( self, path, localPath = False ):
  """ Get a local copy in the current directory of a physical file specified by its path
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  failed = {}
  successful = {}
  # Download every requested url into localPath (or the cwd by default).
  baseDir = localPath if localPath else os.getcwd()
  for src_url in urls.keys():
    destFile = "%s/%s" % ( baseDir, os.path.basename( src_url ) )
    res = self.__getFile( src_url, destFile )
    if res['OK']:
      successful[src_url] = res['Value']
    else:
      failed[src_url] = res['Message']
  return S_OK( {'Failed':failed, 'Successful':successful} )
def __getFile( self, src_url, dest_file ):
  """Get a local copy in the current directory of a physical file specified by its path
  """
  if not os.path.exists( os.path.dirname( dest_file ) ):
    os.makedirs( os.path.dirname( dest_file ) )
  if os.path.exists( dest_file ):
    gLogger.debug( "RFIOStorage.getFile: Local file already exists %s. Removing..." % dest_file )
    os.remove( dest_file )
  # Remote size is needed both for the time-out and the post-transfer check.
  res = self.__executeOperation( src_url, 'getFileSize' )
  if not res['OK']:
    return S_ERROR( res['Message'] )
  remoteSize = res['Value']
  # Scale the rfcp time-out with the file size, assuming at least 100 KB/s.
  MIN_BANDWIDTH = 1024 * 100 # 100 KB/s
  timeout = remoteSize / MIN_BANDWIDTH + 300
  gLogger.debug( "RFIOStorage.getFile: Executing transfer of %s to %s" % ( src_url, dest_file ) )
  comm = "rfcp %s %s" % ( src_url, dest_file )
  res = shellCall( timeout, comm )
  if res['OK']:
    returncode, _stdout, stderr = res['Value']
    if returncode == 0:
      gLogger.debug( 'RFIOStorage.__getFile: Got file from storage, performing post transfer check.' )
      # Integrity check: local size must equal the remote size.
      localSize = getSize( dest_file )
      if localSize == remoteSize:
        gLogger.debug( "RFIOStorage.getFile: Post transfer check successful." )
        return S_OK( localSize )
      errorMessage = "RFIOStorage.__getFile: Source and destination file sizes do not match."
      gLogger.error( errorMessage, src_url )
    else:
      errStr = "RFIOStorage.__getFile: Failed to get local copy of file."
      gLogger.error( errStr, stderr )
      errorMessage = "%s %s" % ( errStr, stderr )
  else:
    errStr = "RFIOStorage.__getFile: Failed to get local copy of file."
    gLogger.error( errStr, res['Message'] )
    errorMessage = "%s %s" % ( errStr, res['Message'] )
  # Clean up any partial download before reporting the failure.
  if os.path.exists( dest_file ):
    gLogger.debug( "RFIOStorage.getFile: Removing local file %s." % dest_file )
    os.remove( dest_file )
  return S_ERROR( errorMessage )
def putFile( self, path, sourceSize = 0 ):
  """ Upload local files to the storage; `path` maps destination urls to
      local source files.
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  failed = {}
  successful = {}
  for dest_url, src_file in urls.items():
    # Ensure the remote parent directory exists before copying.
    res = self.__executeOperation( os.path.dirname( dest_url ), 'createDirectory' )
    if not res['OK']:
      failed[dest_url] = res['Message']
      continue
    res = self.__putFile( src_file, dest_url, sourceSize )
    if res['OK']:
      successful[dest_url] = res['Value']
    else:
      failed[dest_url] = res['Message']
  return S_OK( {'Failed':failed, 'Successful':successful} )
def __putFile( self, src_file, dest_url, sourceSize ):
  """Put a copy of the local file to the current directory on the physical storage
  """
  # Pre-transfer check
  res = self.__executeOperation( dest_url, 'exists' )
  if not res['OK']:
    gLogger.debug( "RFIOStorage.__putFile: Failed to find pre-existance of destination file." )
    return res
  if res['Value']:
    # Destination already exists: remove it before overwriting.
    res = self.__executeOperation( dest_url, 'removeFile' )
    if not res['OK']:
      gLogger.debug( "RFIOStorage.__putFile: Failed to remove remote file %s." % dest_url )
    else:
      gLogger.debug( "RFIOStorage.__putFile: Removed remote file %s." % dest_url )
  if not os.path.exists( src_file ):
    errStr = "RFIOStorage.__putFile: The source local file does not exist."
    gLogger.error( errStr, src_file )
    return S_ERROR( errStr )
  # NOTE(review): the sourceSize argument is unconditionally overwritten here.
  sourceSize = getSize( src_file )
  if sourceSize == -1:
    errStr = "RFIOStorage.__putFile: Failed to get file size."
    gLogger.error( errStr, src_file )
    return S_ERROR( errStr )
  res = self.__getTransportURL( dest_url )
  if not res['OK']:
    gLogger.debug( "RFIOStorage.__putFile: Failed to get transport URL for file." )
    return res
  turl = res['Value']
  # Scale the rfcp time-out with the file size, assuming at least 100 KB/s.
  MIN_BANDWIDTH = 1024 * 100 # 100 KB/s
  timeout = sourceSize / MIN_BANDWIDTH + 300
  gLogger.debug( "RFIOStorage.putFile: Executing transfer of %s to %s" % ( src_file, turl ) )
  comm = "rfcp %s '%s'" % ( src_file, turl )
  res = shellCall( timeout, comm )
  if res['OK']:
    returncode, _stdout, stderr = res['Value']
    if returncode == 0:
      gLogger.debug( 'RFIOStorage.putFile: Put file to storage, performing post transfer check.' )
      # Integrity check: destination size must match the local source size.
      res = self.__executeOperation( dest_url, 'getFileSize' )
      if res['OK']:
        destinationSize = res['Value']
        if sourceSize == destinationSize :
          gLogger.debug( "RFIOStorage.__putFile: Post transfer check successful." )
          return S_OK( destinationSize )
      errorMessage = "RFIOStorage.__putFile: Source and destination file sizes do not match."
      gLogger.error( errorMessage, dest_url )
    else:
      errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
      gLogger.error( errStr, stderr )
      errorMessage = "%s %s" % ( errStr, stderr )
  else:
    errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
    gLogger.error( errStr, res['Message'] )
    errorMessage = "%s %s" % ( errStr, res['Message'] )
  # Remove any partial remnant of the failed transfer.
  res = self.__executeOperation( dest_url, 'removeFile' )
  if res['OK']:
    gLogger.debug( "RFIOStorage.__putFile: Removed remote file remnant %s." % dest_url )
  else:
    gLogger.debug( "RFIOStorage.__putFile: Unable to remove remote file remnant %s." % dest_url )
  return S_ERROR( errorMessage )
def removeFile( self, path ):
  """Remove physically the file specified by its path
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  # Work in chunks of 100 files to keep the shell command line a sane length.
  listOfLists = breakListIntoChunks( urls, 100 )
  for urls in listOfLists:
    gLogger.debug( "RFIOStorage.removeFile: Attempting to remove %s files." % len( urls ) )
    # First remove the disk replicas from the stager ...
    comm = 'stager_rm -S %s' % self.spaceToken
    for url in urls:
      comm = "%s -M %s" % ( comm, url )
    res = shellCall( 100, comm )
    if res['OK']:
      returncode, _stdout, stderr = res['Value']
      if returncode in [0, 1]:
        # ... then remove the entries from the name server.
        comm = 'nsrm -f'
        for url in urls:
          comm = "%s %s" % ( comm, url )
        res = shellCall( 100, comm )
        if res['OK']:
          returncode, _stdout, stderr = res['Value']
          if returncode in [0, 1]:
            for pfn in urls:
              successful[pfn] = True
          else:
            errStr = "RFIOStorage.removeFile. Completely failed to remove files from the nameserver."
            gLogger.error( errStr, stderr )
            for pfn in urls:
              failed[pfn] = errStr
        else:
          errStr = "RFIOStorage.removeFile. Completely failed to remove files from the nameserver."
          gLogger.error( errStr, res['Message'] )
          for pfn in urls:
            failed[pfn] = errStr
      else:
        errStr = "RFIOStorage.removeFile. Completely failed to remove files from the stager."
        gLogger.error( errStr, stderr )
        for pfn in urls:
          failed[pfn] = errStr
    else:
      errStr = "RFIOStorage.removeFile. Completely failed to remove files from the stager."
      gLogger.error( errStr, res['Message'] )
      for pfn in urls:
        failed[pfn] = errStr
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def getFileMetadata( self, path ):
  """ Get metadata associated to the file
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.getFileMetadata: Obtaining metadata for %s files." % len( urls ) )
  # Basic name-server metadata first (type, size, ownership, permissions).
  res = self.__getPathMetadata( urls )
  if not res['OK']:
    return res
  failed = {}
  successful = {}
  for pfn, error in res['Value']['Failed'].items():
    if error == 'No such file or directory':
      failed[pfn] = 'File does not exist'
    else:
      failed[pfn] = error
  files = []
  for pfn, pfnDict in res['Value']['Successful'].items():
    if pfnDict['Type'] == 'Directory':
      failed[pfn] = "Supplied path is not a file"
    else:
      successful[pfn] = res['Value']['Successful'][pfn]
      files.append( pfn )
  if files:
    # Enrich actual files with staging/migration status and checksum.
    res = self.__getFileMetadata( files )
    if not res['OK']:
      return res
    for pfn, pfnDict in res['Value']['Successful'].items():
      successful[pfn].update( pfnDict )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def getFileSize( self, path ):
  """Get the physical size of the given file
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.getFileSize: Determining the sizes for %s files." % len( urls ) )
  res = self.__getPathMetadata( urls )
  if not res['OK']:
    return res
  failed = {}
  successful = {}
  # Translate the generic name-server error into the conventional message.
  for pfn, error in res['Value']['Failed'].items():
    failed[pfn] = 'File does not exist' if error == 'No such file or directory' else error
  for pfn, pfnDict in res['Value']['Successful'].items():
    if pfnDict['Type'] == 'Directory':
      failed[pfn] = "Supplied path is not a file"
    else:
      successful[pfn] = pfnDict['Size']
  return S_OK( {'Failed':failed, 'Successful':successful} )
def prestageFile( self, path ):
  """ Issue prestage request for file
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  # Unique request tag so the caller can poll the request with stager_qry -U.
  userTag = '%s-%s' % ( self.spaceToken, time.time() )
  comm = "stager_get -S %s -U %s " % ( self.spaceToken, userTag )
  for url in urls:
    comm = "%s -M %s" % ( comm, url )
  res = shellCall( 100, comm )
  successful = {}
  failed = {}
  if res['OK']:
    returncode, stdout, stderr = res['Value']
    if returncode in [0, 1]:
      for line in stdout.splitlines():
        if re.search( 'SUBREQUEST_READY', line ):
          pfn, _status = line.split()
          # the tag is returned so prestageFileStatus can query this request
          successful[pfn] = userTag
        elif re.search( 'SUBREQUEST_FAILED', line ):
          pfn, _status, err = line.split( ' ', 2 )
          failed[pfn] = err
    else:
      errStr = "RFIOStorage.prestageFile: Got unexpected return code from stager_get."
      gLogger.error( errStr, stderr )
      return S_ERROR( errStr )
  else:
    errStr = "RFIOStorage.prestageFile: Completely failed to issue stage requests."
    gLogger.error( errStr, res['Message'] )
    return S_ERROR( errStr )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def prestageFileStatus( self, path ):
  """ Monitor the status of a prestage request
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  # Group the files by the request tag they were prestaged under.
  requestFiles = {}
  for url, requestID in urls.items():
    if not requestFiles.has_key( requestID ):
      requestFiles[requestID] = []
    requestFiles[requestID].append( url )
  for requestID, urls in requestFiles.items():
    comm = "stager_qry -S %s -U %s " % ( self.spaceToken, requestID )
    res = shellCall( 100, comm )
    if res['OK']:
      returncode, stdout, stderr = res['Value']
      if returncode in [0, 1]:
        for line in stdout.splitlines():
          pfn = line.split()[0]
          status = line.split()[-1]
          # STAGED/CANBEMIGR both mean the disk copy is available
          if status in ['STAGED', 'CANBEMIGR']:
            successful[pfn] = True
          else:
            successful[pfn] = False
      else:
        errStr = "RFIOStorage.prestageFileStatus: Got unexpected return code from stager_get."
        gLogger.error( errStr, stderr )
        return S_ERROR( errStr )
    else:
      errStr = "RFIOStorage.prestageFileStatus: Completely failed to obtain prestage status."
      gLogger.error( errStr, res['Message'] )
      return S_ERROR( errStr )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def getTransportURL( self, path, protocols = False ):
  """ Obtain the TURLs for the supplied path and protocols
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  res = self.exists( urls )
  if not res['OK']:
    return res
  # Only build TURLs for paths that actually exist on the storage.
  for path, exists in res['Value']['Successful'].items():
    if not exists:
      failed[path] = 'File does not exist'
    else:
      res = self.__getTransportURL( path )
      if not res['OK']:
        failed[path] = res['Message']
      else:
        successful[path] = res['Value']
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __getTransportURL( self, path ):
  # Build a CASTOR-2 TURL carrying the service class (space token);
  # fall back to a plain "castor:" URL when no space token is configured.
  try:
    if self.spaceToken:
      tURL = "%s://%s:%s/?svcClass=%s&castorVersion=2&path=%s" % ( self.protocolParameters['Protocol'],
                                                                   self.protocolParameters['Host'],
                                                                   self.protocolParameters['Port'],
                                                                   self.spaceToken,
                                                                   path )
    else:
      tURL = "castor:%s" % ( path )
    return S_OK( tURL )
  except Exception, x:
    errStr = "RFIOStorage.__getTransportURL: Exception while creating turl."
    gLogger.exception( errStr, self.name, x )
    return S_ERROR( errStr )
#############################################################
#
# These are the methods for directory manipulation
#
def isDirectory( self, path ):
  """Check if the given path exists and it is a directory
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.isDirectory: Determining whether %s paths are directories." % len( urls ) )
  res = self.__getPathMetadata( urls )
  if not res['OK']:
    return res
  failed = {}
  successful = {}
  # Translate the generic name-server error into the conventional message.
  for pfn, error in res['Value']['Failed'].items():
    failed[pfn] = 'Directory does not exist' if error == 'No such file or directory' else error
  for pfn, pfnDict in res['Value']['Successful'].items():
    successful[pfn] = pfnDict['Type'] == 'Directory'
  return S_OK( {'Failed':failed, 'Successful':successful} )
def getDirectory( self, path, localPath = False ):
  """ Get locally a directory from the physical storage together with all its files and subdirectories.
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  gLogger.debug( "RFIOStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
  for src_directory in urls:
    dirName = os.path.basename( src_directory )
    if localPath:
      dest_dir = "%s/%s" % ( localPath, dirName )
    else:
      dest_dir = "%s/%s" % ( os.getcwd(), dirName )
    res = self.__getDir( src_directory, dest_dir )
    if res['OK']:
      if res['Value']['AllGot']:
        gLogger.debug( "RFIOStorage.getDirectory: Successfully got local copy of %s" % src_directory )
        successful[src_directory] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      else:
        # Partial download: counts of what was retrieved go under 'Failed'.
        gLogger.error( "RFIOStorage.getDirectory: Failed to get entire directory.", src_directory )
        failed[src_directory] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
    else:
      gLogger.error( "RFIOStorage.getDirectory: Completely failed to get local copy of directory.", src_directory )
      failed[src_directory] = {'Files':0, 'Size':0}
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __getDir( self, srcDirectory, destDirectory ):
  """ Recursively copy a remote directory (files and sub-directories) to a
      local one.

      :return: S_OK( {'AllGot': bool, 'Files': nFilesGot, 'Size': bytesGot} )
  """
  filesGot = 0
  sizeGot = 0
  # Check the remote directory exists
  res = self.isDirectory( srcDirectory )
  if not res['OK']:
    errStr = "RFIOStorage.__getDir: Failed to find the supplied source directory."
    gLogger.error( errStr, srcDirectory )
    return S_ERROR( errStr )
  if not res['Value']['Successful'].has_key( srcDirectory ):
    errStr = "RFIOStorage.__getDir: Failed to find the supplied source directory."
    gLogger.error( errStr, srcDirectory )
    return S_ERROR( errStr )
  if not res['Value']['Successful'][srcDirectory]:
    errStr = "RFIOStorage.__getDir: The supplied source directory does not exist."
    gLogger.error( errStr, srcDirectory )
    return S_ERROR( errStr )
  # Check the local directory exists and create it if not
  if not os.path.exists( destDirectory ):
    os.makedirs( destDirectory )
  # Get the remote directory contents
  res = self.listDirectory( srcDirectory )
  if not res['OK']:
    errStr = "RFIOStorage.__getDir: Failed to list the source directory."
    gLogger.error( errStr, srcDirectory )
    # FIX: previously only logged and fell through, crashing on res['Value']
    return S_ERROR( errStr )
  if not res['Value']['Successful'].has_key( srcDirectory ):
    errStr = "RFIOStorage.__getDir: Failed to list the source directory."
    gLogger.error( errStr, srcDirectory )
    # FIX: previously fell through and raised KeyError on the missing entry
    return S_ERROR( errStr )
  surlsDict = res['Value']['Successful'][srcDirectory]['Files']
  subDirsDict = res['Value']['Successful'][srcDirectory]['SubDirs']
  # First get all the files in the directory
  gotFiles = True
  for surl in surlsDict.keys():
    surlGot = False
    fileSize = surlsDict[surl]['Size']
    fileName = os.path.basename( surl )
    localPath = '%s/%s' % ( destDirectory, fileName )
    fileDict = {surl:localPath}
    res = self.getFile( fileDict )
    if res['OK']:
      if res['Value']['Successful'].has_key( surl ):
        filesGot += 1
        sizeGot += fileSize
        surlGot = True
    if not surlGot:
      gotFiles = False
  # Then recursively get the sub directories
  subDirsGot = True
  for subDir in subDirsDict.keys():
    subDirName = os.path.basename( subDir )
    localPath = '%s/%s' % ( destDirectory, subDirName )
    dirSuccessful = False
    res = self.__getDir( subDir, localPath )
    if res['OK']:
      if res['Value']['AllGot']:
        dirSuccessful = True
        filesGot += res['Value']['Files']
        sizeGot += res['Value']['Size']
    if not dirSuccessful:
      subDirsGot = False
  # AllGot is true only when every file and every sub-directory succeeded.
  allGot = subDirsGot and gotFiles
  resDict = {'AllGot':allGot, 'Files':filesGot, 'Size':sizeGot}
  return S_OK( resDict )
def putDirectory( self, path ):
  """ Put a local directory to the physical storage together with all its files and subdirectories.
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  gLogger.debug( "RFIOStorage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
  for destDir, sourceDir in urls.items():
    res = self.__putDir( sourceDir, destDir )
    if res['OK']:
      if res['Value']['AllPut']:
        gLogger.debug( "RFIOStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
        successful[destDir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
      else:
        # Partial upload: counts of what was transferred go under 'Failed'.
        gLogger.error( "RFIOStorage.putDirectory: Failed to put entire directory to remote storage.", destDir )
        failed[destDir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
    else:
      gLogger.error( "RFIOStorage.putDirectory: Completely failed to put directory to remote storage.", destDir )
      failed[destDir] = {'Files':0, 'Size':0}
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __putDir( self, src_directory, dest_directory ):
  """ Recursively upload a local directory to the storage.

      :return: S_OK( {'AllPut': bool, 'Files': nFilesPut, 'Size': bytesPut} )
  """
  filesPut = 0
  sizePut = 0
  # Check the local directory exists
  if not os.path.isdir( src_directory ):
    errStr = "RFIOStorage.__putDir: The supplied source directory does not exist."
    gLogger.error( errStr, src_directory )
    return S_ERROR( errStr )
  # Create the remote directory
  res = self.createDirectory( dest_directory )
  if not res['OK']:
    errStr = "RFIOStorage.__putDir: Failed to create destination directory."
    gLogger.error( errStr, dest_directory )
    return S_ERROR( errStr )
  # Get the local directory contents
  contents = os.listdir( src_directory )
  allSuccessful = True
  for cFile in contents:
    pathSuccessful = False
    localPath = '%s/%s' % ( src_directory, cFile )
    remotePath = '%s/%s' % ( dest_directory, cFile )
    if os.path.isdir( localPath ):
      # Sub-directory: recurse.
      res = self.__putDir( localPath, remotePath )
      if res['OK']:
        if res['Value']['AllPut']:
          pathSuccessful = True
          filesPut += res['Value']['Files']
          sizePut += res['Value']['Size']
      else:
        return S_ERROR( 'Failed to put directory' )
    else:
      # Plain file: upload it.
      fileDict = {remotePath:localPath}
      res = self.putFile( fileDict )
      if res['OK']:
        if res['Value']['Successful'].has_key( remotePath ):
          filesPut += 1
          sizePut += res['Value']['Successful'][remotePath]
          pathSuccessful = True
    if not pathSuccessful:
      allSuccessful = False
  resDict = {'AllPut':allSuccessful, 'Files':filesPut, 'Size':sizePut}
  return S_OK( resDict )
def createDirectory( self, path ):
  """ Create the remote directory(ies), including any missing parents. """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  gLogger.debug( "RFIOStorage.createDirectory: Attempting to create %s directories." % len( urls ) )
  for url in urls:
    # Trailing slashes would confuse the recursive parent lookup.
    res = self.__makeDirs( url.rstrip( '/' ) )
    if res['OK']:
      gLogger.debug( "RFIOStorage.createDirectory: Successfully created directory on storage: %s" % url )
      successful[url] = True
    else:
      gLogger.error( "RFIOStorage.createDirectory: Failed to create directory on storage.", url )
      failed[url] = res['Message']
  return S_OK( {'Failed':failed, 'Successful':successful} )
def __makeDir( self, path ):
  # Create a single directory (mode 775) in the CASTOR name space.
  res = shellCall( 100, "nsmkdir -m 775 %s" % path )
  if not res['OK']:
    return res
  returncode, _stdout, stderr = res['Value']
  if returncode != 0:
    return S_ERROR( stderr )
  return S_OK()
def __makeDirs( self, path ):
  """ Recursively create `path`: if the parent already exists, create
      `path` directly; otherwise create the parent chain first.
  """
  pDir = os.path.dirname( path )
  res = self.exists( path )
  if not res['OK']:
    return res
  if res['OK']:
    if res['Value']['Successful'].has_key( path ):
      if res['Value']['Successful'][path]:
        # Path already exists: nothing to do.
        return S_OK()
      else:
        # Path missing: check the parent, recursing up the tree if needed.
        res = self.exists( pDir )
        if res['OK']:
          if res['Value']['Successful'].has_key( pDir ):
            if res['Value']['Successful'][pDir]:
              res = self.__makeDir( path )
            else:
              res = self.__makeDirs( pDir )
              res = self.__makeDir( path )
  # NOTE(review): when `path` is absent from the 'Successful' dict, the raw
  # exists() result is returned unchanged -- confirm this fallback is intended.
  return res
def removeDirectory( self, path, recursive = False ):
  """Remove a directory on the physical storage together with all its files and
     subdirectories.
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
  successful = {}
  failed = {}
  for url in urls:
    res = shellCall( 100, "nsrm -r %s" % url )
    if not res['OK']:
      errStr = "RFIOStorage.removeDirectory: Completely failed to remove directory."
      gLogger.error( errStr, "%s %s" % ( url, res['Message'] ) )
      failed[url] = res['Message']
      continue
    returncode, _stdout, stderr = res['Value']
    if returncode in ( 0, 1 ):
      # nsrm gives no per-file accounting, so the removed counters stay zero.
      successful[url] = {'FilesRemoved':0, 'SizeRemoved':0}
    else:
      failed[url] = stderr
  return S_OK( {'Failed':failed, 'Successful':successful} )
def listDirectory( self, path ):
  """ List the supplied path. First checks whether the path is a directory then gets the contents.
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.listDirectory: Attempting to list %s directories." % len( urls ) )
  res = self.isDirectory( urls )
  if not res['OK']:
    return res
  successful = {}
  failed = res['Value']['Failed']
  directories = []
  for url, isDirectory in res['Value']['Successful'].items():
    if isDirectory:
      directories.append( url )
    else:
      errStr = "RFIOStorage.listDirectory: Directory does not exist."
      gLogger.error( errStr, url )
      failed[url] = errStr
  for directory in directories:
    comm = "nsls -l %s" % directory
    res = shellCall( self.timeout, comm )
    if res['OK']:
      returncode, stdout, stderr = res['Value']
      if not returncode == 0:
        errStr = "RFIOStorage.listDirectory: Failed to list directory."
        gLogger.error( errStr, "%s %s" % ( directory, stderr ) )
        failed[directory] = errStr
      else:
        subDirs = {}
        files = {}
        successful[directory] = {}
        for line in stdout.splitlines():
          permissions, _subdirs, _owner, _group, size, _month, _date, _timeYear, pfn = line.split()
          # 'dirac_directory' is a DIRAC place-holder entry: skip it
          if not pfn == 'dirac_directory':
            path = "%s/%s" % ( directory, pfn )
            if permissions[0] == 'd':
              # If the subpath is a directory
              subDirs[path] = True
            elif permissions[0] == 'm':
              # In the case that the path is a migrated file
              files[path] = {'Size':int( size ), 'Migrated':1}
            else:
              # In the case that the path is not migrated file
              files[path] = {'Size':int( size ), 'Migrated':0}
        successful[directory]['SubDirs'] = subDirs
        successful[directory]['Files'] = files
    else:
      errStr = "RFIOStorage.listDirectory: Completely failed to list directory."
      gLogger.error( errStr, "%s %s" % ( directory, res['Message'] ) )
      return S_ERROR( errStr )
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def getDirectoryMetadata( self, path ):
  """ Get the metadata for the directory

      :param path: storage path(s) - a string, a list of strings, or a dict keyed by path
      :return: S_OK( { 'Successful' : { url : metadataDict }, 'Failed' : { url : errStr } } )
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.getDirectoryMetadata: Attempting to get metadata for %s directories." % len( urls ) )
  # Metadata can only be fetched for real directories; fail the rest up front.
  res = self.isDirectory( urls )
  if not res['OK']:
    return res
  failed = res['Value']['Failed']
  directories = []
  for url, isDirectory in res['Value']['Successful'].items():
    if isDirectory:
      directories.append( url )
    else:
      errStr = "RFIOStorage.getDirectoryMetadata: Directory does not exist."
      gLogger.error( errStr, url )
      failed[url] = errStr
  res = self.__getPathMetadata( directories )
  if not res['OK']:
    return res
  # Merge the per-path results; the dead `successful = {}` initialisation and the
  # redundant else-after-return from the original are dropped.
  failed.update( res['Value']['Failed'] )
  successful = res['Value']['Successful']
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def getDirectorySize( self, path ):
  """ Get the size of the directory on the storage

      For every directory that can be listed, report the number of files it
      contains and their cumulative size in bytes.
  """
  res = self.__checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  gLogger.debug( "RFIOStorage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
  res = self.listDirectory( urls )
  if not res['OK']:
    return res
  failed = res['Value']['Failed']
  successful = {}
  for directory, contents in res['Value']['Successful'].items():
    # Sum the sizes reported by listDirectory; sub-directories are not recursed.
    fileInfo = contents['Files']
    totalSize = sum( info['Size'] for info in fileInfo.values() )
    gLogger.debug( "RFIOStorage.getDirectorySize: Successfully obtained size of %s." % directory )
    successful[directory] = {'Files': len( fileInfo ), 'Size': totalSize}
  resDict = {'Failed':failed, 'Successful':successful}
  return S_OK( resDict )
def __checkArgumentFormat( self, path ):
  """ FIXME: Can be replaced by a generic checkArgumentFormat Utility

      Normalise the supplied path argument into a list of urls.

      :param path: a string (single path), a list of paths, or a dict keyed by path
      :return: S_OK( list of urls ) or S_ERROR for any other type
  """
  # isinstance replaces the exact `type(x) == T` checks: it additionally accepts
  # subclasses, which is backward compatible.  types.StringTypes covers both str
  # and unicode on Python 2; the getattr fallback keeps this working on Python 3.
  if isinstance( path, getattr( types, 'StringTypes', str ) ):
    urls = [path]
  elif isinstance( path, list ):
    urls = path
  elif isinstance( path, dict ):
    # list( path ) yields the keys on both Python 2 and 3 (dict.keys() is a
    # view, not a list, on Python 3).
    urls = list( path )
  else:
    return S_ERROR( "RFIOStorage.__checkArgumentFormat: Supplied path is not of the correct format." )
  return S_OK( urls )
def __executeOperation( self, url, method ):
  """ Executes the requested functionality with the supplied url

      Looks up `method` on this object, invokes it with the single url and
      unpacks the bulk Successful/Failed result into a plain S_OK/S_ERROR.
  """
  # getattr with a default collapses the original hasattr/callable dance.
  fcn = getattr( self, method, None )
  if not callable( fcn ):
    return S_ERROR( "Unable to invoke %s, it isn't a member funtion of RFIOStorage" % method )
  res = fcn( url )
  if not res['OK']:
    return res
  if url in res['Value']['Successful']:
    return S_OK( res['Value']['Successful'][url] )
  return S_ERROR( res['Value']['Failed'][url] )
| vmendez/DIRAC | Resources/Storage/RFIOStorage.py | Python | gpl-3.0 | 38,359 | [
"DIRAC"
] | 72f91ba78caa4b8b88e431b67722fa7418f0d561732f655696640642dbe85de7 |
#-------------------------------------------------------------------------------
# Core routines for computing properties of symmetric random matrices.
#-------------------------------------------------------------------------------
import numpy
ra = numpy.random
la = numpy.linalg
def GOE(N):
    """Return an NxN element of the Gaussian Orthogonal Ensemble.

    Draw an NxN matrix of standard normals and symmetrize it by adding
    its own transpose.
    """
    sample = ra.standard_normal((N, N))
    return sample + sample.T
def centerEigenvalueDiff(mat):
    """Compute the eigvals of mat and then find the center eigval difference.

    Parameters
    ----------
    mat : square ndarray (assumed symmetric, e.g. from GOE -- TODO confirm)

    Returns
    -------
    float : the real part of the spacing between the two eigenvalues
        nearest the middle of the sorted spectrum.
    """
    N = len(mat)
    evals = numpy.sort(la.eigvals(mat))
    # // keeps the index an integer: the original N/2 raises a float-index
    # TypeError under Python 3 (or Python 2 with future division).
    diff = evals[N // 2] - evals[N // 2 - 1]
    return diff.real
def ensembleDiffs(num, N):
    """Return an array of num eigenvalue differences for the NxN GOE
    ensemble.

    Parameters
    ----------
    num : int -- number of independent samples to draw
    N : int -- matrix dimension of each GOE draw
    """
    diffs = numpy.empty(num)
    # range (not the Python-2-only xrange) keeps this running on Python 3.
    for i in range(num):
        mat = GOE(N)
        diffs[i] = centerEigenvalueDiff(mat)
    return diffs
def normalizeDiffs(diffs):
    """Rescale an array of eigenvalue diffs so that its mean is 1."""
    mean_spacing = diffs.mean()
    return diffs / mean_spacing
def normalizedEnsembleDiffs(num, N):
    """Return an array of num *normalized* eigenvalue differences for the
    NxN GOE ensemble."""
    return normalizeDiffs(ensembleDiffs(num, N))
| mastizada/kuma | vendor/packages/ipython/docs/examples/kernel/rmtkernel.py | Python | mpl-2.0 | 1,221 | [
"Gaussian"
] | cc35922bccf2a12e39c7039edf3707db232552152ef0a90b261ff303e9a2efdd |
"""
Bok choy acceptance and a11y tests for problem types in the LMS
See also lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
import random
import textwrap
import unittest
from abc import ABCMeta, abstractmethod
from nose.plugins.attrib import attr
from selenium.webdriver import ActionChains
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory,
)
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.tests.helpers import select_option_by_text
from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest
from common.test.acceptance.tests.helpers import EventsTestMixin
class ProblemTypeTestBaseMeta(ABCMeta):
    """
    MetaClass for ProblemTypeTestBase to ensure that the required attributes
    are defined in the inheriting classes.
    """
    def __call__(cls, *args, **kwargs):
        """
        Instantiate the class normally, then verify every required class
        attribute has been given a non-None value by the subclass.

        Raises:
            NotImplementedError: if any required attribute is missing or None.
        """
        obj = type.__call__(cls, *args, **kwargs)

        required_attrs = [
            'problem_name',
            'problem_type',
            'factory',
            'factory_kwargs',
            'status_indicators',
        ]

        for required_attr in required_attrs:
            msg = '{} is a required attribute for {}'.format(
                required_attr, str(cls)
            )
            # getattr with a default replaces the original
            # obj.__getattribute__ + try/except AttributeError: a missing
            # attribute and an explicit None are treated identically.
            if getattr(obj, required_attr, None) is None:
                raise NotImplementedError(msg)

        return obj
class ProblemTypeTestBase(ProblemsTest, EventsTestMixin):
    """
    Base class for testing assessment problem types in bok choy.

    This inherits from ProblemsTest, which has capabilities for testing problem
    features that are not problem type specific (checking, hinting, etc.).

    The following attributes must be explicitly defined when inheriting from
    this class:
        problem_name (str)
        problem_type (str)
        factory (ResponseXMLFactory subclass instance)

    Additionally, the default values for factory_kwargs and status_indicators
    may need to be overridden for some problem types.
    """
    # Python 2 metaclass hook: enforces the required attributes below at
    # instantiation time.  NOTE(review): this syntax is ignored on Python 3.
    __metaclass__ = ProblemTypeTestBaseMeta

    # Required attributes -- subclasses must override all three (enforced by
    # the metaclass).
    problem_name = None
    problem_type = None
    factory = None

    # Extra keyword args passed to factory.build_xml() by get_problem().
    factory_kwargs = {}
    # CSS selectors marking each grading status in the rendered problem;
    # overridden by problem types that render status differently.
    status_indicators = {
        'correct': ['span.correct'],
        'incorrect': ['span.incorrect'],
        'unanswered': ['span.unanswered'],
    }

    def setUp(self):
        """
        Visits courseware_page and defines self.problem_page.
        """
        super(ProblemTypeTestBase, self).setUp()
        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)

    def get_problem(self):
        """
        Creates a {problem_type} problem
        """
        # Generate the problem XML using capa.tests.response_xml_factory
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=self.factory.build_xml(**self.factory_kwargs),
            metadata={'rerandomize': 'always'}
        )

    def wait_for_status(self, status):
        """
        Waits for the expected status indicator.

        Args:
            status: one of ("correct", "incorrect", "unanswered")
        """
        msg = "Wait for status to be {}".format(status)
        # Any one of the configured selectors becoming visible counts.
        selector = ', '.join(self.status_indicators[status])
        self.problem_page.wait_for_element_visibility(selector, msg)

    @abstractmethod
    def answer_problem(self, correct):
        """
        Subclasses implement the UI interaction that answers their problem.

        Args:
            `correct` (bool): Inputs correct answer if True, else inputs
                incorrect answer.
        """
        raise NotImplementedError()
class ProblemTypeTestMixin(object):
    """
    Test cases shared amongst problem types.

    Mixed into ProblemTypeTestBase subclasses; relies on the base class
    providing problem_page, problem_name, answer_problem() and
    wait_for_status().
    """
    @attr('shard_2')
    def test_answer_correctly(self):
        """
        Scenario: I can answer a problem correctly
        Given External graders respond "correct"
        And I am viewing a "<ProblemType>" problem
        When I answer a "<ProblemType>" problem "correctly"
        Then my "<ProblemType>" answer is marked "correct"
        And The "<ProblemType>" problem displays a "correct" answer
        And a "problem_check" server event is emitted
        And a "problem_check" browser event is emitted
        """
        # Make sure we're looking at the right problem
        self.assertEqual(self.problem_page.problem_name, self.problem_name)

        # Answer the problem correctly
        self.answer_problem(correct=True)
        self.problem_page.click_check()
        self.wait_for_status('correct')

        # Check for corresponding tracking event: one server-side and one
        # browser-side "problem_check" event for this user.
        expected_events = [
            {
                'event_source': 'server',
                'event_type': 'problem_check',
                'username': self.username,
            }, {
                'event_source': 'browser',
                'event_type': 'problem_check',
                'username': self.username,
            },
        ]

        for event in expected_events:
            self.wait_for_events(event_filter=event, number_of_matches=1)

    @attr('shard_2')
    def test_answer_incorrectly(self):
        """
        Scenario: I can answer a problem incorrectly
        Given External graders respond "incorrect"
        And I am viewing a "<ProblemType>" problem
        When I answer a "<ProblemType>" problem "incorrectly"
        Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays a "incorrect" answer
        """
        # Wait (rather than assert) because the page may still be loading.
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Answer the problem incorrectly
        self.answer_problem(correct=False)
        self.problem_page.click_check()
        self.wait_for_status('incorrect')

    @attr('shard_2')
    def test_submit_blank_answer(self):
        """
        Scenario: I can submit a blank answer
        Given I am viewing a "<ProblemType>" problem
        When I check a problem
        Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays a "blank" answer
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Leave the problem unchanged and click check.
        self.problem_page.click_check()
        self.wait_for_status('incorrect')

    @attr('a11y')
    @unittest.skip("Does not check a11y in gacco")
    def test_problem_type_a11y(self):
        """
        Run accessibility audit for the problem type.
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Set the scope to the problem container
        self.problem_page.a11y_audit.config.set_scope(
            include=['div#seq_content'])

        # Run the accessibility audit.
        self.problem_page.a11y_audit.check_for_accessibility_errors()
class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Annotation Problem Type
    """
    problem_name = 'ANNOTATION TEST PROBLEM'
    problem_type = 'annotationresponse'

    factory = AnnotationResponseXMLFactory()
    factory_kwargs = {
        'title': 'Annotation Problem',
        'text': 'The text being annotated',
        'comment': 'What do you think the about this text?',
        'comment_prompt': 'Type your answer below.',
        'tag_prompt': 'Which of these items most applies to the text?',
        'options': [
            ('dog', 'correct'),
            ('cat', 'incorrect'),
            ('fish', 'partially-correct'),
        ]
    }

    # Annotation problems also support a partially-correct status.
    status_indicators = {
        'correct': ['span.correct'],
        'incorrect': ['span.incorrect'],
        'partially-correct': ['span.partially-correct'],
        'unanswered': ['span.unanswered'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for AnnotationProblemTypeTest
        """
        super(AnnotationProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-293
            ]
        })

    def answer_problem(self, correct):
        """
        Answer annotation problem: fill in the comment box and click the
        correct (0) or incorrect (1) tag.
        """
        choice = 0 if correct else 1
        answer = 'Student comment'

        self.problem_page.q(css='div.problem textarea.comment').fill(answer)
        # Fix: the original called .format(choice=choice) on a selector with no
        # replacement field -- a no-op.  The tag is selected via .nth(choice).
        self.problem_page.q(
            css='div.problem span.tag'
        ).nth(choice).click()
class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the checkbox problem type.
    """
    problem_name = 'CHECKBOX TEST PROBLEM'
    problem_type = 'checkbox'

    factory = ChoiceResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and Choice 2',
        'choice_type': 'checkbox',
        'choices': [True, False, True, False],
        'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3']
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for checkbox problems.
        """
        super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'aria-allowed-attr',  # TODO: AC-251
                'aria-valid-attr',  # TODO: AC-251
                'aria-roles',  # TODO: AC-251
                'checkboxgroup',  # TODO: AC-251
            ]
        })

    def answer_problem(self, correct):
        """
        Tick boxes 0 and 2 for a correct answer, or box 1 for an incorrect one.
        """
        selections = ["choice_0", "choice_2"] if correct else ["choice_1"]
        for selection in selections:
            self.problem_page.click_choice(selection)
class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the multiple-choice problem type.
    """
    problem_name = 'MULTIPLE CHOICE TEST PROBLEM'
    problem_type = 'multiple choice'

    factory = MultipleChoiceResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Choice 2',
        'choices': [False, False, True, False],
        'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3'],
    }

    # Multiple-choice marks status on the choice-group label itself.
    status_indicators = {
        'correct': ['label.choicegroup_correct'],
        'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for multiple-choice problems.
        """
        super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'aria-valid-attr',  # TODO: AC-251
                'radiogroup',  # TODO: AC-251
            ]
        })

    def answer_problem(self, correct):
        """
        Pick the right option (choice 2) or a wrong one (choice 1).
        """
        target = "choice_choice_2" if correct else "choice_choice_1"
        self.problem_page.click_choice(target)
class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the radio-button problem type.
    """
    problem_name = 'RADIO TEST PROBLEM'
    problem_type = 'radio'

    factory = ChoiceResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Choice 2',
        'choice_type': 'radio',
        'choices': [False, False, True, False],
        'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
    }

    # Radio problems mark status on the choice-group label itself.
    status_indicators = {
        'correct': ['label.choicegroup_correct'],
        'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for radio problems.
        """
        super(RadioProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'aria-valid-attr',  # TODO: AC-292
                'radiogroup',  # TODO: AC-292
            ]
        })

    def answer_problem(self, correct):
        """
        Pick the right option (choice 2) or a wrong one (choice 1).
        """
        target = "choice_2" if correct else "choice_1"
        self.problem_page.click_choice(target)
class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the drop-down problem type.
    """
    problem_name = 'DROP DOWN TEST PROBLEM'
    problem_type = 'drop down'

    factory = OptionResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Option 2',
        'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
        'correct_option': 'Option 2'
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for drop-down problems.
        """
        super(DropDownProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-291
            ]
        })

    def answer_problem(self, correct):
        """
        Select the right option ("Option 2") or a wrong one ("Option 3").
        """
        dropdown = self.problem_page.q(css='.problem .option-input select')
        select_option_by_text(dropdown, 'Option 2' if correct else 'Option 3')
class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the string-response problem type.
    """
    problem_name = 'STRING TEST PROBLEM'
    problem_type = 'string'

    factory = StringResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The answer is "correct string"',
        'case_sensitive': False,
        'answer': 'correct string',
    }

    # String problems render status on a surrounding div.
    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for string problems.
        """
        super(StringProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-290
            ]
        })

    def answer_problem(self, correct):
        """
        Type the expected string, or a deliberately wrong one.
        """
        self.problem_page.fill_answer(
            'correct string' if correct else 'incorrect string')
class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the numerical-response problem type.
    """
    problem_name = 'NUMERICAL TEST PROBLEM'
    problem_type = 'numerical'

    factory = NumericalResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The answer is pi + 1',
        'answer': '4.14159',
        'tolerance': '0.00001',
        'math_display': True,
    }

    # Numerical problems render status on a surrounding div.
    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for numerical problems.
        """
        super(NumericalProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-289
            ]
        })

    def answer_problem(self, correct):
        """
        Enter the symbolic answer "pi + 1", or a small random integer
        (which is always outside the 1e-5 tolerance of ~4.14159).
        """
        if correct:
            response = "pi + 1"
        else:
            response = str(random.randint(-2, 2))
        self.problem_page.fill_answer(response)
class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the formula-response problem type.
    """
    problem_name = 'FORMULA TEST PROBLEM'
    problem_type = 'formula'

    factory = FormulaResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
        'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
        'num_samples': 10,
        'tolerance': 0.00001,
        'math_display': True,
        'answer': 'x^2+2*x+y',
    }

    # Formula problems render status on a surrounding div.
    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
    }

    def setUp(self, *args, **kwargs):
        """
        Configure accessibility-rule exceptions for formula problems.
        """
        super(FormulaProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-288
            ]
        })

    def answer_problem(self, correct):
        """
        Enter the expected formula, or one that differs from it.
        """
        if correct:
            expression = "x^2+2*x+y"
        else:
            expression = 'x^2'
        self.problem_page.fill_answer(expression)
class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Script Problem Type.

    The problem embeds a python grading script (test_add_to_ten) that accepts
    any pair of integers summing to 10.
    """
    problem_name = 'SCRIPT TEST PROBLEM'
    problem_type = 'script'

    factory = CustomResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'Enter two integers that sum to 10.',
        'cfn': 'test_add_to_ten',
        'expect': '10',
        'num_inputs': 2,
        # Grading script executed server-side by the capa custom response.
        'script': textwrap.dedent("""
            def test_add_to_ten(expect,ans):
                try:
                    a1=int(ans[0])
                    a2=int(ans[1])
                except ValueError:
                    a1=0
                    a2=0
                return (a1+a2)==int(expect)
        """),
    }

    # Script problems render status on a surrounding div.
    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for ScriptProblemTypeTest
        """
        super(ScriptProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-287
            ]
        })

    def answer_problem(self, correct):
        """
        Answer script problem.
        """
        # Correct answer is any two integers that sum to 10
        first_addend = random.randint(-100, 100)
        second_addend = 10 - first_addend

        # If we want an incorrect answer, then change
        # the second addend so they no longer sum to 10
        if not correct:
            second_addend += random.randint(1, 10)

        self.problem_page.fill_answer(first_addend, input_num=0)
        self.problem_page.fill_answer(second_addend, input_num=1)
class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Code Problem Type.

    The submission goes to an external (fake, in tests) xqueue grader.
    """
    problem_name = 'CODE TEST PROBLEM'
    problem_type = 'code'

    factory = CodeResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'Submit code to an external grader',
        'initial_display': 'print "Hello world!"',
        'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}',
    }

    # Code problems render status inside the grader-status region.
    status_indicators = {
        'correct': ['.grader-status .correct ~ .debug'],
        'incorrect': ['.grader-status .incorrect ~ .debug'],
        'unanswered': ['.grader-status .unanswered ~ .debug'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for CodeProblemTypeTest
        """
        super(CodeProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'color-contrast',  # TODO: AC-286
                'label',  # TODO: AC-286
            ]
        })

    def answer_problem(self, correct):
        """
        Answer code problem.
        """
        # The fake xqueue server is configured to respond
        # correct / incorrect no matter what we submit.
        # Furthermore, since the inline code response uses
        # JavaScript to make the code display nicely, it's difficult
        # to programatically input text
        # (there's not <textarea> we can just fill text into)
        # For this reason, we submit the initial code in the response
        # (configured in the problem XML above)
        pass

    def test_answer_incorrectly(self):
        """
        Overridden for script test because the testing grader always responds
        with "correct"
        """
        pass

    def test_submit_blank_answer(self):
        """
        Overridden for script test because the testing grader always responds
        with "correct"
        """
        pass
class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase):
    """
    Base class for "Choice + Text" Problem Types.
    (e.g. RadioText, CheckboxText)

    NOTE(review): the "Probelm" typo in the class name is kept deliberately --
    subclasses reference this name.
    """
    # Subclasses set this to 'radio' or 'checkbox'.
    choice_type = None

    def _select_choice(self, input_num):
        """
        Click the input_num-th radio/checkbox input of the problem.
        """
        css = 'div.problem input.ctinput[type="{}"]'.format(self.choice_type)
        self.problem_page.q(css=css).nth(input_num).click()

    def _fill_input_text(self, value, input_num):
        """
        Type value into the input_num-th text field of the problem.
        """
        fields = self.problem_page.q(css='div.problem input.ctinput[type="text"]')
        fields.nth(input_num).fill(value)

    def answer_problem(self, correct):
        """
        Select choice 0 with "8" when correct, else choice 1 with "5".
        """
        if correct:
            index, text = 0, "8"
        else:
            index, text = 1, "5"
        self._select_choice(index)
        self._fill_input_text(text, index)
class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Radio Text Problem Type
    (a radio group where each choice carries a numeric text input).
    """
    problem_name = 'RADIO TEXT TEST PROBLEM'
    problem_type = 'radio_text'
    choice_type = 'radio'

    factory = ChoiceTextResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and input 8',
        'type': 'radiotextgroup',
        'choices': [
            ("true", {"answer": "8", "tolerance": "1"}),
            ("false", {"answer": "8", "tolerance": "1"}),
        ],
    }

    # Radio-text problems render status on a surrounding section element.
    status_indicators = {
        'correct': ['section.choicetextgroup_correct'],
        'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for RadioTextProblemTypeTest
        """
        super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-285
                'radiogroup',  # TODO: AC-285
            ]
        })
class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Checkbox Text Problem Type
    (a checkbox group where each choice carries a numeric text input).
    """
    problem_name = 'CHECKBOX TEXT TEST PROBLEM'
    problem_type = 'checkbox_text'
    choice_type = 'checkbox'

    factory = ChoiceTextResponseXMLFactory()
    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and input 8',
        'type': 'checkboxtextgroup',
        'choices': [
            ("true", {"answer": "8", "tolerance": "1"}),
            ("false", {"answer": "8", "tolerance": "1"}),
        ],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for CheckboxTextProblemTypeTest
        """
        super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-284
                'checkboxgroup',  # TODO: AC-284
            ]
        })
class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    Bok-choy tests for the image-response problem type.
    """
    problem_name = 'IMAGE TEST PROBLEM'
    problem_type = 'image'

    factory = ImageResponseXMLFactory()
    factory_kwargs = {
        'src': '/static/images/placeholder-image.png',
        'rectangle': '(0,0)-(50,50)',
    }

    def answer_problem(self, correct):
        """
        Click inside (correct) or outside (incorrect) the 50x50 target
        rectangle anchored at the image's top-left corner.
        """
        offset = 25 if correct else -25

        image = self.problem_page.q(
            css=".imageinput [id^='imageinput_'] img"
        )[0]

        # ActionChains methods return the chain, so the whole gesture can be
        # composed fluently and performed once.
        ActionChains(self.browser).move_to_element(image).move_by_offset(
            offset, offset).click().perform()
class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
    """
    TestCase Class for Symbolic Problem Type
    (answer is a symbolic math expression checked against `expect`).
    """
    problem_name = 'SYMBOLIC TEST PROBLEM'
    problem_type = 'symbolicresponse'

    factory = SymbolicResponseXMLFactory()
    factory_kwargs = {
        'expect': '2*x+3*y',
    }

    # Symbolic problems nest the status div inside a span.
    status_indicators = {
        'correct': ['span div.correct'],
        'incorrect': ['span div.incorrect'],
        'unanswered': ['span div.unanswered'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for SymbolicProblemTypeTest
        """
        super(SymbolicProblemTypeTest, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            'ignore': [
                'label',  # TODO: AC-294
            ]
        })

    def answer_problem(self, correct):
        """
        Answer symbolic problem.
        """
        # The incorrect expression uses entirely different variables, so it can
        # never match the expected '2*x+3*y'.
        choice = "2*x+3*y" if correct else "3*a+4*b"
        self.problem_page.fill_answer(choice)
| nttks/edx-platform | common/test/acceptance/tests/lms/test_problem_types.py | Python | agpl-3.0 | 26,501 | [
"VisIt"
] | 402650e760803a93de3cbe3c607ab2a108f62d2e083e3db06e833913d18b8e9f |
"""
desisim.scripts.quickgalaxies
=============================
"""
from __future__ import absolute_import, division, print_function
import healpy as hp
import numpy as np
import os
from datetime import datetime
from abc import abstractmethod, ABCMeta
from argparse import Action, ArgumentParser
from astropy.table import Table, vstack
from desisim.templates import BGS
from desisim.scripts.quickspectra import sim_spectra
from desitarget.mock.mockmaker import BGSMaker
from desitarget.cuts import isBGS_colors
from desiutil.log import get_logger, DEBUG
from yaml import load
import matplotlib.pyplot as plt
class SetDefaultFromFile(Action, metaclass=ABCMeta):
    """Abstract interface class to set command-line arguments from a file.

    Subclasses implement :meth:`_get_config_from_file` for a concrete file
    format; every key/value pair it returns is written onto the argparse
    namespace.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # `values` is the config-file path supplied on the command line.
        for key, value in self._get_config_from_file(values).items():
            setattr(namespace, key, value)

    @abstractmethod
    def _get_config_from_file(self, filename):
        """Parse `filename` and return a configuration dictionary."""
        raise NotImplementedError
class SetDefaultFromYAMLFile(SetDefaultFromFile):
    """Concrete class that sets command-line arguments from a YAML file."""
    def _get_config_from_file(self, filename):
        """Implementation of configuration reader.

        Parameters
        ----------
        filename : string
            Name of configuration file to read.

        Returns
        -------
        config : dictionary
            Configuration dictionary.
        """
        # Use safe_load: the original bare `load(f)` is deprecated (and
        # rejected by modern PyYAML without an explicit Loader), and would
        # execute arbitrary python tags from an untrusted config file.
        import yaml
        with open(filename, 'r') as f:
            config = yaml.safe_load(f)
        return config
def _get_healpixels_in_footprint(nside=64):
    """Obtain a list of HEALPix pixels in the DESI footprint.

    Parameters
    ----------
    nside : int
        HEALPix nside parameter (in form nside=2**k, k=[1,2,3,...]).

    Returns
    -------
    healpixels : ndarray
        List of HEALPix pixels within the DESI footprint.
    """
    from desimodel import footprint
    from desimodel.io import load_tiles

    # Load DESI tiles.
    tile_tab = load_tiles()

    # Sky position of every pixel at this resolution.
    pix_ids = np.arange(hp.nside2npix(nside))
    ra, dec = hp.pix2ang(nside, pix_ids, lonlat=True)

    # Keep only the pixels that land inside the DESI tile footprint.
    in_desi = footprint.is_point_in_desi(tile_tab, ra, dec)
    return pix_ids[in_desi]
def _default_wave(wavemin=None, wavemax=None, dw=0.2):
    """Generate a default wavelength vector for the output spectra.

    Unspecified limits default to the instrument throughput coverage,
    padded by 10 Angstroms on either side; `dw` is the bin width.
    """
    from desimodel.io import load_throughput

    if wavemin is None:
        wavemin = load_throughput('b').wavemin - 10.0
    if wavemax is None:
        wavemax = load_throughput('z').wavemax + 10.0

    return np.arange(round(wavemin, 1), wavemax, dw)
def bgs_write_simdata(sim, overwrite=False):
    """Create a metadata table with simulation inputs.

    Parameters
    ----------
    sim : dict
        Simulation parameters from command line.
    overwrite : bool
        Overwrite simulation data file.

    Returns
    -------
    simdata : Table
        Data table written to disk.
    """
    from desispec.io.util import makepath
    from desispec.io.util import write_bintable

    # Output goes under the user-specified simdir, tagged with the sim id.
    simdatafile = os.path.join(sim.simdir,
                               'bgs_{}_simdata.fits'.format(sim.simid))
    makepath(simdatafile)

    # FITS column definitions: name and dtype for each observing condition.
    cols = [
        ('SEED', 'S20'),
        ('NSPEC', 'i4'),
        ('EXPTIME', 'f4'),
        ('AIRMASS', 'f4'),
        ('SEEING', 'f4'),
        ('MOONFRAC', 'f4'),
        ('MOONSEP', 'f4'),
        ('MOONALT', 'f4')]

    # One row per simulation; all rows share the same command-line settings.
    simdata = Table(np.zeros(sim.nsim, dtype=cols))
    simdata['EXPTIME'].unit = 's'
    simdata['SEEING'].unit = 'arcsec'
    simdata['MOONSEP'].unit = 'deg'
    simdata['MOONALT'].unit = 'deg'

    simdata['SEED'] = sim.seed
    simdata['NSPEC'] = sim.nspec
    simdata['AIRMASS'] = sim.airmass
    simdata['SEEING'] = sim.seeing
    simdata['MOONALT'] = sim.moonalt
    simdata['MOONSEP'] = sim.moonsep
    simdata['MOONFRAC'] = sim.moonfrac
    simdata['EXPTIME'] = sim.exptime

    if overwrite or not os.path.isfile(simdatafile):
        print('Writing {}'.format(simdatafile))
        write_bintable(simdatafile, simdata, extname='SIMDATA', clobber=True)

    return simdata
def simdata2obsconditions(sim):
    """Pack simulation observing conditions into a dictionary.

    Parameters
    ----------
    sim : argparse.Namespace
        Parsed command-line simulation settings.

    Returns
    -------
    obs : dict
        Observation conditions dictionary, keyed by upper-case condition name.
    """
    return {
        'AIRMASS': sim.airmass,
        'EXPTIME': sim.exptime,
        'MOONALT': sim.moonalt,
        'MOONFRAC': sim.moonfrac,
        'MOONSEP': sim.moonsep,
        'SEEING': sim.seeing,
    }
def write_templates(filename, flux, wave, target, truth, objtruth):
    """Write galaxy templates to a FITS file.

    Parameters
    ----------
    filename : str
        Path to output file.
    flux : ndarray
        Array of flux data for template spectra.
    wave : ndarray
        Array of wavelengths.
    target : Table
        Target information.
    truth : Table
        Template simulation truth.
    objtruth : Table
        Object-specific truth data.
    """
    import astropy.units as u
    from astropy.io import fits

    hx = fits.HDUList()

    # The wavelength vector goes into the primary HDU.
    hdu = fits.PrimaryHDU(wave)
    hdu.header['EXTNAME'] = 'WAVE'
    hdu.header['BUNIT'] = 'Angstrom'
    hdu.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
    hx.append(hdu)

    # Flux array, in units of 1e-17 erg/s/cm2/Angstrom.
    fluxunits = 1e-17 * u.erg / (u.s * u.cm**2 * u.Angstrom)
    hdu = fits.ImageHDU(flux)
    hdu.header['EXTNAME'] = 'FLUX'
    hdu.header['BUNIT'] = str(fluxunits)
    hx.append(hdu)

    # Metadata tables: one binary-table HDU apiece, in the original order.
    for extname, table in (('TARGETS', target),
                           ('TRUTH', truth),
                           ('OBJTRUTH', objtruth)):
        hdu = fits.table_to_hdu(table)
        hdu.header['EXTNAME'] = extname
        hx.append(hdu)

    print('Writing {}'.format(filename))
    hx.writeto(filename, overwrite=True)
def parse(options=None):
    """Parse command-line options.

    Parameters
    ----------
    options : list or None
        Argument strings to parse; None means use sys.argv.

    Returns
    -------
    argparse.Namespace
        Parsed simulation settings.
    """
    parser = ArgumentParser(description='Fast galaxy simulator')
    # --config pre-loads defaults from a YAML file; later flags still override.
    parser.add_argument('--config', action=SetDefaultFromYAMLFile)

    #
    # Observational conditions.
    #
    cond = parser.add_argument_group('Observing conditions')
    cond.add_argument('--airmass', dest='airmass', type=float, default=1.,
                      help='Airmass [1..40].')
    cond.add_argument('--exptime', dest='exptime', type=int, default=300,
                      help='Exposure time [s].')
    cond.add_argument('--seeing', dest='seeing', type=float, default=1.1,
                      help='Seeing [arcsec].')
    cond.add_argument('--moonalt', dest='moonalt', type=float, default=-60.,
                      help='Moon altitude [deg].')
    cond.add_argument('--moonfrac', dest='moonfrac', type=float, default=0.,
                      help='Illuminated moon fraction [0..1].')
    cond.add_argument('--moonsep', dest='moonsep', type=float, default=180.,
                      help='Moon separation angle [deg].')

    #
    # Galaxy simulation settings.
    #
    mcset = parser.add_argument_group('Simulation settings')
    mcset.add_argument('--nside', dest='nside', type=int, default=64,
                       help='HEALPix NSIDE parameter.')
    mcset.add_argument('--nspec', dest='nspec', type=int, default=100,
                       help='Number of spectra per HEALPix pixel.')
    mcset.add_argument('--nsim', dest='nsim', type=int, default=10,
                       help='Number of simulations (HEALPix pixels).')
    mcset.add_argument('--seed', dest='seed', type=int, default=None,
                       help='Random number seed')
    mcset.add_argument('--addsnia', dest='addsnia', action='store_true', default=False,
                       help='Add SNe Ia to host spectra.')
    mcset.add_argument('--addsniip', dest='addsniip', action='store_true', default=False,
                       help='Add SNe IIp to host spectra.')
    mcset.add_argument('--snrmin', dest='snrmin', type=float, default=0.01,
                       help='SN/host minimum flux ratio.')
    mcset.add_argument('--snrmax', dest='snrmax', type=float, default=1.00,
                       help='SN/host maximum flux ratio.')

    #
    # Output settings.
    #
    output = parser.add_argument_group('Output settings')
    output.add_argument('--simid', dest='simid',
                        default=datetime.now().strftime('%Y-%m-%d'),
                        help='ID/name for simulations.')
    output.add_argument('--simdir', dest='simdir', default='',
                        help='Simulation output directory absolute path.')

    # Parse command line options.
    if options is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(options)

    return args
def main(args=None):
log = get_logger()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
# Save simulation output.
rng = np.random.RandomState(args.seed)
simdata = bgs_write_simdata(args)
obs = simdata2obsconditions(args)
# Generate list of HEALPix pixels to randomly sample from the mocks.
healpixels = _get_healpixels_in_footprint(nside=args.nside)
npix = np.minimum(10*args.nsim, len(healpixels))
pixels = rng.choice(healpixels, size=npix, replace=False)
ipix = iter(pixels)
# Set up the template generator.
maker = BGSMaker(seed=args.seed)
maker.template_maker = BGS(add_SNeIa=args.addsnia,add_SNeIIp=args.addsniip, wave=_default_wave())
for j in range(args.nsim):
# Loop until finding a non-empty healpixel (one with mock galaxies).
tdata = []
while len(tdata) == 0:
pixel = next(ipix)
tdata = maker.read(healpixels=pixel, nside=args.nside)
# Add SN generation options.
if args.addsnia or args.addsniip:
tdata['SNE_FLUXRATIORANGE'] = (args.snrmin, args.snrmax)
tdata['SNE_FILTER'] = 'decam2014-r'
# Generate nspec spectral templates and write them to "truth" files.
wave = None
flux, targ, truth, obj = [], [], [], []
# Generate templates until we have enough to pass brightness cuts.
ntosim = np.min((args.nspec, len(tdata['RA'])))
ngood = 0
while ngood < args.nspec:
idx = rng.choice(len(tdata['RA']), ntosim)
tflux, twave, ttarg, ttruth, tobj = \
maker.make_spectra(tdata, indx=idx)
# Apply color cuts.
is_bright = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='bright')
is_faint = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='faint')
is_wise = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='wise')
keep = np.logical_or(np.logical_or(is_bright, is_faint), is_wise)
_ngood = np.count_nonzero(keep)
if _ngood > 0:
ngood += _ngood
flux.append(tflux[keep, :])
targ.append(ttarg[keep])
truth.append(ttruth[keep])
obj.append(tobj[keep])
wave = maker.wave
flux = np.vstack(flux)[:args.nspec, :]
targ = vstack(targ)[:args.nspec]
truth = vstack(truth)[:args.nspec]
obj = vstack(obj)[:args.nspec]
if args.addsnia or args.addsniip:
# TARGETID in truth table is split in two; deal with it here.
truth['TARGETID'] = truth['TARGETID_1']
# Set up and verify the TARGETID across all truth tables.
n = len(truth)
new_id = 10000000*pixel + 100000*j + np.arange(1, n+1)
truth['TARGETID'][:] = new_id
targ['TARGETID'][:] = new_id
obj['TARGETID'][:] = new_id
assert(len(truth) == args.nspec)
assert(np.all(targ['TARGETID'] == truth['TARGETID']))
assert(len(truth) == len(np.unique(truth['TARGETID'])))
assert(len(targ) == len(np.unique(targ['TARGETID'])))
assert(len(obj) == len(np.unique(obj['TARGETID'])))
truthfile = os.path.join(args.simdir,
'bgs_{}_{:03}_truth.fits'.format(args.simid, j))
write_templates(truthfile, flux, wave, targ, truth, obj)
# Generate simulated spectra, given observing conditions.
specfile = os.path.join(args.simdir,
'bgs_{}_{:03}_spectra.fits'.format(args.simid, j))
sim_spectra(wave, flux, 'bgs', specfile, obsconditions=obs,
sourcetype='bgs', targetid=truth['TARGETID'],
redshift=truth['TRUEZ'], seed=args.seed, expid=j)
| desihub/desisim | py/desisim/scripts/quickgalaxies.py | Python | bsd-3-clause | 13,799 | [
"Galaxy"
] | 43e7e082e66f2ab29c291c994ab23a552809afeb34393ab19b1f18b7a941edc4 |
#!/usr/bin/env python
# File: mk_ds9_rgb.py
# Created on: Mon 18 Jun 2012 11:20:57 AM CDT
# Last Change: Tue Mar 5 11:46:25 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import numpy as np
import sys
import os
import stat
# read info files from comamnd line
#data = np.loadtxt(sys.argv[1])
#Does the same thing...
from mk_galaxy_struc import mk_galaxy_struc
galaxies = mk_galaxy_struc()
#Enable sorting by a keyword
import operator
galaxies.sort(key=operator.attrgetter('ICD_IH'))
for a in range(1,600,100):
# open output file
f1 = open('display_stamps_contours'+str(a)+'.sh','wt')
# write everything to a single line
f1.writelines('#!/bin/bash\n')
f1.writelines('ds9 ')
#for ID, icd, mass, s, e, u in zip(data[:,1], data[:,2], data[:,3], data[:,4],
# data[:,5], data[:,6]):
i =1
for galaxy in galaxies:
ID = galaxy.ID
icd = galaxy.ICD_IH
#mass = np.log10(galaxy.Mass)
sersic = galaxy.sersic
#s = galaxy.Spiral
#e = galaxy.Elliptical
#u = galaxy.Uncertain
z = galaxy.z
#grad = galaxy.Color_grad
if galaxy.ston_I >= 30.:
if i >= a:
if galaxy.field ==1:
base ='./GSD_IJH_20kpc/'
# make the images
f1.writelines('-rgb ')
f1.writelines('-blue '+base+'GSD_'+str(int(ID))+'_I.fits ')
f1.writelines('-green '+base+'GSD_'+str(int(ID))+'_J.fits ')
f1.writelines('-red '+base+'GSD_'+str(int(ID))+'_H.fits ')
color='white'
f1.writelines('-regions command ')
label = "'"+str(int(ID))+"'"
f1.writelines('"circle 10 30 0 #color='+color+' text='+label+'" ')
f1.writelines('-regions command ')
z1 = "'%3.2f'" % z
f1.writelines('"circle 31 30 0 #color='+color+' text='+z1+'" ')
f1.writelines('-regions command ')
icd1 = "'%4.2f'" % icd
f1.writelines('"circle 31 0 0 #color='+color+' text='+icd1+'" ')
#f1.writelines('-regions command ')
#mass = "'%4.2f'" % mass
#f1.writelines('"circle 12 0 0#color='+color+' text='+mass+'" ')
f1.writelines('-regions command ')
if sersic != None:
sersic = "'%3.2f'" % sersic
f1.writelines('"circle 12 0 0 #color='+color+' text='+sersic+'" ')
else:
sersic =-1
sersic = "'%3.2f'" % sersic
f1.writelines('"circle 12 0 0 #color='+color+' text='+sersic+'" ')
base ='./GSD_contours/'
f1.writelines('-contour load '+base+'GSD_'+str(int(ID))+'_seg.con ')
elif galaxy.field==2:
base ='./UDF_IJH_20kpc/'
f1.writelines('-rgb ')
f1.writelines('-blue '+base+'UDF_'+str(int(ID))+'_I.fits ')
f1.writelines('-green '+base+'UDF_'+str(int(ID))+'_J.fits ')
f1.writelines('-red '+base+'UDF_'+str(int(ID))+'_H.fits ')
color='yellow'
f1.writelines('-regions command ')
label = "'"+str(int(ID))+"'"
f1.writelines('"circle 20 60 0 #color='+color+' text='+label+'" ')
f1.writelines('-regions command ')
z1 = "'%3.2f'" % z
f1.writelines('"circle 62 60 0 #color='+color+' text='+z1+'" ')
f1.writelines('-regions command ')
icd1 = "'%4.2f'" % icd
f1.writelines('"circle 62 0 0 #color='+color+' text='+icd1+'" ')
#f1.writelines('-regions command ')
#mass = "'%4.2f'" % mass
#f1.writelines('"circle 24 0 0#color='+color+' text='+mass+'" ')
f1.writelines('-regions command ')
if sersic != None:
sersic = "'%3.2f'" % sersic
f1.writelines('"circle 24 0 0 #color='+color+' text='+sersic+'" ')
else:
sersic =-1
sersic = "'%3.2f'" % sersic
f1.writelines('"circle 24 0 0 #color='+color+' text='+sersic+'" ')
base ='./UDF_contours/'
f1.writelines('-contour load '+base+'UDF_'+str(int(ID))+'_seg.con ')
i+=1
if i > a+99:
break
else:
pass
f1.close()
#os.fchmod('display_stamps.sh',stat.S_IXUSR)
| boada/ICD | sandbox/legacy_plot_code/mk_ds9_rgb.py | Python | mit | 4,819 | [
"Galaxy"
] | 8301bc8a33ea62604c5d400ae5d466772c01da906358230b0f668e9a25b1b264 |
"""
System class for biomolecules using AMBER ff.
Set up using prmtop and inpcrd files used in Amber GMIN and Optim.
Potential parameters (e.g. non-bonded cut-offs are set in
TODO:
Parameters
----------
prmtopFname : str
prmtop file name
inpcrdFname : str
inpcrd file name
See Also
--------
BaseSystem
"""
# utils
import numpy as np
import tempfile
import os
import shutil
# pele
from pele.systems import BaseSystem
from pele.mindist import ExactMatchAtomicCluster, MinPermDistAtomicCluster
from pele.transition_states import orthogopt
from pele.transition_states import InterpolatedPathDensity, NEB, create_NEB
from pele.landscape import smoothPath
from pele.systems import BaseParameters
from pele.utils.elements import elements
from pele.systems.spawn_OPTIM import SpawnOPTIM
from read_amber import parse_topology_file
__all__ = ["AMBERSystem"]
class AMBERSystem(BaseSystem):
def __init__(self, prmtopFname, inpcrdFname):
super(AMBERSystem, self).__init__()
self.prmtopFname = prmtopFname
self.inpcrdFname = inpcrdFname
self.parse_prmtop()
# self.potential = self.get_potential()
self.set_params(self.params)
# self.natoms = self.potential.prmtop.topology._numAtoms
self.params.database.accuracy = 1e-3
self.params.basinhopping["temperature"] = 1.
self.params.takestep_random_displacement = BaseParameters()
self.params.takestep_random_displacement.stepsize = 2.
self.params.basinhopping.insert_rejected = False
# self.sanitycheck = True # todo: this should be part of params and show up in GUI
self.sanitycheck = False
if self.sanitycheck:
# self.params.basinhopping.confCheck = [self.check_cistrans_wrapper, self.check_CAchirality_wrapper]
self.params.basinhopping.confCheck = [self.check_CAchirality_wrapper]
self.params.double_ended_connect.conf_checks = [self.check_cistrans_wrapper_kwargs, self.check_CAchirality_wrapper_kwargs]
def parse_prmtop(self):
self.prmtop_parsed = parse_topology_file(self.prmtopFname)
atoms = self.prmtop_parsed.atoms.nodes()
atoms = sorted(atoms, key=lambda a: a.index)
self.atom_names = [a.element for a in atoms]
self.bonds = [(a1.index, a2.index) for a1, a2 in
self.prmtop_parsed.atoms.edges_iter()]
# def get_minimizer(self, **kwargs):
# """return a function to minimize the structure"""
# # overriding the C++ minimizer which is giving an error with openmm potential
# pot = self.get_potential()
# # kwargs = dict_copy_update(self.params["structural_quench_params"], kwargs)
# # return lambda coords: lbfgs_cpp(coords, pot, **kwargs)
# from pele.optimize import lbfgs_py
# return lambda coords: lbfgs_py(coords, pot, **kwargs)
def get_ndof(self):
return 3. * len(self.atom_names)
def set_params(self, params):
"""set default parameters for the system"""
#set NEBparams
NEBparams = params.double_ended_connect.local_connect_params.NEBparams
NEBparams.NEBquenchParams = BaseParameters()
# NEBquenchParams = NEBparams.NEBquenchParams
NEBparams.iter_density = 15.
NEBparams.image_density = 3.5
NEBparams.max_images = 50
NEBparams.k = 100.
NEBparams.adjustk_freq = 5
if False: #use fire
from pele.optimize import fire
NEBparams.quenchRoutine = fire
else: #use lbfgs
NEBparams.NEBquenchParams.maxErise = 100.5
NEBparams.NEBquenchParams.maxstep = .1
NEBparams.NEBquenchParams.tol = 1e-2
NEBparams.reinterpolate = 50
NEBparams.adaptive_niter = True
NEBparams.adaptive_nimages = True
NEBparams.adjustk_freq = 50
#set transition state search params
tsSearchParams = params.double_ended_connect.local_connect_params.tsSearchParams
tsSearchParams.nsteps = 200
tsSearchParams.lowestEigenvectorQuenchParams["nsteps"] = 100
tsSearchParams.lowestEigenvectorQuenchParams["tol"] = 0.001
tsSearchParams.tangentSpaceQuenchParams["maxstep"] = .1
tsSearchParams.nfail_max = 1000
tsSearchParams.nsteps_tangent1 = 5
tsSearchParams.nsteps_tangent2 = 100
tsSearchParams.max_uphill_step = .3
#control the output
tsSearchParams.verbosity = 0
NEBparams.NEBquenchParams.iprint = 50
tsSearchParams.lowestEigenvectorQuenchParams["iprint"] = -50
tsSearchParams.tangentSpaceQuenchParams["iprint"] = -5
tsSearchParams["iprint"] = 10
# self.params.double_ended_connect.local_connect_params.pushoff_params.verbose = True
# self.params.double_ended_connect.local_connect_params.pushoff_params.stepmin = 1e-3
# self.params.double_ended_connect.local_connect_params.pushoff_params.gdiff = 100.
# #self.params.double_ended_connect.local_connect_params.pushoff_params.quenchRoutine = fire
def __call__(self):
return self
def get_potential(self):
""" First attempts to get the potential from GMIN, then from OpenMM. If both fail, sets it to None """
if hasattr(self, 'potential'):
if self.potential is not None:
return self.potential
# default is None
self.potential = None
# get potential from GMIN
if os.path.exists('min.in') and os.path.exists('data') :
print '\nFiles min.in and data found. trying to import ambgmin_ now ..'
try:
import ambgmin_
import gmin_potential
self.potential = gmin_potential.GMINAmberPotential(self.prmtopFname, self.inpcrdFname)
print '\namberSystem> Using GMIN Amber potential ..'
return self.potential ;
except ImportError:
# using OpenMM because ambgmin_ could not be imported
print '\namberSystem> could not import ambgmin_. Will try OpenMM .. '
# get potential from OpenMM
try:
import openmm_potential
self.potential = openmm_potential.OpenMMAmberPotential(self.prmtopFname, self.inpcrdFname)
print '\namberSystem> Using OpenMM amber potential ..'
# check for openmm version
# data structures changed between openmm4 and 5
# crude check - todo
if hasattr(self.potential.prmtop.topology._bonds,'index'):
self.OpenMMVer = 5
else:
self.OpenMMVer = 4
return self.potential
except AttributeError:
print '\namberSystem> could not import openmm_potential ..'
if self.potenial == None :
print '\namberSystem> potential not set. Could not import GMIN or OpenMM potential.'
def get_random_configuration(self):
"""set coordinates before calling BH etc."""
""" returns a 1-D numpy array of length 3xNatoms """
# using pele.amber.read_amber and inpcrd
from pele.amber.read_amber import read_amber_coords
coords = read_amber_coords(self.inpcrdFname)
print "amberSystem> Number of coordinates:", len(coords)
coords = np.reshape( np.transpose(coords), len(coords),1)
# -- OpenMM
#from simtk.unit import angstrom as openmm_angstrom
## using pdb
#from simtk.openmm.app import pdbfile as openmmpdbReader
#pdb = openmmpdbReader.PDBFile('coords.pdb') # todo: coords.pdb is hardcoded
#coords = pdb.getPositions() / openmm_angstrom
#coords = np.reshape(np.transpose(coords), 3*len(coords),1 )
## using input inpcrd
#from simtk.openmm.app import AmberInpcrdFile
#inpcrd = AmberInpcrdFile( self.inpcrdFname )
#coords = inpcrd.getPositions() / openmm_angstrom
#coords = np.reshape(np.transpose(coords), 3*len(coords),1 )
return coords
def get_metric_tensor(self, coords):
"""metric tensor for all masses m_i=1.0 """
print 'amberSystem> setting up mass matrix for normal modes'
# return np.identity(coords.size)
massMatrix_tmp = np.identity(coords.size)
# get masses from 'elements' file
for i in self.potential.prmtop.topology.atoms():
atomNum = i.index
atomElem = i.name[0] # assuming elements corresponding to first character of atom name
m = elements[atomElem]['mass']
massMatrix_tmp[atomNum][atomNum] = 1/m
return massMatrix_tmp
def get_permlist(self):
import pdb2permlist
#return [[0, 2, 3], [11, 12, 13], [19, 20, 21] ] # aladipep
#return [[0, 2, 3], [11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43], [49,50,51]] # tetraala
if os.path.exists('coordsModTerm.pdb'):
print '\namberSystem> constructing perm list from coordsModTerm.pdb'
print ' (see comments in amberPDB_to_permList.py)'
plist = pdb2permlist.pdb2permList('coordsModTerm.pdb')
print '\namberSystem> Groups of permutable atoms (atom numbers start at 0) = '
for i in plist:
print i
return plist
else:
print 'amberSystem> coordsModTerm.pdb not found. permlist could not be created.'
return []
def get_mindist(self):
permlist = self.get_permlist()
return MinPermDistAtomicCluster(permlist=permlist, niter=10, can_invert=False)
# def createNEB(self, coords1, coords2):
# pot = self.get_potential()
# NEBparams = self.params.double_ended_connect.local_connect_params.NEBparams
# return create_NEB(pot, coords1, coords2, verbose=True, **NEBparams)
def get_orthogonalize_to_zero_eigenvectors(self):
return orthogopt
def get_compare_exact(self, **kwargs):
permlist = self.get_permlist()
return ExactMatchAtomicCluster(permlist=permlist, **kwargs)
def smooth_path(self, path, **kwargs):
mindist = self.get_mindist()
return smoothPath(path, mindist, **kwargs)
def drawCylinder(self, X1, X2):
from OpenGL import GL,GLUT, GLU
z = np.array([0.,0.,1.]) #default cylinder orientation
p = X2-X1 #desired cylinder orientation
r = np.linalg.norm(p)
t = np.cross(z,p) #angle about which to rotate
a = np.arccos( np.dot( z,p) / r ) #rotation angle
a *= (180. / np.pi) #change units to angles
GL.glPushMatrix()
GL.glTranslate( X1[0], X1[1], X1[2] )
GL.glRotate( a, t[0], t[1], t[2] )
g=GLU.gluNewQuadric()
GLU.gluCylinder(g, .1,0.1,r,30,30) #I can't seem to draw a cylinder
GL.glPopMatrix()
def draw(self, coordsl, index):
from pele.systems._opengl_tools import draw_sphere
coords=coordsl.reshape([-1,3])
com=np.mean(coords, axis=0)
# draw atoms as spheres
for i, name in enumerate(self.atom_names):# in self.potential.prmtop.topology.atoms():
x = coords[i,:] - com
col = elements[name]['color']
if index == 2:
col = [0.5, 1.0, .5]
rad = elements[name]['radius']/5
draw_sphere(x, radius=rad, color=col)
# draw bonds
for atomPairs in self.bonds:#self.potential.prmtop.topology.bonds():
# note that atom numbers in topology start at 0
xyz1 = coords[atomPairs[0]] - com
xyz2 = coords[atomPairs[1]] - com
self.drawCylinder(xyz1, xyz2)
def load_coords_pymol(self, coordslist, oname, index=1):
"""load the coords into pymol
the new object must be named oname so we can manipulate it later
Parameters
----------
coordslist : list of arrays
oname : str
the new pymol object must be named oname so it can be manipulated
later
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so should be
visually distinct, e.g. different colors. accepted values are 1 or 2
Notes
-----
the implementation here is a bit hacky. we create a temporary xyz file from coords
and load the molecule in pymol from this file.
"""
#pymol is imported here so you can do, e.g. basinhopping without installing pymol
import pymol
#create the temporary file
suffix = ".pdb"
f = tempfile.NamedTemporaryFile(mode="w", suffix=suffix)
fname = f.name
from simtk.openmm.app import pdbfile as openmmpdb
#write the coords into pdb file
from pele.mindist import CoMToOrigin
ct = 0
for coords in coordslist:
ct = ct + 1
coords = CoMToOrigin(coords.copy())
self.potential.copyToLocalCoords(coords)
from simtk.unit import angstrom as openmm_angstrom
# openmmpdb.PDBFile.writeFile(self.potential.prmtop.topology , self.potential.localCoords * openmm_angstrom , file=sys.stdout, modelIndex=1)
openmmpdb.PDBFile.writeModel(self.potential.prmtop.topology , self.potential.localCoords * openmm_angstrom , file=f, modelIndex=ct)
print "closing file"
f.flush()
#load the molecule from the temporary file
pymol.cmd.load(fname)
#get name of the object just created and change it to oname
objects = pymol.cmd.get_object_list()
objectname = objects[-1]
pymol.cmd.set_name(objectname, oname)
#set the representation
pymol.cmd.hide("everything", oname)
pymol.cmd.show("lines", oname)
# #set the color according to index
# if index == 1:
# pymol.cmd.color("red", oname)
# else:
# pymol.cmd.color("blue", oname)
def get_optim_spawner(self, coords1, coords2):
import os
from pele.config import config
optim = config.get("exec", "AMBOPTIM")
optim = os.path.expandvars(os.path.expanduser(optim))
print "optim executable", optim
return AmberSpawnOPTIM(coords1, coords2, self, OPTIM=optim, tempdir=False)
def populate_peptideAtomList(self):
listofC = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "C"]
listofO = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "O"]
listofN = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "N"]
listofH = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "H"]
# atom numbers of peptide bond
self.peptideBondAtoms = []
for i in listofC:
if listofO.__contains__(i+1) and listofN.__contains__(i+2) and listofH.__contains__(i+3):
self.peptideBondAtoms.append([i,i+1,i+2,i+3])
print '\namberSystem> Peptide bond atom numbers (C,O,N,H, in order): '
for i in self.peptideBondAtoms:
print i
def populate_CAneighborList(self):
listofCA = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "CA"]
listofC = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "C"]
listofN = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "N"]
listofCB = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "CB"]
# atom numbers of peptide bond
self.CAneighborList = []
for i in listofCA:
# find atoms bonded to CA
neighborlist = []
for b in self.potential.prmtop.topology.bonds():
# print b
if b[0] == i:
neighborlist.append(b[1])
if b[1] == i:
neighborlist.append(b[0])
# Commented, since this stuff doesn't seem to work at the moment...
# if self.OpenMMVer == 5 :
# # openmm5
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
# else: # openmm4
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
#print '---bonds = ', b[0].index , b[1].index
#print '---amberSystem> atoms bonded to CA ',i, ' = ', neighborlist
nn = [i]
# append C (=O)
for n in neighborlist:
if listofC.__contains__(n):
nn.append(n)
# append CB
for n in neighborlist:
if listofCB.__contains__(n):
nn.append(n)
# append N
for n in neighborlist:
if listofN.__contains__(n):
nn.append(n)
self.CAneighborList.append(nn)
# atoms numbers start at 0
print '\namberSystem> CA neighbors atom numbers (CA,C(=O),CB, N, in order): '
for i in self.CAneighborList:
print i
def check_cistrans_wrapper_kwargs(self, coords=None, **kwargs):
print 'in check_cistrans_wrapper_kwargs'
return self.check_cistrans(coords)
def check_cistrans_wrapper(self, energy, coords, **kwargs):
return self.check_cistrans(coords)
def check_cistrans(self, coords):
"""
Sanity check on the isomer state of peptide bonds
Returns False if the check fails i.e. if any of the peptide bond is CIS
"""
if not hasattr(self, "peptideBondAtoms"):
# atom numbers of peptide bonds
self.populate_peptideAtomList()
import measure
m = measure.Measure()
isTrans = True
for i in self.peptideBondAtoms:
atNum = i[0]
rC = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[1]
rO = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[2]
rN = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[3]
rH = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
# compute O-C-N-H torsion angle
rad, deg = m.torsion(rO,rC,rN,rH)
# print 'peptide torsion (deg) ', i, ' = ', deg
# check cis
if deg < 90 or deg > 270:
isTrans = False
print 'CIS peptide bond between atoms ', i, ' torsion (deg) = ', deg
return isTrans
def check_CAchirality_wrapper_kwargs(self, coords=None, **kwargs):
return self.check_CAchirality(coords)
def check_CAchirality_wrapper(self, energy, coords, **kwargs):
return self.check_CAchirality(coords)
def check_CAchirality(self, coords):
"""
Sanity check on the CA to check if it is L of D
Returns False if the check fails i.e. if any D-amino acid is present
"""
if not hasattr(self, "CAneighborList"):
# atom numbers of CA neighbors
self.populate_CAneighborList()
# print 'in check CA chirality'
import measure
m = measure.Measure()
isL = True
for i in self.CAneighborList:
atNum = i[0]
rCA = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[1]
rC = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[2]
rCB = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[3]
rN = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
# compute improper torsion angle between C-CA-CB and CA-CB-N
rad, deg = m.torsion(rC,rCA,rCB,rN)
# check cis
if deg < 180 :
# this condition was found by inspection of structures todo
isL = False
print 'chiral state of CA atom ', i[0], ' is D'
print 'CA improper torsion (deg) ', i, ' = ', deg
return isL
def test_potential(self, pdbfname ):
""" tests amber potential for pdbfname
Input
-----
pdbfname = full path to pdb file
"""
# read a conformation from pdb file
print 'reading conformation from coords.pdb'
from simtk.openmm.app import pdbfile as openmmpdb
from simtk.unit import angstrom as openmm_angstrom
pdb = openmmpdb.PDBFile(pdbfname)
coords = pdb.getPositions() / openmm_angstrom
coords = np.reshape(np.transpose(coords), 3*len(coords), 1)
self.potential = self.get_potential()
e = self.potential.getEnergy(coords)
print 'Energy (kJ/mol) = '
print e
e, g = self.potential.getEnergyGradient(coords)
gnum = self.potential.NumericalDerivative(coords, eps=1e-6)
print 'Energy (kJ/mol) = '
print e
print 'Analytic Gradient = '
print g[1:3]
print 'Numerical Gradient = '
print gnum[1:3]
print 'Num vs Analytic Gradient ='
print np.max(np.abs(gnum-g)), np.max(np.abs(gnum))
print np.max(np.abs(gnum-g)) / np.max(np.abs(gnum))
def test_connect(self, database):
#connect the all minima to the lowest minimum
minima = database.minima()
min1 = minima[0]
for min2 in minima[1:]:
connect = self.get_double_ended_connect(min1, min2, database)
connect.connect
def test_disconn_graph(self,database):
from pele.utils.disconnectivity_graph import DisconnectivityGraph
from pele.landscape import TSGraph
import matplotlib.pyplot as plt
graph = TSGraph(database).graph
dg = DisconnectivityGraph(graph, nlevels=3, center_gmin=True)
dg.calculate()
dg.plot()
plt.show()
def test_BH_group_rotation(self, db, nsteps, parameters):
from playground.group_rotation.group_rotation import GroupRotation
take_step_gr = GroupRotation(parameters)
self.params.basinhopping["temperature"] = 10.0
bh = self.get_basinhopping(database=db, takestep = take_step_gr)
print "Running BH with group rotation ..."
bh.run(nsteps)
print "Number of minima found = ", len(db.minima())
min0 = db.minima()[0]
print "lowest minimum found has energy = ", min0.energy
def test_BH(self,db,nsteps):
self.potential = self.get_potential()
from pele.takestep import RandomDisplacement, AdaptiveStepsizeTemperature
takeStepRnd = RandomDisplacement( stepsize=2 )
tsAdaptive = AdaptiveStepsizeTemperature(takeStepRnd, interval=10, verbose=True)
self.params.basinhopping["temperature"] = 10.0
# todo - how do you save N lowest?
bh = self.get_basinhopping(database=db, takestep = takeStepRnd)
bh = self.get_basinhopping(database=db, takestep = tsAdaptive)
print 'Running BH .. '
bh.run(nsteps)
print "Number of minima found = ", len(db.minima())
min0 = db.minima()[0]
print "lowest minimum found has energy = ", min0.energy
def test_mindist(self, db):
m1, m2 = db.minima()[:2]
mindist = sys.get_mindist()
dist, c1, c2 = mindist(m1.coords, m2.coords)
print "distance", dist
class AmberSpawnOPTIM(SpawnOPTIM):
def __init__(self, coords1, coords2, sys, **kwargs):
super(AmberSpawnOPTIM, self).__init__(coords1, coords2, **kwargs)
self.sys = sys
def write_odata_coords(self, coords, fout):
pass
def write_perm_allow(self, fname):
permallow = self.make_permallow_from_permlist(self.sys.get_permlist())
with open(fname, "w") as fout:
fout.write(permallow)
def write_additional_input_files(self, rundir, coords1, coords2):
#write start
with open(rundir + "/start", "w") as fout:
for xyz in coords1.reshape(-1,3):
fout.write( "%f %f %f\n" % tuple(xyz))
#write coords.prmtop and coords.inpcrd
shutil.copyfile(self.sys.prmtopFname, rundir + "/coords.prmtop")
shutil.copyfile(self.sys.inpcrdFname, rundir + "/coords.inpcrd")
min_in = """
STOP
&cntrl
imin = 1,
ncyc = 1,
maxcyc = 1,
igb = 0,
ntb = 0,
cut = 999.99,
rgbmax = 25.0,
ifswitch = 1
/
"""
with open(rundir + "/min.in", "w") as fout:
fout.write(min_in)
def write_odata(self, fout):
odatastr = """
DUMPALLPATHS
UPDATES 6000
NEWCONNECT 15 3 2.0 20.0 30 0.5
CHECKCHIRALITY
comment PATH dumps intermediate conformations along the path
PATH 100 1.0D-2
COMMENT NEWNEB 30 500 0.01
NEBK 10.0
comment DUMPNEBXYZ
AMBERIC
comment AMBERSTEP
DIJKSTRA EXP
DUMPALLPATHS
REOPTIMISEENDPOINTS
COMMENT MAXTSENERGY -4770.0
EDIFFTOL 1.0D-4
MAXERISE 1.0D-4 1.0D0
GEOMDIFFTOL 0.05D0
BFGSTS 500 10 100 0.01 100
NOIT
BFGSMIN 1.0D-6
PERMDIST
MAXSTEP 0.1
TRAD 0.2
MAXMAX 0.3
BFGSCONV 1.0D-6
PUSHOFF 0.1
STEPS 800
BFGSSTEPS 2000
MAXBFGS 0.1
NAB start
"""
fout.write(odatastr)
fout.write("\n")
# ============================ MAIN ================================
if __name__ == "__main__":
# create new amber system
sysAmb = AMBERSystem('../../examples/amber/aladipep/coords.prmtop', '../../examples/amber/aladipep/coords.inpcrd')
# load existing database
from pele.storage import Database
dbcurr = Database(db="../../examples/amber/aladipep/aladipep.db")
coords = sysAmb.get_random_configuration()
# aa = sysAmb.get_metric_tensor(coords)
# ------- TEST gui
from pele.gui import run as gr
gr.run_gui(sysAmb)
# ------ Test potential
sysAmb.test_potential("../../examples/amber/aladipep/coords.pdb")
# ------ BH
nsteps = 100
sysAmb.test_BH(dbcurr, nsteps)
exit()
# ------- Connect runs
sysAmb.test_connect(dbcurr)
# ------- Disconn graph
sysAmb.test_disconn_graph(dbcurr)
# ------- Test mindist
sysAmb.test_mindist( dbcurr)
| js850/pele | pele/amber/amberSystem.py | Python | gpl-3.0 | 28,485 | [
"Amber",
"OpenMM",
"PyMOL"
] | afeb5bcb5815645a8a283fe19bc0fd798d6e330c1e16622f82ccc6bb5505575a |
from ase.atoms import Atoms
def read_dftb(filename='dftb_in.hsd'):
"""Method to read coordinates form DFTB+ input file dftb_in.hsd
additionally read information about fixed atoms
and periodic boundary condition
"""
from ase import Atoms
if isinstance(filename, str):
myfile = open(filename)
lines = myfile.readlines()
atoms_pos = []
atom_symbols = []
type_names = []
my_pbc = False
mycell = []
for iline, line in enumerate(lines):
if (line.strip().startswith('#')):
pass
else:
if ('TypeNames' in line):
col = line.split()
for i in range(3, len(col)-1):
type_names.append(col[i].strip("\""))
elif ('Periodic' in line):
if ('Yes' in line):
my_pbc = True
elif ('LatticeVectors' in line):
for imycell in range(3):
extraline = lines[iline+imycell+1]
cols = extraline.split()
mycell.append(\
[float(cols[0]),float(cols[1]),float(cols[2])])
else:
pass
start_reading_coords = False
stop_reading_coords = False
for line in lines:
if (line.strip().startswith('#')):
pass
else:
if ('TypesAndCoordinates' in line):
start_reading_coords = True
if start_reading_coords:
if ('}' in line):
stop_reading_coords = True
if (start_reading_coords and not(stop_reading_coords)
and not 'TypesAndCoordinates' in line):
typeindexstr, xxx, yyy, zzz = line.split()[:4]
typeindex = int(typeindexstr)
symbol = type_names[typeindex-1]
atom_symbols.append(symbol)
atoms_pos.append([float(xxx), float(yyy), float(zzz)])
if isinstance(filename, str):
myfile.close()
atoms = Atoms(positions = atoms_pos, symbols = atom_symbols,
cell = mycell, pbc = my_pbc)
return atoms
def read_dftb_velocities(atoms, filename='geo_end.xyz'):
"""Method to read velocities (AA/ps) from DFTB+ output file geo_end.xyz
"""
from ase import Atoms
from ase.units import second
#AA/ps -> ase units
AngdivPs2ASE = 1.0/(1e-12*second)
if isinstance(filename, str):
myfile = open(filename)
lines = myfile.readlines()
#remove empty lines
lines_ok = []
for line in lines:
if line.rstrip():
lines_ok.append(line)
velocities = []
natoms = atoms.get_number_of_atoms()
last_lines = lines_ok[-natoms:]
for iline, line in enumerate(last_lines):
inp = line.split()
velocities.append([float(inp[4])*AngdivPs2ASE,
float(inp[5])*AngdivPs2ASE,
float(inp[6])*AngdivPs2ASE])
atoms.set_velocities(velocities)
return atoms
def write_dftb_velocities(atoms, filename='velocities.txt'):
"""Method to write velocities (in atomic units) from ASE
to a file to be read by dftb+
"""
from ase import Atoms
from ase.units import AUT, Bohr
#ase units -> atomic units
ASE2au = Bohr / AUT
if isinstance(filename, str):
myfile = open(filename, 'w')
else: # Assume it's a 'file-like object'
myfile = filename
velocities = atoms.get_velocities()
for velocity in velocities:
myfile.write(' %19.16f %19.16f %19.16f \n'
%( velocity[0] / ASE2au,
velocity[1] / ASE2au,
velocity[2] / ASE2au))
return
def write_dftb(filename, atoms):
    """Method to write atom structure in DFTB+ format
    (gen format).

    filename -- output path, or an open file-like object
    atoms    -- ASE Atoms object
    """
    import numpy as np
    from ase import Atoms  # FIX: Atoms() was referenced below without an import
    # sort by mass so atoms of the same species are contiguous
    atoms.set_masses()
    masses = atoms.get_masses()
    indexes = np.argsort(masses)
    atomsnew = Atoms()
    for i in indexes:
        atomsnew = atomsnew + atoms[i]
    if isinstance(filename, str):
        myfile = open(filename, 'w')
    else:  # Assume it's a 'file-like object'
        myfile = filename
    ispbc = atoms.get_pbc()
    box = atoms.get_cell()
    # first line: atom count and 'S' (supercell/periodic) or 'C' (cluster)
    if any(ispbc):
        myfile.write('%8d %2s \n' % (len(atoms), 'S'))
    else:
        myfile.write('%8d %2s \n' % (len(atoms), 'C'))
    # second line: the distinct chemical species in sorted-atom order.
    # FIX: the original tested `sym in allchem` on a string, so e.g. 'H'
    # was silently dropped whenever 'He' was already present.
    chemsym = atomsnew.get_chemical_symbols()
    seen = []
    for sym in chemsym:
        if sym not in seen:
            seen.append(sym)
    # reproduce the original trailing-space layout: 'H He  \n'
    myfile.write(' '.join(seen) + '  \n')
    coords = atomsnew.get_positions()
    # itype is the 1-based index into the species line above
    itype = 1
    for iatom, coord in enumerate(coords):
        if iatom > 0:
            if chemsym[iatom] != chemsym[iatom-1]:
                itype = itype + 1
        myfile.write('%5i%5i %19.16f %19.16f %19.16f \n'
                     % (iatom + 1, itype,
                        coords[iatom][0], coords[iatom][1], coords[iatom][2]))
    # write box (first row is a dummy origin expected by DFTB+)
    if any(ispbc):
        myfile.write(' %19.16f %19.16f %19.16f \n' % (0, 0, 0))
        myfile.write(' %19.16f %19.16f %19.16f \n'
                     % (box[0][0], box[0][1], box[0][2]))
        myfile.write(' %19.16f %19.16f %19.16f \n'
                     % (box[1][0], box[1][1], box[1][2]))
        myfile.write(' %19.16f %19.16f %19.16f \n'
                     % (box[2][0], box[2][1], box[2][2]))
    if isinstance(filename, str):
        myfile.close()
| suttond/MODOI | ase/io/dftb.py | Python | lgpl-3.0 | 5,488 | [
"ASE"
] | 28263ea6ab37013efe6268045d5351b77505ba46985c802972b25761a71a392b |
#!/usr/bin/env python
import numpy as np
import tables as tb
import sys,os
import cPickle
# One-letter -> three-letter amino acid code table
three_letter_aa = dict(
    A='ALA', C='CYS', D='ASP', E='GLU',
    F='PHE', G='GLY', H='HIS', I='ILE',
    K='LYS', L='LEU', M='MET', N='ASN',
    P='PRO', Q='GLN', R='ARG', S='SER',
    T='THR', V='VAL', W='TRP', Y='TYR')

# Numeric index for each three-letter code, alphabetical order
aa_num = dict([(k,i) for i,k in enumerate(sorted(three_letter_aa.values()))])

# Inverse table: three-letter -> one-letter
one_letter_aa = dict([(v,k) for k,v in three_letter_aa.items()])

# Multiply a value in degrees by `deg` to get radians
deg = np.deg2rad(1)

# Compression settings applied to every array written to the output file
default_filter = tb.Filters(complib='zlib', complevel=5, fletcher32=True)

# Number of bits used to encode a rotamer state
n_bit_rotamer = 4
def highlight_residues(name, fasta, residues_to_highlight):
    """Print the sequence in one-letter code with the residues whose 0-based
    indices appear in residues_to_highlight uppercased, everything else
    lowercased, prefixed by `name`."""
    fasta_one_letter = [one_letter_aa[x] for x in fasta]
    residues_to_highlight = set(residues_to_highlight)
    print '%s: %s' % (name, ''.join((f.upper() if i in residues_to_highlight else f.lower()) for i,f in enumerate(fasta_one_letter)))
def vmag(x):
    """Euclidean norm over the final axis, which must have length 3."""
    assert x.shape[-1] == 3
    return np.sqrt(np.square(x).sum(axis=-1))
def create_array(grp, nm, obj=None):
    # Convenience wrapper: extendable array in the global output file `t`,
    # with the module-wide compression filter applied.
    return t.create_earray(grp, nm, obj=obj, filters=default_filter)
def write_cavity_radial(cavity_radius):
    """Add a spherical 'cavity_radial' restraint confining every atom within
    cavity_radius (spring constant 5 for all atoms).  Writes into the global
    tables file `t`."""
    g = t.create_group(t.root.input.potential, 'cavity_radial')
    g._v_attrs.arguments = np.array(['pos'])
    create_array(g, 'id', np.arange(n_atom))
    create_array(g, 'radius', np.ones(n_atom)*cavity_radius)
    create_array(g, 'spring_constant', np.ones(n_atom)*5.)
def write_z_flat_bottom(parser, fasta, z_spring_table):
    """Add a 'z_flat_bottom' potential restraining CA z-coordinates.

    z_spring_table is a whitespace-separated table with header
    'residue z0 radius spring_constant', one spring per data row.
    Residue indices are 0-based; `parser` is used only for error reporting.
    """
    fields = [ln.split() for ln in open(z_spring_table,'U')]
    header = 'residue z0 radius spring_constant'
    actual_header = [x.lower() for x in fields[0]]
    if actual_header != header.split():
        parser.error('First line of z-flat-bottom table must be "%s" but is "%s"'
                     %(header," ".join(actual_header)))
    if not all(len(f)==len(fields[0]) for f in fields):
        parser.error('Invalid format for z-flat-bottom file')
    fields = fields[1:]  # drop the header row
    n_spring = len(fields)
    g = t.create_group(t.root.input.potential, 'z_flat_bottom')
    g._v_attrs.arguments = np.array(['pos'])
    atom = np.zeros((n_spring), dtype='i')
    z0 = np.zeros((n_spring,))
    radius = np.zeros((n_spring,))
    spring_constant = np.zeros((n_spring,))
    for i,f in enumerate(fields):
        res = int(f[0])
        msg = 'Z_flat energy specified for residue %i (zero is first residue) but there are only %i residues in the FASTA'
        if not (0 <= res < len(fasta)): raise ValueError(msg % (res, len(fasta)))
        atom[i] = int(f[0])*3 + 1  # restrain the CA atom in each residue
        z0[i] = float(f[1])
        radius[i] = float(f[2])
        spring_constant[i] = float(f[3])
    create_array(g, 'atom', obj=atom)
    create_array(g, 'z0', obj=z0)
    create_array(g, 'radius', obj=radius)
    create_array(g, 'spring_constant', obj=spring_constant)
def write_tension(parser, fasta, tension_table):
    """Add a constant-force 'tension' term pulling selected CA atoms.

    tension_table has header 'residue tension_x tension_y tension_z';
    residue indices are 0-based.
    """
    fields = [ln.split() for ln in open(tension_table,'U')]
    header = 'residue tension_x tension_y tension_z'
    actual_header = [x.lower() for x in fields[0]]
    if actual_header != header.split():
        parser.error('First line of tension table must be "%s" but is "%s"'
                     %(header," ".join(actual_header)))
    if not all(len(f)==len(fields[0]) for f in fields):
        parser.error('Invalid format for tension file')
    fields = fields[1:]  # drop the header row
    n_spring = len(fields)
    g = t.create_group(t.root.input.potential, 'tension')
    g._v_attrs.arguments = np.array(['pos'])
    atom = np.zeros((n_spring,), dtype='i')
    tension = np.zeros((n_spring,3))
    for i,f in enumerate(fields):
        res = int(f[0])
        msg = 'tension energy specified for residue %i (zero is first residue) but there are only %i residues in the FASTA'
        if not (0 <= res < len(fasta)): raise ValueError(msg % (res, len(fasta)))
        atom[i] = int(f[0])*3 + 1  # restrain the CA atom in each residue
        tension[i] = [float(x) for x in (f[1],f[2],f[3])]
    create_array(g, 'atom', obj=atom)
    create_array(g, 'tension_coeff', obj=tension)
def write_AFM(parser, fasta, AFM_table, time_initial, time_step):
    """Add an 'AFM' pulling potential group from a table file.

    AFM_table has header 'residue spring_const tip_pos_x tip_pos_y tip_pos_z
    pulling_vel_x pulling_vel_y pulling_vel_z', one pulled CA per data row.
    time_initial and time_step describe the tip motion schedule and are
    stored as attributes on the pulling_vel array.
    """
    fields = [ln.split() for ln in open(AFM_table, 'U')]
    header = 'residue spring_const tip_pos_x tip_pos_y tip_pos_z pulling_vel_x pulling_vel_y pulling_vel_z'
    actual_header = [x.lower() for x in fields[0]]
    if actual_header != header.split():
        # FIX: the original message said "tension table" (copy-paste from write_tension)
        parser.error('First line of AFM table must be "%s" but is "%s"'
                     % (header, " ".join(actual_header)))
    if not all(len(f) == len(fields[0]) for f in fields):
        parser.error('Invalid format for AFM file')
    fields = fields[1:]  # drop the header row
    n_spring = len(fields)
    g = t.create_group(t.root.input.potential, 'AFM')
    g._v_attrs.arguments = np.array(['pos'])
    atom = np.zeros((n_spring,), dtype='i')
    spring_const = np.zeros((n_spring,))
    starting_tip_pos = np.zeros((n_spring, 3))
    pulling_vel = np.zeros((n_spring, 3))
    for i, f in enumerate(fields):
        res = int(f[0])
        msg = 'AFM energy specified for residue %i (zero is first residue) but there are only %i residues in the FASTA'
        if not (0 <= res < len(fasta)):
            raise ValueError(msg % (res, len(fasta)))
        atom[i] = res*3 + 1  # restrain the CA atom in each residue
        spring_const[i] = float(f[1])  # explicit conversion (was implicit via numpy assignment)
        starting_tip_pos[i] = [float(x) for x in (f[2], f[3], f[4])]
        pulling_vel[i] = [float(x) for x in (f[5], f[6], f[7])]
    create_array(g, 'atom', obj=atom)
    create_array(g, 'spring_const', obj=spring_const)
    create_array(g, 'starting_tip_pos', obj=starting_tip_pos)
    create_array(g, 'pulling_vel', obj=pulling_vel)
    g.pulling_vel._v_attrs.time_initial = time_initial
    g.pulling_vel._v_attrs.time_step = time_step
def write_backbone_pair(fasta):
    """Add the 'backbone_pairs' group: per-residue reference positions of the
    N, CA, C, CB atoms in the residue's local (affine) frame.  GLY has no CB,
    marked with NaN; n_atom counts the finite atom rows per residue."""
    n_res = len(fasta)
    grp = t.create_group(potential, 'backbone_pairs')
    grp._v_attrs.arguments = np.array(['affine_alignment'])
    ref_pos = np.zeros((n_res,4,3))
    ref_pos[:,0] = (-1.19280531, -0.83127186, 0.)  # N
    ref_pos[:,1] = ( 0., 0., 0.)  # CA
    ref_pos[:,2] = ( 1.25222632, -0.87268266, 0.)  # C
    ref_pos[:,3] = ( 0., 0.94375626, 1.2068012)  # CB
    ref_pos[fasta=='GLY',3] = np.nan  # glycine has no CB
    # center on the N/CA/C triad so positions are relative to the affine frame
    ref_pos -= ref_pos[:,:3].mean(axis=1)[:,None]
    create_array(grp, 'id', obj=np.arange(n_res))
    create_array(grp, 'ref_pos', obj=ref_pos)
    create_array(grp, 'n_atom', obj=np.isfinite(grp.ref_pos[:].sum(axis=-1)).sum(axis=-1))
def write_affine_alignment(n_res):
    """Add the 'affine_alignment' group: a rigid-body frame per residue,
    fit from the backbone N, CA, C atoms against an idealized reference
    geometry centered on the triad."""
    grp = t.create_group(potential, 'affine_alignment')
    grp._v_attrs.arguments = np.array(['pos'])
    ref_geom = np.zeros((n_res,3,3))
    ref_geom[:,0] = (-1.19280531, -0.83127186, 0.)  # N
    ref_geom[:,1] = ( 0., 0., 0.)  # CA
    ref_geom[:,2] = ( 1.25222632, -0.87268266, 0.)  # C
    ref_geom -= ref_geom.mean(axis=1)[:,None]  # center each reference triad
    # backbone atoms are stored N,CA,C consecutively for each residue
    N = np.arange(n_res)*3 + 0
    CA = np.arange(n_res)*3 + 1
    C = np.arange(n_res)*3 + 2
    atoms = np.column_stack((N,CA,C))
    create_array(grp, 'atoms', obj=atoms)
    create_array(grp, 'ref_geom', obj=ref_geom)
def write_infer_H_O(fasta, excluded_residues):
    """Add the 'infer_H_O' group that places virtual amide H and carbonyl O
    atoms for hydrogen bonding.  Chain termini, residues in
    excluded_residues, and proline N-H are skipped."""
    n_res = len(fasta)
    # note that proline is not an hbond donor since it has no NH
    donor_residues = np.array([i for i in range(n_res) if i>0 and i not in excluded_residues and fasta[i]!='PRO'])
    acceptor_residues = np.array([i for i in range(n_res) if i<n_res-1 and i not in excluded_residues])
    print
    print 'hbond, %i donors, %i acceptors in sequence' % (len(donor_residues), len(acceptor_residues))
    # N-H and C=O bond lengths (presumably Angstroms -- matches typical values)
    H_bond_length = 0.88
    O_bond_length = 1.24
    grp = t.create_group(potential, 'infer_H_O')
    grp._v_attrs.arguments = np.array(['pos'])
    donors = t.create_group(grp, 'donors')
    acceptors = t.create_group(grp, 'acceptors')
    create_array(donors, 'residue', obj=donor_residues)
    create_array(acceptors, 'residue', obj=acceptor_residues)
    create_array(donors, 'bond_length', obj=H_bond_length*np.ones(len( donor_residues)))
    create_array(acceptors, 'bond_length', obj=O_bond_length*np.ones(len(acceptor_residues)))
    # donor frame atoms: C(prev), N, CA; acceptor frame atoms: CA, C, N(next)
    create_array(donors, 'id', obj=np.array((-1,0,1))[None,:] + 3*donor_residues [:,None])
    create_array(acceptors, 'id', obj=np.array(( 1,2,3))[None,:] + 3*acceptor_residues[:,None])
def write_environment(fasta, environment_library, sc_node_name, pl_node_name):
    """Add the burial/'environment' energy terms.

    Reads per-restype energy splines and coverage parameters from
    environment_library, places a virtual CB per residue, combines side-chain
    positions with their weights, computes how covered each CB is, and
    couples an energy spline to that coverage coordinate.
    sc_node_name/pl_node_name name the side-chain and placement nodes.
    """
    with tb.open_file(environment_library) as lib:
        energies = lib.root.energies[:]
        energies_x_offset = lib.root.energies._v_attrs.offset
        energies_x_inv_dx = lib.root.energies._v_attrs.inv_dx
        restype_order = dict([(str(x),i) for i,x in enumerate(lib.root.restype_order[:])])
        coverage_param = lib.root.coverage_param[:]
        assert coverage_param.shape == (len(restype_order),1,4)
        # params are r0,r_sharpness, dot0, dot_sharpness
    # Place CB
    pgrp = t.create_group(potential, 'placement_fixed_point_vector_only_CB')
    pgrp._v_attrs.arguments = np.array(['affine_alignment'])
    ref_pos = np.zeros((4,3))
    ref_pos[0] = (-1.19280531, -0.83127186, 0.)  # N
    ref_pos[1] = ( 0., 0., 0.)  # CA
    ref_pos[2] = ( 1.25222632, -0.87268266, 0.)  # C
    ref_pos[3] = ( 0., 0.94375626, 1.2068012)  # CB
    # FIXME this places the CB in a weird location since I should have
    # used ref_pos[:3].mean(axis=0,keepdims=1) instead. I cannot change
    # this without re-running training. Thankfully, it is a fairly small
    # mistake and probably irrelevant with contrastive divergence training.
    ref_pos -= ref_pos.mean(axis=0,keepdims=1)
    placement_data = np.zeros((1,6))
    placement_data[0,0:3] = ref_pos[3]  # CB position in the residue frame
    placement_data[0,3:6] = (ref_pos[3]-ref_pos[2])/vmag(ref_pos[3]-ref_pos[2])  # unit C->CB direction
    create_array(pgrp, 'affine_residue', np.arange(len(fasta)))
    create_array(pgrp, 'layer_index', np.zeros(len(fasta),dtype='i'))
    create_array(pgrp, 'placement_data', placement_data)
    # Bring position and probability together for the side chains
    wgrp = t.create_group(potential, 'weighted_pos')
    wgrp._v_attrs.arguments = np.array([sc_node_name, pl_node_name])
    sc_node = t.get_node(t.root.input.potential, sc_node_name)
    n_sc = sc_node.affine_residue.shape[0]
    create_array(wgrp, 'index_pos', np.arange(n_sc))
    create_array(wgrp, 'index_weight', np.arange(n_sc))
    # Compute SC coverage of the CB
    cgrp = t.create_group(potential, 'environment_coverage')
    cgrp._v_attrs.arguments = np.array(['placement_fixed_point_vector_only_CB','weighted_pos'])
    # group1 is the source CB
    create_array(cgrp, 'index1', np.arange(len(fasta)))
    create_array(cgrp, 'type1', np.array([restype_order[s] for s in fasta]))  # one type per CB type
    create_array(cgrp, 'id1', np.arange(len(fasta)))
    # group 2 is the weighted points to interact with
    create_array(cgrp, 'index2', np.arange(n_sc))
    create_array(cgrp, 'type2', 0*np.arange(n_sc))  # for now coverage is very simple, so no types on SC
    create_array(cgrp, 'id2', sc_node.affine_residue[:])
    create_array(cgrp, 'interaction_param', coverage_param)
    # # Transform coverage to [0,1] scale (1 indicates the most buried)
    # tgrp = t.create_group(potential, 'uniform_transform_environment')
    # tgrp._v_attrs.arguments = np.array(['environment_coverage'])
    # create_array(tgrp, 'bspline_coeff', coverage_transform)
    # tgrp.bspline_coeff._v_attrs.spline_offset = coverage_transform_offset
    # tgrp.bspline_coeff._v_attrs.spline_inv_dx = coverage_transform_inv_dx
    # # Linearly couple the transform to energies
    # egrp = t.create_group(potential, 'linear_coupling_uniform_environment')
    # egrp._v_attrs.arguments = np.array(['uniform_transform_environment'])
    # create_array(egrp, 'couplings', energies)
    # create_array(egrp, 'coupling_types', [restype_order[s] for s in fasta])
    # Couple an energy to the coverage coordinates
    egrp = t.create_group(potential, 'nonlinear_coupling_environment')
    egrp._v_attrs.arguments = np.array(['environment_coverage'])
    create_array(egrp, 'coeff', energies)
    egrp.coeff._v_attrs.spline_offset = energies_x_offset
    egrp.coeff._v_attrs.spline_inv_dx = energies_x_inv_dx
    create_array(egrp, 'coupling_types', [restype_order[s] for s in fasta])
def write_count_hbond(fasta, hbond_energy, coverage_library, loose_hbond, sc_node_name):
    """Add hydrogen-bond pair terms ('protein_hbond'), optional side-chain
    coverage of the H-bond partners, hydrophobe placement/coverage, and the
    per-hbond energy attribute.  loose_hbond widens the geometric criteria."""
    n_res = len(fasta)
    infer_group = t.get_node('/input/potential/infer_H_O')
    n_donor = infer_group.donors .id.shape[0]
    n_acceptor = infer_group.acceptors.id.shape[0]
    igrp = t.create_group(potential, 'protein_hbond')
    igrp._v_attrs.arguments = np.array(['infer_H_O'])
    # group1 is the HBond donors
    create_array(igrp, 'index1', np.arange(0,n_donor))
    create_array(igrp, 'type1', np.zeros(n_donor, dtype='i'))
    create_array(igrp, 'id1', infer_group.donors.residue[:])
    # group 2 is the HBond acceptors
    create_array(igrp, 'index2', np.arange(n_donor,n_donor+n_acceptor))
    create_array(igrp, 'type2', np.zeros(n_acceptor, dtype='i'))
    create_array(igrp, 'id2', infer_group.acceptors.residue[:])
    # parameters are inner_barrier, inner_scale, outer_barrier, outer_scale, wall_dp, inv_dp_width
    create_array(igrp, 'interaction_param', np.array([[
        [(0.5 if loose_hbond else 1.4 ), 1./0.10,
         (3.1 if loose_hbond else 2.5 ), 1./0.125,
         (0.182 if loose_hbond else 0.682), 1./0.05,
         0., 0.]]]))
    if sc_node_name:  # only create hbond_coverage if there are rotamer side chains
        cgrp = t.create_group(potential, 'hbond_coverage')
        cgrp._v_attrs.arguments = np.array(['protein_hbond',sc_node_name])
        with tb.open_file(coverage_library) as data:
            create_array(cgrp, 'interaction_param', data.root.coverage_interaction[:])
            bead_num = dict((k,i) for i,k in enumerate(data.root.bead_order[:]))
            hydrophobe_placement = data.root.hydrophobe_placement[:]
            hydrophobe_interaction = data.root.hydrophobe_interaction[:]
        # group1 is the HBond partners
        create_array(cgrp, 'index1', np.arange(n_donor+n_acceptor))
        create_array(cgrp, 'type1', 1*(np.arange(n_donor+n_acceptor) >= n_donor))  # donor is 0, acceptor is 1
        create_array(cgrp, 'id1', np.concatenate([infer_group.donors .residue[:],
                                                  infer_group.acceptors.residue[:]]))
        # group 2 is the sidechains
        sc_node = t.get_node(t.root.input.potential, sc_node_name)
        rseq = sc_node.beadtype_seq[:]
        sc_resnum = sc_node.affine_residue[:]
        create_array(cgrp, 'index2', np.arange(len(rseq)))
        create_array(cgrp, 'type2', np.array([bead_num[s] for s in rseq]))
        create_array(cgrp, 'id2', sc_resnum)
        # three hydrophobic interaction points per residue (one per backbone layer)
        grp = t.create_group(potential, 'placement_fixed_point_vector_scalar')
        grp._v_attrs.arguments = np.array(['affine_alignment'])
        create_array(grp, 'affine_residue', np.arange(3*n_res)/3)  # NOTE: Python 2 integer division
        create_array(grp, 'layer_index', np.arange(3*n_res)%3)
        create_array(grp, 'placement_data', hydrophobe_placement)
        cgrp = t.create_group(potential, 'hbond_coverage_hydrophobe')
        cgrp._v_attrs.arguments = np.array(['placement_fixed_point_vector_scalar',sc_node_name])
        with tb.open_file(coverage_library) as data:
            create_array(cgrp, 'interaction_param', data.root.hydrophobe_interaction[:])
            bead_num = dict((k,i) for i,k in enumerate(data.root.bead_order[:]))
        # group1 is the hydrophobes
        # create_array(cgrp, 'index1', np.arange(n_res))
        # create_array(cgrp, 'type1', 0*np.arange(n_res))
        # create_array(cgrp, 'id1', np.arange(n_res))
        create_array(cgrp, 'index1', np.arange(3*n_res))
        create_array(cgrp, 'type1', np.arange(3*n_res)%3)
        create_array(cgrp, 'id1', np.arange(3*n_res)/3)  # NOTE: Python 2 integer division
        # group 2 is the sidechains
        rseq = sc_node.beadtype_seq[:]
        create_array(cgrp, 'index2', np.arange(len(rseq)))
        create_array(cgrp, 'type2', np.array([bead_num[s] for s in rseq]))
        create_array(cgrp, 'id2', sc_resnum)
    if hbond_energy > 0.:
        print '\n**** WARNING **** hydrogen bond formation energy set to repulsive value\n'
    grp = t.create_group(potential, 'hbond_energy')
    grp._v_attrs.arguments = np.array(['protein_hbond'])
    grp._v_attrs.protein_hbond_energy = hbond_energy
def make_restraint_group(group_num, residues, initial_pos, strength):
    """Append random intra-group distance springs among the backbone atoms of
    `residues`, with equilibrium distances taken from initial_pos and the
    given spring strength.  Extends the existing dist_spring arrays in place.
    NOTE(review): group_num appears unused -- confirm whether it is needed."""
    np.random.seed(314159)  # make groups deterministic
    grp = t.root.input.potential.dist_spring
    # snapshot the existing springs so the arrays can be re-created extended
    id = grp.id[:]
    equil_dist = grp.equil_dist[:]
    spring_const = grp.spring_const[:]
    bonded_atoms = grp.bonded_atoms[:]
    n_orig = id.shape[0]
    # all three backbone atoms of every restrained residue
    r_atoms = np.array([(3*i+0,3*i+1,3*i+2) for i in sorted(residues)]).reshape((-1,))
    random_pairing = lambda: np.column_stack((r_atoms, np.random.permutation(r_atoms)))
    pairs = np.concatenate([random_pairing() for i in range(2)], axis=0)
    # NOTE: x/3 relies on Python 2 integer division to map atom -> residue
    pairs = [((x,y) if x<y else (y,x)) for x,y in pairs if x/3!=y/3]  # avoid same-residue restraints
    pairs = np.array(sorted(set(pairs)))
    pair_dists = vmag(initial_pos[pairs[:,0]]-initial_pos[pairs[:,1]])
    # replace the old arrays with extended versions
    grp.id._f_remove()
    grp.equil_dist._f_remove()
    grp.spring_const._f_remove()
    grp.bonded_atoms._f_remove()
    create_array(grp, 'id', obj=np.concatenate((id, pairs), axis=0))
    create_array(grp, 'equil_dist', obj=np.concatenate((equil_dist, pair_dists), axis=0))
    create_array(grp, 'spring_const', obj=np.concatenate((spring_const,strength*np.ones(len(pairs))),axis=0))
    create_array(grp, 'bonded_atoms', obj=np.concatenate((bonded_atoms,np.zeros(len(pairs),dtype='int')),axis=0))
def make_tab_matrices(phi, theta, bond_length):
    '''TAB matrices are torsion-angle-bond affine transformation matrices.

    phi, theta, bond_length must share a common shape; the result has that
    shape with a trailing (4,4) homogeneous transform appended.
    '''
    phi = np.asarray(phi)
    theta = np.asarray(theta)
    bond_length = np.asarray(bond_length)
    assert phi.shape == theta.shape == bond_length.shape
    # promote to the common dtype of the three inputs
    out = np.zeros(phi.shape + (4,4), dtype=(phi+theta+bond_length).dtype)
    sin_p, cos_p = np.sin(phi), np.cos(phi)
    sin_t, cos_t = np.sin(theta), np.cos(theta)
    b = bond_length
    # rotation block
    out[...,0,0] = -cos_t
    out[...,0,1] = -sin_t
    out[...,1,0] = cos_p*sin_t
    out[...,1,1] = -cos_p*cos_t
    out[...,1,2] = -sin_p
    out[...,2,0] = sin_p*sin_t
    out[...,2,1] = -sin_p*cos_t
    out[...,2,2] = cos_p
    # translation column (bond displacement) and homogeneous row
    out[...,0,3] = -b*cos_t
    out[...,1,3] = b*cos_p*sin_t
    out[...,2,3] = b*sin_p*sin_t
    out[...,3,3] = 1
    return out
def construct_equilibrium_structure(rama, angles, bond_lengths):
    """Build backbone coordinates by chaining torsion-angle-bond transforms.

    rama         -- (n_res,3) phi/psi/omega per residue
    angles       -- (n_res,3) backbone bond angles
    bond_lengths -- (n_res,3) backbone bond lengths
    Returns (3*n_res,3) positions for the N,CA,C atoms of each residue.
    """
    assert rama.shape == angles.shape == bond_lengths.shape
    n_res = rama.shape[0]
    n_atom = 3*n_res
    assert rama.shape == (n_res,3)
    # torsion used to place each atom; the first three atoms take torsion 0
    t = np.zeros(n_atom)
    a = angles.ravel()
    b = bond_lengths.ravel()
    t[3::3] = rama[:-1,1]  # psi of residue i places N of residue i+1
    t[4::3] = rama[:-1,2]  # omega of residue i places CA of residue i+1
    t[5::3] = rama[1: ,0]  # phi of residue i+1 places its C
    transforms = make_tab_matrices(t,a,b)
    curr_affine = np.eye(4)
    pos = np.zeros((3*n_res,3))
    # right apply all transformations
    for i,mat in enumerate(transforms):
        curr_affine = np.dot(curr_affine, mat)
        pos[i] = curr_affine[:3,3]  # translation part is the atom position
    return pos
def random_initial_config(n_res):
    """Random but physically sensible initial backbone: uniform phi/psi,
    all-trans omega, idealized bond angles and bond lengths."""
    # a reasonable model where the chain grows obeying sensible angles and omegas
    rama = np.random.random((n_res,3))*2*np.pi - np.pi
    angles = np.zeros_like(rama)
    lengths = np.zeros_like(rama)
    rama[:,2] = np.pi  # all trans omega's
    angles[:,0] = 120.0*deg  # CA->C->N angle
    angles[:,1] = 120.0*deg  # C->N->CA angle
    angles[:,2] = 109.5*deg  # N->CA->C angle
    # bond lengths follow the same pattern used by write_dist_spring
    lengths[:,0] = 1.453
    lengths[:,1] = 1.526
    lengths[:,2] = 1.300
    return construct_equilibrium_structure(rama, angles, lengths)
# write dist_spring potential
def write_dist_spring(args):
    """Add harmonic bond-length springs along the backbone chain; equilibrium
    distances cycle through the three backbone bond types and stiffness
    comes from args.bond_stiffness."""
    # create a linear chain
    grp = t.create_group(potential, 'dist_spring')
    grp._v_attrs.arguments = np.array(['pos'])
    id = np.arange(n_atom-1)
    id = np.column_stack((id,id+1))  # every pair of consecutive atoms
    equil_dist = np.zeros(id.shape[0])
    # same three-bond pattern as random_initial_config's lengths
    equil_dist[0::3] = 1.453
    equil_dist[1::3] = 1.526
    equil_dist[2::3] = 1.300
    spring_const = args.bond_stiffness*np.ones(id.shape[0])
    bonded_atoms = np.ones(id.shape[0], dtype='int')
    create_array(grp, 'id', obj=id)
    create_array(grp, 'equil_dist', obj=equil_dist)
    create_array(grp, 'spring_const', obj=spring_const)
    create_array(grp, 'bonded_atoms', obj=bonded_atoms)
def write_angle_spring(args):
    """Add backbone bond-angle springs.  equil_dist stores the COSINE of the
    equilibrium angle; id rows are (i, i+2, i+1) -- apparently the two
    endpoint atoms first and the apex atom last (confirm against the
    angle_spring consumer)."""
    grp = t.create_group(potential, 'angle_spring')
    grp._v_attrs.arguments = np.array(['pos'])
    id = np.arange(n_atom-2)
    id = np.column_stack((id,id+2,id+1))
    equil_angles = np.zeros(id.shape[0])
    equil_angles[0::3] = np.cos(109.5*deg)  # N->CA->C angle
    equil_angles[1::3] = np.cos(120.0*deg)  # CA->C->N angle
    equil_angles[2::3] = np.cos(120.0*deg)  # C->N->CA angle
    create_array(grp, 'id', obj=id)
    create_array(grp, 'equil_dist', obj=equil_angles)
    create_array(grp, 'spring_const', obj=args.angle_stiffness*np.ones(id.shape[0]))
def write_dihedral_spring(fasta_seq):
    """Add springs on the omega dihedral (the CA,C,N,CA window starting at
    each CA): target 0 deg for cis-proline ('CPR'), 180 deg otherwise."""
    # this is primarily used for omega bonds
    grp = t.create_group(potential, 'dihedral_spring')
    grp._v_attrs.arguments = np.array(['pos'])
    id = np.arange(1,n_atom-3,3)  # start at CA atom
    id = np.column_stack((id,id+1,id+2,id+3))
    target_angle = np.where((fasta_seq[1:]=='CPR'), 0.*deg, 180.*deg)
    create_array(grp, 'id', obj=id)
    create_array(grp, 'equil_dist', obj=target_angle)
    create_array(grp, 'spring_const', obj=30.0*np.ones(id.shape[0]))
def basin_cond_prob_fcns(a_phi, a_psi):
    """Return conditional-probability functions for the five Ramachandran
    basins (alpha_R, beta, PPII, alpha_L, gamma).  a_phi/a_psi set the
    sharpness of the smoothed basin-box edges."""
    def basin_box(phi0,phi1, psi0,psi1):
        # Smooth indicator of the box [phi0,phi1]x[psi0,psi1]; boxes that
        # wrap the periodic boundary are unwrapped by adding 2*pi.
        if phi0 > phi1: phi1 += 2*np.pi
        if psi0 > psi1: psi1 += 2*np.pi
        assert phi0 < phi1
        assert psi0 < psi1
        phi_mid = 0.5*(phi1 + phi0)
        psi_mid = 0.5*(psi1 + psi0)
        # switching value of cos-distance at the box edge
        phi_switch = np.cos(phi1 - phi_mid)
        psi_switch = np.cos(psi1 - psi_mid)
        def f(phi,psi, phi_mid=phi_mid, psi_mid=psi_mid, phi_switch=phi_switch, psi_switch=psi_switch,
              a_phi=a_phi, a_psi=a_psi):
            dphi = np.cos(phi - phi_mid)  # cos in the loc function ensures continuous, periodic function
            dpsi = np.cos(psi - psi_mid)
            return 1./(
                (1.+np.exp(-a_phi*(dphi-phi_switch))) *
                (1.+np.exp(-a_psi*(dpsi-psi_switch))) )
        return f
    # convenience wrapper taking degrees
    bb = lambda phi0, phi1, psi0,psi1: basin_box(phi0*deg, phi1*deg, psi0*deg, psi1*deg)
    basin_fcns = [
        bb(-180., 0., -100., 50.),  # alpha_R
        bb(-180.,-100., 50.,-100.),  # beta
        bb(-100., 0., 50.,-100.),  # PPII
        bb( 0., 180., -50., 100.),  # alpha_L
        bb( 0., 180., 100., -50.)]  # gamma
    # normalize each basin indicator by the sum over all basins
    basin_cond_prob = [
        (lambda phi,psi, bf=bf: bf(phi,psi)/sum(bf2(phi,psi) for bf2 in basin_fcns))
        for bf in basin_fcns]
    return basin_cond_prob
def mixture_potential(weights, potentials):
    '''Free energy of a weighted mixture of component potentials.

    potentials must be normalized to the same value, preferably 1.
    Computed as -log(sum_i w_i * exp(-V_i)) using a numerically stable
    shifted log-sum-exp.
    '''
    pots = np.array(potentials)
    assert len(weights) == len(pots)
    w = np.array(weights)
    norm_w = w / w.sum(axis=0)
    # pad trailing singleton axes so the weights broadcast against the maps
    extra_axes = (1,) * (len(pots.shape) - len(norm_w.shape))
    norm_w = norm_w.reshape(norm_w.shape + extra_axes)
    # fold the (normalized) weights into the potentials, then log-sum-exp
    shifted = pots - np.log(norm_w)
    floor = shifted.min(axis=0)
    return floor - np.log(np.exp(floor - shifted).sum(axis=0))
def read_rama_maps_and_weights(seq, rama_group, mode='mixture', allow_CPR=True):
    """Assemble per-residue Rama potential maps from the neighbor-dependent
    dimer maps stored in rama_group.

    mode='product' adds left/right maps (subtracting the 'ALL' reference);
    mode='mixture' takes a weighted log-mixture of the two.  allow_CPR=False
    maps the cis-proline type back to PRO for the central residue.
    Returns (pots, weights): pots is (len(seq), nphi, npsi) float32.
    """
    assert mode in ['mixture', 'product']
    restype = rama_group._v_attrs.restype
    dirtype = rama_group._v_attrs.dir
    ridx_dict = dict([(x,i) for i,x in enumerate(restype)])
    didx = dict([(x,i) for i,x in enumerate(dirtype)])
    ridx = lambda resname, keep_cpr=True: (ridx_dict[resname] if resname!='CPR' or keep_cpr else ridx_dict['PRO'])
    dimer_pot = rama_group.dimer_pot[:]
    dimer_weight = rama_group.dimer_weight[:]
    assert len(seq) >= 3  # avoid bugs
    # cis-proline is only CPR when it is the central residue, otherwise just use PRO
    V = lambda r,d,n: dimer_pot [ridx(r,allow_CPR), didx[d], ridx(n,False)]
    W = lambda r,d,n: dimer_weight[ridx(r,allow_CPR), didx[d], ridx(n,False)]
    pots = np.zeros((len(seq), dimer_pot.shape[-2], dimer_pot.shape[-1]), dtype='f4')
    weights = np.zeros((len(seq),),dtype='f4')
    # chain termini only have a single neighbor
    pots [0] = V(seq[0], 'right', seq[1])
    weights[0] = W(seq[0], 'right', seq[1])
    for i,l,c,r in zip(range(1,len(seq)-1), seq[:-2], seq[1:-1], seq[2:]):
        if mode == 'product':
            pots[i] = V(c,'left',l) + V(c,'right',r) - V(c,'right','ALL')
            weights[i] = 0.5*(W(c,'left',l) + W(c,'right',r))  # always just average weights
        elif mode == 'mixture':
            # it's a little sticky to figure out what the mixing proportions should be
            # there is basically a one-sided vs two-sided problem (what if we terminate a sheet?)
            # I am going with one interpretation that may not be right
            pots[i] = mixture_potential([W(c,'left',l), W(c,'right',r)], [V(c,'left',l), V(c,'right',r)])
            weights[i] = 0.5*(W(c,'left',l) + W(c,'right',r))
        else:
            raise RuntimeError('impossible')
    pots [-1] = V(seq[-1], 'left', seq[-2])
    weights[-1] = W(seq[-1], 'left', seq[-2])
    # Ensure normalization
    pots -= -np.log(np.exp(-1.0*pots).sum(axis=(-2,-1), keepdims=1))
    return pots, weights
def read_weighted_maps(seq, rama_library_h5, sheet_mixing=None, mode='mixture'):
    """Read Rama maps for seq from rama_library_h5.  With sheet_mixing=None
    return coil maps only; otherwise mix coil and sheet maps, scaling the
    sheet weights by exp(-sheet_mixing)."""
    with tb.open_file(rama_library_h5) as tr:
        coil_pots, coil_weights = read_rama_maps_and_weights(seq, tr.root.coil, mode=mode)
        if sheet_mixing is None:
            return coil_pots
        else:
            # NOTE(review): the sheet read uses the default mode rather than
            # the caller-supplied `mode` -- confirm this asymmetry is intended
            sheet_pots, sheet_weights = read_rama_maps_and_weights(seq, tr.root.sheet, allow_CPR=False)
            return mixture_potential([coil_weights, sheet_weights*np.exp(-sheet_mixing)],
                                     [coil_pots, sheet_pots])
def write_torus_dbn(seq, torus_dbn_library):
    """Add a TorusDBN-style hidden-state Rama prior: per-state torus basin
    parameters ('torus_dbn') plus a fixed HMM over hidden states
    ('fixed_hmm').  Terminal residues are excluded since their Rama angles
    are undefined."""
    # FIXME use omega emission to handle CPR code
    with tb.open_file(torus_dbn_library) as data:
        dbn_aa_num = dict((x,i) for i,x in enumerate(data.root.restype_order[:]))
        # basin_param order is log_norm,kappa_phi,mu_phi,kappa_psi,mu_psi,kappa_phi_minus_psi
        basin_param = data.root.basin_param[:]
        aa_basin_energy = data.root.aa_basin_energy[:]
        transition_energy = data.root.transition_energy[:]
        restypes = np.array([dbn_aa_num[s] for s in seq])
    # Old-style parsing, more similar to original TorusDBN format
    # dbn_aa_num = dict((x,i) for i,x in enumerate(data.root.restype_order[:]))
    # log_normalization = data.root.TORUS_LOGNORMCONST[:]
    # kappa = data.root.TORUS_KAPPA[:]
    # mu = data.root.TORUS_MU[:]
    # aa_emission_energy = -np.log(data.root.AA_EMISSION[:].T)
    # cis_emission_energy = -np.log(data.root.CIS_EMISSION[:])
    # transition_energies = -np.log(data.root.HIDDEN_TRANSITION[:])
    # n_state = transition_energies.shape[0]
    # # Add type to handle cis-proline
    # CPR_prior = aa_emission_energy[dbn_aa_num['PRO']] + cis_emission_energy[:,1]
    # dbn_aa_num['CPR'] = len(dbn_aa_num)
    # aa_emission_energy = np.concatenate((aa_emission_energy,CPR_prior[None,:]),axis=0)
    # basin_param = np.zeros((n_state,6),'f4')
    # basin_param[:,0] = log_normalization.ravel()
    # basin_param[:,1] = kappa[:,0]
    # basin_param[:,2] = mu [:,0]
    # basin_param[:,3] = kappa[:,1]
    # basin_param[:,4] = mu [:,1]
    # basin_param[:,5] = kappa[:,2]
    egrp = t.create_group(potential, 'torus_dbn')
    egrp._v_attrs.arguments = np.array(['rama_coord'])
    # since Rama angles are not valid for the first and last angles,
    # don't confuse the HMM by including them
    create_array(egrp, 'id', np.arange(1,len(seq)-1))
    create_array(egrp, 'restypes', restypes[1:-1])
    create_array(egrp, 'prior_offset_energies', aa_basin_energy)
    create_array(egrp, 'basin_param', basin_param)
    hgrp = t.create_group(potential, 'fixed_hmm')
    hgrp._v_attrs.arguments = np.array(['torus_dbn'])
    create_array(hgrp, 'index', np.arange(egrp.id.shape[0]))
    create_array(hgrp, 'transition_energy', transition_energy)
def write_rama_map_pot(seq, rama_library_h5, sheet_mixing_energy=None, secstr_bias='', mode='mixture'):
    """Add the 'rama_map_pot' Ramachandran potential.

    sheet_mixing_energy controls coil/sheet mixing (also storing +/- eps
    maps for finite-difference derivatives).  secstr_bias optionally biases
    helix/sheet basins per residue from a table 'residue secstr energy'.
    """
    grp = t.create_group(potential, 'rama_map_pot')
    grp._v_attrs.arguments = np.array(['rama_coord'])
    rama_pot = read_weighted_maps(seq, rama_library_h5, sheet_mixing_energy, mode)
    if sheet_mixing_energy is not None:
        # support finite differencing for potential derivative
        eps = 1e-2
        grp._v_attrs.sheet_eps = eps
        create_array(grp, 'more_sheet_rama_pot', read_weighted_maps(seq, rama_library_h5, sheet_mixing_energy+eps))
        create_array(grp, 'less_sheet_rama_pot', read_weighted_maps(seq, rama_library_h5, sheet_mixing_energy-eps))
    if secstr_bias:
        assert len(rama_pot.shape) == 3
        phi = np.linspace(-np.pi,np.pi,rama_pot.shape[1],endpoint=False)[:,None]
        psi = np.linspace(-np.pi,np.pi,rama_pot.shape[2],endpoint=False)[None,:]
        sigmoid_lessthan = lambda a,b: 1./(1.+np.exp(-(b-a)/(10.*deg)))
        # smooth indicator maps for the helical and sheet basins
        helical_basin = sigmoid_lessthan(phi,0.*deg) * sigmoid_lessthan(-100.*deg,psi) * sigmoid_lessthan(psi,50.*deg)
        sheet_basin = sigmoid_lessthan(phi,0.*deg) * (sigmoid_lessthan(psi,-100.*deg) + sigmoid_lessthan(50.*deg,psi))
        f = (ln.split() for ln in open(secstr_bias))
        assert f.next() == 'residue secstr energy'.split()  # NOTE: .next() is Python 2
        for residue,secstr,energy in f:
            residue = int(residue)
            energy = float(energy)
            if secstr == 'helix':
                rama_pot[residue] += energy * helical_basin
            elif secstr == 'sheet':
                rama_pot[residue] += energy * sheet_basin
            else:
                raise ValueError('secstr in secstr-bias file must be helix or sheet')
    # let's remove the average energy from each Rama map
    # so that the Rama potential emphasizes its variation
    rama_pot -= (rama_pot*np.exp(-rama_pot)).sum(axis=(-2,-1),keepdims=1)
    create_array(grp, 'residue_id', obj=np.arange(len(seq)))
    create_array(grp, 'rama_map_id', obj=np.arange(rama_pot.shape[0]))
    create_array(grp, 'rama_pot', obj=rama_pot)
def compact_sigmoid(x, sharpness):
    """Compactly-supported smooth step in y = x*sharpness: exactly 1 for
    y <= -1, exactly 0 for y >= 1, and a cubic interpolant in between."""
    y = x*sharpness
    core = 0.25 * (y+2) * (y-1)**2
    # clamp outside the transition region
    core = np.where(y < 1, core, np.zeros_like(core))
    return np.where(y > -1, core, np.ones_like(core))
def double_compact_sigmoid(x, half_width, sharpness):
    """Symmetric window: ~1 for |x| < half_width, falling smoothly to exactly
    0 by |x| = half_width + 1/sharpness (product of two compact sigmoids)."""
    return compact_sigmoid(x-half_width, sharpness) * compact_sigmoid(-x-half_width, sharpness)
def angular_compact_double_sigmoid(theta, center, half_width, sharpness):
    """double_compact_sigmoid applied to the angular deviation theta-center,
    wrapped into (-pi, pi] so the window is periodic."""
    dev = theta-center
    dev = np.where((dev< np.pi), dev, dev-2*np.pi)
    dev = np.where((dev>-np.pi), dev, dev+2*np.pi)
    return double_compact_sigmoid(dev, half_width, sharpness)
def rama_box(rama, center, half_width, sharpness):
    """Smoothed box window over (phi, psi) pairs.

    rama       -- (..., 2) array of Ramachandran angles
    center     -- (..., 2) window centers; may carry leading basin axes
    half_width -- same shape as center
    sharpness  -- edge sharpness passed to the angular sigmoids
    Returns the product of the phi and psi window functions; when center has
    leading axes, those axes are appended to the result's shape.
    """
    assert rama.shape[-1] == center.shape[-1] == half_width.shape[-1] == 2
    s = center.shape[:-1]
    if not s:
        # FIX: the original passed an extra `rama` argument here, calling the
        # 4-parameter angular_compact_double_sigmoid with five arguments --
        # a guaranteed TypeError whenever center had no leading axes.
        return (angular_compact_double_sigmoid(rama[...,0], center[0], half_width[0], sharpness)*
                angular_compact_double_sigmoid(rama[...,1], center[1], half_width[1], sharpness))
    else:
        result = np.zeros(rama.shape[:-1] + center.shape[:-1])
        # evaluate the window for every combination of leading basin indices
        for inds in np.indices(s).reshape((len(s),-1)).T:
            inds = tuple(inds)
            if len(inds) == 1: inds = inds[0]
            value = (
                angular_compact_double_sigmoid(rama[...,0], center[inds,0], half_width[inds,0], sharpness)*
                angular_compact_double_sigmoid(rama[...,1], center[inds,1], half_width[inds,1], sharpness))
            result[...,inds] = value
        return result
def read_fasta(file_obj):
    """Parse a FASTA file into an array of three-letter residue codes.

    The first line must be a '>' header.  A '*' immediately before a 'P'
    marks a cis-proline, which is emitted as 'CPR'.
    """
    all_lines = list(file_obj)
    assert all_lines[0][0] == '>'  # header line is mandatory
    letters = ''.join(ln.strip().replace('\r','') for ln in all_lines[1:])
    residues = []
    pending_cis = False
    for ch in letters:
        if pending_cis:
            assert ch == 'P'  # '*' must be followed by proline
            residues.append('CPR')
            pending_cis = False
            continue
        if ch == "*":
            pending_cis = True
        else:
            residues.append(three_letter_aa[ch])
    return np.array(residues)
def write_CB(fasta):
    """Add 'placement_fixed_point_only_CB': a virtual CB position for every
    residue, expressed in the residue's affine (N,CA,C) frame."""
    # Place CB
    pgrp = t.create_group(potential, 'placement_fixed_point_only_CB')
    pgrp._v_attrs.arguments = np.array(['affine_alignment'])
    ref_pos = np.zeros((4,3))
    ref_pos[0] = (-1.19280531, -0.83127186, 0.)  # N
    ref_pos[1] = ( 0., 0., 0.)  # CA
    ref_pos[2] = ( 1.25222632, -0.87268266, 0.)  # C
    ref_pos[3] = ( 0., 0.94375626, 1.2068012)  # CB
    # center on the N/CA/C triad (unlike write_environment's CB placement,
    # which centers on all four atoms)
    ref_pos -= ref_pos[:3].mean(axis=0,keepdims=1)
    placement_data = np.zeros((1,3))
    placement_data[0,0:3] = ref_pos[3]  # CB offset in the residue frame
    create_array(pgrp, 'affine_residue', np.arange(len(fasta)))
    create_array(pgrp, 'layer_index', np.zeros(len(fasta),dtype='i'))
    create_array(pgrp, 'placement_data', placement_data)
def write_contact_energies(parser, fasta, contact_table):
    """Add pairwise CB 'contact' energies from a table with header
    'residue1 residue2 energy distance transition_width'.  Each contact is a
    smoothed step centered at `distance` with the given transition width."""
    fields = [ln.split() for ln in open(contact_table,'U')]
    header_fields = 'residue1 residue2 energy distance transition_width'.split()
    if [x.lower() for x in fields[0]] != header_fields:
        parser.error('First line of contact energy table must be "%s"'%(" ".join(header_fields)))
    if not all(len(f)==len(header_fields) for f in fields):
        parser.error('Invalid format for contact file')
    fields = fields[1:]  # drop the header row
    n_contact = len(fields)
    g = t.create_group(t.root.input.potential, 'contact')
    g._v_attrs.arguments = np.array(['placement_fixed_point_only_CB'])
    id = np.zeros((n_contact,2), dtype='i')
    energy = np.zeros((n_contact,))
    dist = np.zeros((n_contact,))
    width = np.zeros((n_contact,))
    for i,f in enumerate(fields):
        id[i] = (int(f[0]), int(f[1]))
        msg = 'Contact energy specified for residue %i (zero is first residue) but there are only %i residues in the FASTA'
        if not (0 <= id[i,0] < len(fasta)): raise ValueError(msg % (id[i,0], len(fasta)))
        if not (0 <= id[i,1] < len(fasta)): raise ValueError(msg % (id[i,1], len(fasta)))
        energy[i] = float(f[2])
        dist[i] = float(f[3])
        width[i] = float(f[4])  # compact_sigmoid cuts off at distance +/- width
        if width[i] <= 0.: raise ValueError('Cannot have negative contact transition_width')
    # 0-based indexing sometimes trips up users, so give them a quick check
    highlight_residues('residues that participate in any --contact potential in uppercase', fasta, id.ravel())
    if energy.max() > 0.:
        print ('\nWARNING: Some contact energies are positive (repulsive).\n'+
               ' Please ignore this warning if you intentionally have repulsive contacts.')
    create_array(g, 'id', obj=id)
    create_array(g, 'energy', obj=energy)
    create_array(g, 'distance', obj=dist)
    create_array(g, 'width', obj=width)
def write_rama_coord():
    '''Create the 'rama_coord' group mapping each residue to the five
    consecutive backbone atoms (previous C, N, CA, C, next N) that define its
    Rama dihedral angles.  References to atoms before the chain start or past
    the chain end are marked with -1.

    Uses module globals: t, potential, n_atom, create_array.
    '''
    grp = t.create_group(potential, 'rama_coord')
    grp._v_attrs.arguments = np.array(['pos'])

    # explicit floor division: identical result on Python 2 and 3
    # (the original `n_atom/3` relied on Python 2 integer-division semantics)
    n_res = n_atom//3
    N_id = 3*np.arange(n_res)
    # renamed from `id` to avoid shadowing the builtin
    atom_id = np.column_stack((N_id-1,N_id,N_id+1,N_id+2,N_id+3))
    atom_id[atom_id>=n_atom] = -1  # last atom is non-existent (as is first)
                                   # and non-existence is indicated by -1
    create_array(grp, 'id', atom_id)
def write_sidechain_radial(fasta, library, excluded_residues, suffix=''):
    '''Write the 'radial<suffix>' sidechain radial potential group, reading
    interaction parameters from the given library file and skipping the
    residue indices in excluded_residues.'''
    grp = t.create_group(t.root.input.potential, 'radial'+suffix)
    grp._v_attrs.arguments = np.array(['placement_fixed_point_only_CB'])

    # validate the exclusion list before touching the library
    for rnum in excluded_residues:
        if not (0<=rnum<len(fasta)):
            raise ValueError('Residue number %i is invalid'%rnum)

    included = sorted(set(np.arange(len(fasta))).difference(excluded_residues))

    with tb.open_file(library) as params:
        name_to_type = dict((nm,idx) for idx,nm in enumerate(params.root.names[:]))
        n_type = len(name_to_type)  # kept for parity with the original code (unused)

        create_array(grp, 'index', obj=np.array(included))
        create_array(grp, 'type',  obj=np.array([name_to_type[aa] for aa in fasta[included]]))
        create_array(grp, 'id',    obj=np.array(included))  # FIXME update for chain breaks
        create_array(grp, 'interaction_param', obj=params.root.interaction_param[:])
def write_rotamer_placement(fasta, placement_library, dynamic_placement, dynamic_1body, fix_rotamer):
    '''Write sidechain rotamer bead placement and 1-body energy groups.

    Reads the HDF5 rotamer library and creates two groups under the global
    `potential` node:

      * 'placement[_fixed]_point_vector_only' -- per-rotamer bead placement data
      * 'placement[_fixed]_scalar'            -- per-rotamer 1-body energies

    The '_fixed' variants are produced when the corresponding flag
    (dynamic_placement / dynamic_1body) is False, i.e. the data does not
    depend on the backbone Rama state.

    fix_rotamer, if non-empty, names a whitespace-delimited table (header
    "residue restype chain resnum chi1 chi2", angles in degrees) pinning
    specific residues to a single rotamer state, chosen by matching the chi1
    well and then the nearest chi2.

    Returns (sc_node_name, pl_node_name), the names of the two created groups.

    Relies on module globals: t (output file), potential, deg (degree-to-
    radian factor), n_bit_rotamer, create_array.
    '''
    def compute_chi1_state(angles):
        # Classify chi1 (radians) into one of three rotameric wells:
        #   0 for [0, 120) deg, 2 for [-120, 0) deg, 1 otherwise (init value).
        chi1_state = np.ones(angles.shape, dtype='i')
        chi1_state[( 0.*deg<=angles)&(angles<120.*deg)] = 0
        chi1_state[(-120.*deg<=angles)&(angles< 0.*deg)] = 2
        return chi1_state

    with tb.open_file(placement_library) as data:
        restype_num = dict((aa,i) for i,aa in enumerate(data.root.restype_order[:]))

        if dynamic_placement:
            placement_pos = data.root.rotamer_center[:].transpose((2,0,1,3)) # put layer index first
        else:
            placement_pos = data.root.rotamer_center_fixed[:]

        if dynamic_1body:
            # convert rotamer probabilities to energies: E = -log(p)
            placement_energy = -np.log(data.root.rotamer_prob[:].transpose((2,0,1)))[...,None]
        else:
            placement_energy = data.root.rotamer_prob_fixed[:][...,None]

        # per-restype (start, stop, n_bead) ranges into the placement tables
        start_stop = data.root.rotamer_start_stop_bead[:]
        # lookup columns used to translate an observed (restype, chi1, chi2)
        # into the library's rotamer state index
        find_restype =    data.root.restype_and_chi_and_state[:,0].astype('i')
        find_chi1 =       data.root.restype_and_chi_and_state[:,1]
        find_chi1_state = compute_chi1_state(data.root.restype_and_chi_and_state[:,1])
        find_chi2 =       data.root.restype_and_chi_and_state[:,2]
        find_state =      data.root.restype_and_chi_and_state[:,3].astype('i')

    # residue number -> fixed rotamer state, for residues pinned by the user
    fix = dict()
    if fix_rotamer:
        fields = [x.split() for x in list(open(fix_rotamer))]

        header = 'residue restype chain resnum chi1 chi2'
        actual_header = [x.lower() for x in fields[0]]
        if actual_header != header.split():
            raise RuntimeError('First line of fix-rotamer table must be "%s" but is "%s" for file %s'
                    %(header," ".join(actual_header),fix_rotamer))

        for residue, restype, chain, resnum, chi1, chi2 in fields[1:]:
            # sanity check against the FASTA; CPR is the cis-proline alias for PRO
            if fasta[int(residue)] != (restype if restype != 'CPR' else 'PRO'):
                raise RuntimeError("fix-rotamer file does not match FASTA"
                        + ", residue %i should be %s but fix-rotamer file has %s"%(
                            int(residue), fasta[int(residue)], restype))
            chi1 = float(chi1)*deg  # convert to radians internally
            chi2 = float(chi2)*deg

            if restype == 'GLY' or restype == 'ALA':
                fix_state = 0  # no sidechain chi dihedrals; single state
            else:
                if np.isnan(chi1): continue  # no chi1 given -> leave residue free
                # determine states that have the right restype and compatible chi1
                chi1_state = compute_chi1_state(np.array([chi1]))[0]
                restype_admissible = find_restype == restype_num[fasta[int(residue)]]
                chi1_admissible = find_chi1_state == chi1_state
                admissible = restype_admissible&chi1_admissible
                admissible_chi2 = find_chi2[admissible]
                admissible_state = find_state[admissible]
                if len(admissible_state)==1:  # handle short residues (like VAL)
                    fix_state = admissible_state[0]
                else:
                    if np.isnan(chi2): continue  # chi2 needed to disambiguate
                    # now find the closest chi2 among those states and read off the state index
                    chi2_dist = (admissible_chi2-chi2)%(2*np.pi)
                    chi2_dist[chi2_dist>np.pi] -= 2*np.pi  # find closest periodic image
                    fix_state = admissible_state[np.argmin(chi2_dist)]

            fix[int(residue)] = fix_state

    rama_residue = []
    affine_residue = []
    layer_index = []
    beadtype_seq = []
    id_seq = []
    ref_chi1_state = []  # NOTE(review): never populated; only referenced by the
                         # commented-out create_array below
    count_by_n_rot = dict()

    for rnum,aa in enumerate(fasta):
        restype = restype_num[aa]
        start,stop,n_bead = start_stop[restype]
        assert (stop-start)%n_bead == 0
        n_rot = (stop-start)//n_bead

        # if it should be fixed, then we must modify these answers to get a single rotamer
        if rnum in fix:
            if not (0 <= fix[rnum] < n_rot): raise ValueError('invalid fix rotamer state')
            start,stop = start+n_bead*fix[rnum], start+n_bead*(fix[rnum]+1)
            n_rot = 1

        # pack a per-residue id: low n_bit_rotamer bits hold n_rot, upper bits
        # a running count of residues sharing that rotamer multiplicity
        if n_rot not in count_by_n_rot:
            count_by_n_rot[n_rot] = 0;

        base_id = (count_by_n_rot[n_rot]<<n_bit_rotamer) + n_rot
        count_by_n_rot[n_rot] += 1

        rama_residue  .extend([rnum]*(stop-start))
        affine_residue.extend([rnum]*(stop-start))
        layer_index   .extend(np.arange(start,stop))
        beadtype_seq  .extend(['%s_%i'%(aa,i) for i in range(n_bead)]*n_rot)
        id_seq        .extend(np.arange(stop-start)//n_bead + (base_id<<n_bit_rotamer))

    sc_node_name = 'placement%s_point_vector_only' % ('' if dynamic_placement else '_fixed')
    grp = t.create_group(potential, sc_node_name)
    grp._v_attrs.arguments = np.array(['affine_alignment'] + (['rama_coord'] if dynamic_placement else []))
    create_array(grp, 'rama_residue',   rama_residue)
    create_array(grp, 'affine_residue', affine_residue)
    create_array(grp, 'layer_index',    layer_index)
    create_array(grp, 'placement_data', placement_pos[...,:6])
    create_array(grp, 'beadtype_seq',   beadtype_seq)
    create_array(grp, 'id_seq',         np.array(id_seq))
    create_array(grp, 'fix_rotamer',    np.array(sorted(fix.items())))
    # create_array(grp, 'ref_chi1_state', np.array(ref_chi1_state))
    # create_array(grp, 'find_chi1',      find_chi1)

    pl_node_name = 'placement%s_scalar' % ('' if dynamic_1body else '_fixed')
    grp = t.create_group(potential, pl_node_name)
    grp._v_attrs.arguments = np.array(['affine_alignment']+(['rama_coord'] if dynamic_1body else []))
    create_array(grp, 'rama_residue',   rama_residue)
    create_array(grp, 'affine_residue', affine_residue)
    create_array(grp, 'layer_index',    layer_index)
    create_array(grp, 'placement_data', placement_energy)

    return sc_node_name, pl_node_name
def write_rotamer(fasta, interaction_library, damping, sc_node_name, pl_node_name):
    '''Create the 'rotamer' pair-interaction solver group.

    Argument nodes are the sidechain placement node (sc_node_name) and the
    1-body energy node (pl_node_name); hbond coverage nodes are appended when
    present in the potential group.  Solver settings (max_iter, tol, damping,
    iteration chunk size) are stored as group attributes.
    '''
    g = t.create_group(t.root.input.potential, 'rotamer')

    # required nodes first, then any optional coverage nodes that exist
    args = [sc_node_name,pl_node_name]
    for maybe_node in ('hbond_coverage', 'hbond_coverage_hydrophobe'):
        if maybe_node in t.root.input.potential:
            args.append(maybe_node)

    g._v_attrs.arguments = np.array(args)
    g._v_attrs.max_iter = 1000
    g._v_attrs.tol = 1e-3
    g._v_attrs.damping = damping
    g._v_attrs.iteration_chunk_size = 2

    pg = t.create_group(g, "pair_interaction")
    with tb.open_file(interaction_library) as data:
        create_array(pg, 'interaction_param', data.root.pair_interaction[:])
        bead_num = dict((bead,idx) for idx,bead in enumerate(data.root.bead_order[:]))
        # pg._v_attrs.energy_cap = data.root._v_attrs.energy_cap_1body
        # pg._v_attrs.energy_cap_width = data.root._v_attrs.energy_cap_width_1body

        sc_node = t.get_node(t.root.input.potential, sc_node_name)
        bead_seq = sc_node.beadtype_seq[:]
        create_array(pg, 'index', np.arange(len(bead_seq)))
        create_array(pg, 'type', np.array([bead_num[b] for b in bead_seq]))
        create_array(pg, 'id', sc_node.id_seq[:])
def write_membrane_potential(
        fasta_seq, membrane_potential_fpath, membrane_thickness, membrane_exclude_residues, hbond_exclude_residues):
    '''Write the 'membrane_potential' group: z-dependent CB burial energies
    plus an unsatisfied-hydrogen-bond (UHB) term.

    Energy profiles are read from the HDF5 file membrane_potential_fpath,
    which was tabulated at a reference thickness; they are re-sampled onto a
    grid for the user-supplied membrane_thickness (see the re-sampling loops
    below).  Residues in membrane_exclude_residues are mapped to the special
    'NON' residue type, whose profile is all zeros.  hbond_exclude_residues
    must match the set used in write_infer_H_O so donor/acceptor ids agree.

    Relies on module globals: t, create_array, highlight_residues.
    '''
    grp = t.create_group(t.root.input.potential, 'membrane_potential')
    grp._v_attrs.arguments = np.array(['placement_fixed_point_only_CB', 'environment_coverage', 'protein_hbond'])

    with tb.open_file(membrane_potential_fpath) as lib:
        resnames      = lib.root.names[:]
        cb_energy     = lib.root.cb_energy[:]
        cb_z_min      = lib.root.cb_energy._v_attrs.z_min
        cb_z_max      = lib.root.cb_energy._v_attrs.z_max
        thickness     = lib.root.cb_energy._v_attrs.thickness  # reference thickness of the tabulated profiles
        uhb_energy    = lib.root.uhb_energy[:]
        uhb_z_min     = lib.root.uhb_energy._v_attrs.z_min
        uhb_z_max     = lib.root.uhb_energy._v_attrs.z_max
        cov_midpoint  = lib.root.cov_midpoint[:]
        cov_sharpness = lib.root.cov_sharpness[:]

    #<----- ----- ----- ----- donor/acceptor res ids ----- ----- ----- ----->#
    # Note: hbond_excluded_residues is the same as in the function write_infer_H_O.
    # First residue cannot donate (no amide H); PRO has no amide H either.
    # Last residue cannot accept.
    n_res = len(fasta_seq)
    donor_residue_ids    = np.array([i for i in range(n_res) if i>0       and i not in hbond_exclude_residues and fasta_seq[i]!='PRO'])
    acceptor_residue_ids = np.array([i for i in range(n_res) if i<n_res-1 and i not in hbond_exclude_residues])

    #<----- ----- ----- ----- make energy splines ----- ----- ----- ----->#
    import scipy.interpolate
    def extrapolated_spline(x0, y0):
        # Interpolating spline that clamps to the endpoint values outside the
        # tabulated range instead of extrapolating the polynomial.
        spline = scipy.interpolate.InterpolatedUnivariateSpline(x0,y0)
        def f(x, spline=spline):
            return np.select(
                    [(x<x0[0]), (x>x0[-1]), np.ones_like(x,dtype='bool')],
                    [np.zeros_like(x)+y0[0], np.zeros_like(x)+y0[-1], spline(x)])
        return f

    cb_z_lib          = np.linspace(cb_z_min, cb_z_max, cb_energy.shape[-1])
    cb_energy_splines = [extrapolated_spline(cb_z_lib, ene) for ene in cb_energy]

    uhb_z_lib          = np.linspace(uhb_z_min, uhb_z_max, uhb_energy.shape[-1])
    uhb_energy_splines = [extrapolated_spline(uhb_z_lib, ene) for ene in uhb_energy]

    #<----- ----- ----- ----- re-sample for requested thickness ----- ----- ----- ----->#
    # This step is necessary in case the supplied membrane thickness is not equal to the thickness in the membrane potential file.
    default_half_thickness = thickness/2.
    half_thickness = membrane_thickness/2.
    # evaluation grid: 0.25 A spacing, extending 15 A beyond each leaflet
    z_ = np.linspace(-half_thickness - 15., half_thickness + 15., int((membrane_thickness+30.)/0.25)+1)

    # ensure that the potential is continuous at 0
    # spline(z-(half_thickness-default_half_thickness)) may not equal to spline(z+(half_thickness-default_half_thickness))
    membrane_cb_energies = np.zeros((len(cb_energy_splines), len(z_)))
    for ispl, spline in enumerate(cb_energy_splines):
        if half_thickness < default_half_thickness:
            # thinner membrane: excise the central 2*delta_t slab of the
            # reference profile and split the resulting jump delta_s at z=0
            # evenly between the two halves so the curve stays continuous
            delta_t = default_half_thickness - half_thickness
            delta_s = spline(delta_t) - spline(-delta_t)
            membrane_cb_energies[ispl] = np.select([(z_ < 0), (z_ >= 0.)],
                    [spline(z_-delta_t) + 0.5*delta_s, spline(z_+delta_t) - 0.5*delta_s])
        elif half_thickness > default_half_thickness:
            # thicker membrane: stretch by inserting a flat plateau of
            # spline(0) across the central 2*delta_t region
            delta_t = half_thickness - default_half_thickness
            membrane_cb_energies[ispl] = np.select([
                    (z_ < -delta_t),
                    (z_ >= -delta_t) & (z_ <= delta_t),
                    (z_ > delta_t)],
                    [spline(z_+delta_t), spline(0), spline(z_-delta_t)])
        else:
            membrane_cb_energies[ispl] = spline(z_)

    # same re-sampling applied to the UHB profiles
    membrane_uhb_energies = np.zeros((len(uhb_energy_splines), len(z_)))
    for ispl, spline in enumerate(uhb_energy_splines):
        if half_thickness < default_half_thickness:
            delta_t = default_half_thickness - half_thickness
            delta_s = spline(delta_t) - spline(-delta_t)
            membrane_uhb_energies[ispl] = np.select([(z_ < 0), (z_ >= 0.)],
                    [spline(z_-delta_t) + 0.5*delta_s, spline(z_+delta_t) - 0.5*delta_s])
        elif half_thickness > default_half_thickness:
            delta_t = half_thickness - default_half_thickness
            membrane_uhb_energies[ispl] = np.select([
                    (z_ < -delta_t),
                    (z_ >= -delta_t) & (z_ <= delta_t),
                    (z_ > delta_t)],
                    [spline(z_+delta_t), spline(0), spline(z_-delta_t)])
        else:
            membrane_uhb_energies[ispl] = spline(z_)

    #<----- ----- ----- ----- cb energy indices ----- ----- ----- ----->#
    # Note: there's a residue type, NON, in resnames for those excluded from membrane potential.
    # And there's a potential profile in cb_energy for NON, which is all zeros.
    # NOTE(review): this ValueError receives a tuple of args rather than one
    # formatted message
    if set(membrane_exclude_residues).difference(range(len(fasta_seq))) != set():
        raise ValueError('Residue number', set(membrane_exclude_residues).difference(range(len(fasta_seq))), 'not valid')
    highlight_residues('membrane_exclude_residues', fasta_seq, membrane_exclude_residues)

    sequence = list(fasta_seq)
    for num in membrane_exclude_residues:
        sequence[num] = 'NON'
    sequence = np.array(sequence)

    resname_to_num  = dict([(aa,i) for i,aa in enumerate(resnames)])
    residue_id      = np.array([i for i,aa in enumerate(sequence)])
    cb_energy_index = np.array([resname_to_num[aa] for aa in sequence])

    #<----- ----- ----- ----- write to grp ----- ----- ----- ----->#
    create_array(grp, 'cb_index',             residue_id)
    create_array(grp, 'env_index',            residue_id)
    create_array(grp, 'residue_type',         cb_energy_index)
    create_array(grp, 'cov_midpoint',         cov_midpoint)
    create_array(grp, 'cov_sharpness',        cov_sharpness)
    create_array(grp, 'cb_energy',            membrane_cb_energies)
    create_array(grp, 'uhb_energy',           membrane_uhb_energies)
    create_array(grp, 'donor_residue_ids',    donor_residue_ids)
    create_array(grp, 'acceptor_residue_ids', acceptor_residue_ids)
    # record the grid bounds so readers can reconstruct z_
    grp.cb_energy._v_attrs.z_min  = z_[ 0]
    grp.cb_energy._v_attrs.z_max  = z_[-1]
    grp.uhb_energy._v_attrs.z_min = z_[ 0]
    grp.uhb_energy._v_attrs.z_max = z_[-1]
def parse_segments(s):
    ''' Parse segments of the form 10-30,50-60 into a sorted, deduplicated
    integer array.  Each comma-separated token is either a single number or
    an inclusive range "a-b".  Raises argparse.ArgumentTypeError on bad
    syntax. '''
    import argparse
    import re

    if re.match('^([0-9]+(-[0-9]+)?)(,[0-9]+(-[0-9]+)?)*$', s) is None:
        raise argparse.ArgumentTypeError('segments must be of the form 10-30,45,72-76 or similar')

    # accumulate into a set, which both deduplicates and feeds sorted()
    values = set()
    for token in s.split(','):
        bounds = token.split('-')
        if len(bounds) == 1:
            values.add(int(bounds[0]))
        elif len(bounds) == 2:
            values.update(range(int(bounds[0]), 1+int(bounds[1])))  # inclusive on both ends
        else:
            raise RuntimeError('the impossible happened. oops.')

    return np.array(sorted(values))
def parse_float_pair(s):
    '''Parse a comma-separated pair like "-2.0,-1.0" into a (float, float)
    tuple.

    Raises argparse.ArgumentTypeError unless there are exactly two fields.
    (The unused `import re` from the original has been removed.)
    '''
    import argparse
    halves = s.split(',')
    if len(halves) != 2:
        raise argparse.ArgumentTypeError('must be in the form -2.0,-1.0 or similar (exactly 2 numbers)')
    return (float(halves[0]), float(halves[1]))
def chain_endpts(n_res, chain_first_residue, i):
    '''Return (first_res, next_first_res), the half-open residue bounds of
    chain i.

    chain_first_residue is an array holding the first residue index of every
    chain except chain 0; chain i therefore spans
    [first_res, next_first_res).

    Fix: the original tested `i == 0` before `i == n_chains-1`, so the
    single-chain case (empty chain_first_residue) indexed into an empty
    array.  Computing each bound independently handles that case and is
    identical for all multi-chain inputs.
    '''
    n_chains = chain_first_residue.size+1
    first_res = 0 if i == 0 else chain_first_residue[i-1]
    next_first_res = n_res if i == n_chains-1 else chain_first_residue[i]
    return first_res, next_first_res
def main():
import argparse
parser = argparse.ArgumentParser(description='Prepare input file',
usage='use "%(prog)s --help" for more information')
parser.add_argument('--fasta', required=True,
help='[required] FASTA sequence file')
parser.add_argument('--output', default='system.h5', required=True,
help='path to output the created .h5 file (default system.h5)')
parser.add_argument('--target-structure', default='',
help='Add target .initial.pkl structure for later analysis. This information is written under '+
'/target and is never read by Upside. The /target group may be useful for later analysis.')
parser.add_argument('--no-backbone', dest='backbone', default=True, action='store_false',
help='do not use rigid nonbonded for backbone N, CA, C, and CB')
parser.add_argument('--rotamer-placement', default=None,
help='rotameric sidechain library')
parser.add_argument('--dynamic-rotamer-placement', default=False, action='store_true',
help='Use dynamic rotamer placement (not recommended)')
parser.add_argument('--dynamic-rotamer-1body', default=False, action='store_true',
help='Use dynamic rotamer 1body')
parser.add_argument('--fix-rotamer', default='',
help='Table of fixed rotamers for specific sidechains. A header line must be present and the first '+
'three columns of that header must be '+
'"residue restype rotamer", but there can be additional, ignored columns. The restype must '+
'match the corresponding restype in the FASTA file (intended to prevent errors). It is permissible '+
'to fix only a subset of rotamers. The value of rotamer must be an integer, corresponding to the '+
'numbering in the --rotamer-placement file. Such a file can be created with PDB_to_initial_structure '+
'--output-chi1.')
parser.add_argument('--rotamer-interaction', default=None,
help='rotamer sidechain pair interaction parameters')
parser.add_argument('--rotamer-solve-damping', default=0.4, type=float,
help='damping factor to use for solving sidechain placement problem')
parser.add_argument('--sidechain-radial', default=None,
help='use sidechain radial potential library')
parser.add_argument('--sidechain-radial-exclude-residues', default=[], type=parse_segments,
help='Residues that do not participate in the --sidechain-radial potential (same format as --restraint-group)')
parser.add_argument('--bond-stiffness', default=48., type=float,
help='Bond spring constant in units of energy/A^2 (default 48)')
parser.add_argument('--angle-stiffness', default=175., type=float,
help='Angle spring constant in units of 1/dot_product (default 175)')
parser.add_argument('--rama-library', default='',
help='smooth Rama probability library')
parser.add_argument('--rama-library-combining-rule', default='mixture',
help='How to combine left and right coil distributions in Rama library '+
'(mixture or product). Default is mixture.')
# parser.add_argument('--torus-dbn-library', default='',
# help='TorusDBN Rama probability function')
parser.add_argument('--rama-sheet-library', default=None,
help='smooth Rama probability library for sheet structures')
parser.add_argument('--secstr-bias', default='',
help='Bias file for secondary structure. First line of the file must be "residue secstr energy". '+
'secstr must be one of "helix" or "sheet". Bias is implemented by a simple Rama bias, hence coil bias '+
'is not implemented.')
parser.add_argument('--rama-sheet-mixing-energy', default=None, type=float,
help='reference energy for sheets when mixing with coil library. More negative numbers mean more '+
'sheet content in the final structure. Default is no sheet mixing.')
parser.add_argument('--hbond-energy', default=0., type=float,
help='energy for forming a protein-protein hydrogen bond. Default is no HBond energy.')
parser.add_argument('--hbond-exclude-residues', default=[], type=parse_segments,
help='Residues to have neither hydrogen bond donors or acceptors')
parser.add_argument('--chain-break-from-file', default='',
help='File with indices of chain first residues recorded during initial structure generation to automate --hbond-exclude-residues.')
parser.add_argument('--loose-hbond-criteria', default=False, action='store_true',
help='Use far more permissive angles and distances to judge HBonding. Do not use for simulation. '+
'This is only useful for static backbone training when crystal or NMR structures have poor '+
'hbond geometry.')
parser.add_argument('--z-flat-bottom', default='',
help='Table of Z-flat-bottom springs. Each line must contain 4 fields and the first line '+
'must contain "residue z0 radius spring_constant". The restraint is applied to the CA atom '+
'of each residue.')
parser.add_argument('--tension', default='',
help='Table of linear tensions. Each line must contain 4 fields and the first line '+
'must contain "residue tension_x tension_y tension_z". The residue will be pulled in the '+
'direction (tension_x,tension_y,tension_z) by its CA atom. The magnitude of the tension vector '+
'sets the force. Units are kT/Angstrom.')
parser.add_argument('--ask-before-using-AFM', default='',
help='Table of tip positions and pulling velocitis for mimicing AFM pulling experiment in the constant velocity mode. ' +
'Each line must contain 8 fields and the first line must contain ' +
'"residue spring_const tip_pos_x tip_pos_y tip_pos_z pulling_vel_x pulling_vel_y pulling_vel_z". ' +
'The residue will be pulled in the direction (pulling_vel_x, pulling_vel_y, pulling_vel_z) by its CA atom, ' +
'which is attached to the tip at (tip_pos_x, tip_pos_y, tip_pos_z). ' +
'The magnitude of the pulling velocity vector sets the pulling speed. The unit is: angstrom/time_step. ' +
'The spring_const is in the unit of kT/angstrom^2. At T = 298.15 K, it equals 41.14 pN/angstrom. ' +
'Note: consult with the developer before using this AFM function.')
parser.add_argument('--AFM-time-initial', default=0., type=float,
help='Time initial for AFM pulling simulation. The default value is 0. ' +
'WARNING: do not change this value unless the simulation is a continuation of a previous one. ' +
'To set the time initial, check the /root/output/time_estimate in the output h5 file. ' )
parser.add_argument('--AFM-time-step', default=0.009, type=float,
help='Time step for AFM pulling simulation. The default value is 0.009. ' +
'WARNING: this should be the same as the global time step, which is set to 0.009 by default. Change this value accordingly.')
parser.add_argument('--initial-structure', default='',
help='Pickle file for initial structure for the simulation. ' +
'If there are not enough structures for the number of replicas ' +
'requested, structures will be recycled. If not provided, a ' +
'freely-jointed chain with good bond lengths and angles but bad dihedrals will be used ' +
'instead.')
parser.add_argument('--restraint-group', default=[], action='append', type=parse_segments,
help='List of residues in the protein. The residue list should be of a form like ' +
'--restraint-group 10-13,17,19-21 and that list would specify all the atoms in '+
'residues 10,11,12,13,17,19,20,21. '+
'Each atom in the specified residues will be randomly connected to atoms in other residues by ' +
'springs with equilibrium distance given by the distance of the atoms in the initial structure. ' +
'Multiple restraint groups may be specified by giving the --restraint-group flag multiple times '
'with different residue lists. The strength of the restraint is given by --restraint-spring-constant')
parser.add_argument('--apply-restraint-group-to-each-chain', action='store_true',
help='Use indices of chain first residues recorded during PDB_to_initial_structure to automate'+
' --restraint-group for chains. Requires --chain-break-from-file.')
parser.add_argument('--restraint-spring-constant', default=4., type=float,
help='Spring constant used to restrain atoms in a restraint group (default 4.) ')
parser.add_argument('--contact-energies', default='',
help='Path to text file that defines a contact energy function. The first line of the file should ' +
'be a header containing "residue1 residue2 energy distance transition_width", and the remaining '+
'lines should contain space separated values. The form of the interaction is approximately '+
'sigmoidal but the potential is constant outside (distance-transition_width,distance+transition_width).'+
' This potential is approximately twice as sharp as a standard sigmoid with the same width as the '+
'specified transition_width. The location x_residue is approximately the CB position of the '+
'residue.')
parser.add_argument('--environment-potential', default='',
help='Path to many-body environment potential')
parser.add_argument('--reference-state-rama', default='',
help='Do not use this unless you know what you are doing.')
parser.add_argument('--membrane-thickness', default=None, type=float,
help='Thickness of the membrane in angstroms for use with --membrane-potential.')
parser.add_argument('--membrane-potential', default='',
help='Parameter file (.h5 format) for membrane potential. User must also supply --membrane-thickness.')
parser.add_argument('--membrane-exclude-residues', default=[], type=parse_segments,
help='Residues that do not participate in the --membrane-potential (same format as --restraint-group).' +
'User must also supply --membrane-potential.')
parser_grp1 = parser.add_mutually_exclusive_group()
parser_grp1.add_argument('--cavity-radius', default=0., type=float,
help='Enclose the whole simulation in a radial cavity centered at the origin to achieve finite concentration '+
'of protein. Necessary for multichain simulation (though this mode is unsupported.')
parser_grp1.add_argument('--debugging-only-heuristic-cavity-radius', default=0., type=float,
help='Set the cavity radius to this provided scale factor times the max distance between com\'s and atoms of the chains.')
parser_grp1.add_argument('--cavity-radius-from-config', default='', help='Config file with cavity radius set. Useful for applying'+
' the same heuristic cavity of bound complex config to unbound counterpart')
parser.add_argument('--make-unbound', action='store_true',
help='Separate chains into different corners of a cavity that you set with one of the cavity options.')
parser.add_argument('--debugging-only-disable-basic-springs', default=False, action='store_true',
help='Disable basic springs (like bond distance and angle). Do not use this.')
args = parser.parse_args()
if args.restraint_group and not args.initial_structure:
parser.error('must specify --initial-structures to use --restraint-group')
if args.apply_restraint_group_to_each_chain and not args.chain_break_from_file:
parser.error('--apply-restraint-group-to-each-chain requires --chain-break-from-file')
if args.make_unbound and not args.chain_break_from_file:
parser.error('--make-unbound requires --chain-break-from-file')
fasta_seq_with_cpr = read_fasta(open(args.fasta,'U'))
fasta_seq = np.array([(x if x != 'CPR' else 'PRO') for x in fasta_seq_with_cpr]) # most potentials don't care about CPR
require_affine = False
require_rama = False
require_backbone_point = False
global n_atom, t, potential
n_res = len(fasta_seq)
n_atom = 3*n_res
t = tb.open_file(args.output,'w')
input = t.create_group(t.root, 'input')
create_array(input, 'sequence', obj=fasta_seq_with_cpr)
if args.initial_structure:
init_pos = cPickle.load(open(args.initial_structure))
assert init_pos.shape == (n_atom, 3, 1)
if args.target_structure:
def f():
# little function closure to protect the namespace from ever seeing the target structure
target_pos = cPickle.load(open(args.target_structure))
assert target_pos.shape == (n_atom, 3, 1)
g_target = t.create_group(t.root, 'target')
t.create_array(t.root.target, 'pos', obj=target_pos[:,:,0])
f()
pos = np.zeros((n_atom, 3, 1), dtype='f4')
if args.initial_structure:
pos[:,:,0] = init_pos[...,0]
else:
pos[:,:,0] = random_initial_config(len(fasta_seq))
create_array(input, 'pos', obj=pos)
potential = t.create_group(input, 'potential')
if not args.debugging_only_disable_basic_springs:
write_dist_spring(args)
write_angle_spring(args)
write_dihedral_spring(fasta_seq_with_cpr)
sc_node_name = ''
if args.rotamer_placement:
require_rama = True
require_affine = True
sc_node_name, pl_node_name = write_rotamer_placement(
fasta_seq, args.rotamer_placement,
args.dynamic_rotamer_placement, args.dynamic_rotamer_1body,
args.fix_rotamer)
if args.chain_break_from_file:
try:
with open(args.chain_break_from_file) as infile:
chain_dat = list(infile)
# chain_first_residue = np.loadtxt(args.chain_break_from_file, ndmin=1, dtype='int32')
except IOError:
chain_first_residue = np.array([], dtype='int32')
n_chains = 1
else:
if len(chain_dat) > 1:
has_rl_info = True
else:
has_rl_info = False
chain_first_residue = chain_dat[0].split()
chain_first_residue = np.array(chain_first_residue, dtype='int32')
print "chain_first_residue:", chain_first_residue
n_chains = chain_first_residue.size+1
if has_rl_info:
rl_chains = np.array(chain_dat[-1].split(), dtype='int32')
# rl_chains = [int(i) for i in rl_chains]
print "rl_chains:", rl_chains
print
print "n_chains"
print n_chains
if chain_first_residue.size:
break_grp = t.create_group("/input","chain_break","Indicates that multi-chain simulation and removal of bonded potential terms accross chains requested")
t.create_array(break_grp, "chain_first_residue", chain_first_residue, "Contains array of chain first residues, apart from residue 0")
if has_rl_info:
t.create_array(break_grp, "rl_chains", rl_chains, "Numbers of receptor and ligand chains")
required_hbond_exclude_res = [i+j for i in chain_first_residue for j in [-1,0]]
if args.hbond_exclude_residues:
args.hbond_exclude_residues = np.unique(np.append(args.hbond_exclude_residues, required_hbond_exclude_res))
else:
args.hbond_exclude_residues = np.array(required_hbond_exclude_res)
print
print "hbond_exclude_residues"
print args.hbond_exclude_residues
if args.hbond_energy:
write_infer_H_O (fasta_seq, args.hbond_exclude_residues)
write_count_hbond(fasta_seq, args.hbond_energy, args.rotamer_interaction, args.loose_hbond_criteria, sc_node_name)
if args.environment_potential:
if args.rotamer_placement is None:
parser.error('--rotamer-placement is required, based on other options.')
write_environment(fasta_seq, args.environment_potential, sc_node_name, pl_node_name)
args_group = t.create_group(input, 'args')
for k,v in sorted(vars(args).items()):
args_group._v_attrs[k] = v
args_group._v_attrs['invocation'] = ' '.join(sys.argv[:])
if args.rama_library:
require_rama = True
write_rama_map_pot(fasta_seq_with_cpr, args.rama_library, args.rama_sheet_mixing_energy,
args.secstr_bias, args.rama_library_combining_rule)
# elif args.torus_dbn_library:
# require_rama = True
# write_torus_dbn(fasta_seq_with_cpr, args.torus_dbn_library)
else:
print>>sys.stderr, 'WARNING: running without any Rama potential !!!'
# hack to fix reference state issues for Rama potential
if args.reference_state_rama:
# define correction
ref_state_cor = np.log(cPickle.load(open(args.reference_state_rama)))
ref_state_cor -= ref_state_cor.mean()
grp = t.create_group(potential, 'rama_map_pot_ref')
grp._v_attrs.arguments = np.array(['rama_coord'])
grp._v_attrs.log_pot = 0
create_array(grp, 'residue_id', obj=np.arange(len(fasta_seq)))
create_array(grp, 'rama_map_id', obj=np.zeros(len(fasta_seq), dtype='i4'))
create_array(grp, 'rama_pot', obj=ref_state_cor[None])
if args.debugging_only_heuristic_cavity_radius:
if n_chains < 2:
print>>sys.stderr, 'WARNING: --debugging-only-heuristic-cavity-radius requires at least 2 chains. Skipping setting up cavity'
else:
com_list = []
com_dist_list = []
for i in xrange(n_chains):
first_res, next_first_res = chain_endpts(n_res, chain_first_residue, i)
com_list.append(pos[first_res*3:next_first_res*3,:,0].mean(axis=0))
# Distance between chain com
# for i in xrange(n_chains):
# print
# print "com_list"
# print com_list[i]
# for j in xrange(n_chains):
# if j > i:
# com_dist_list.append(vmag(com_list[i]-com_list[j]))
# args.cavity_radius = 2.*max(com_dist_list)
# print
# print "old cavity_radius"
# print args.cavity_radius
# com_dist_list = []
# Distance between chain com and all atoms
for i in xrange(n_chains):
for j in xrange(n_atom):
com_dist_list.append(vmag(com_list[i]-pos[j,:,0]))
# Max distance between all atoms
args.cavity_radius = args.debugging_only_heuristic_cavity_radius*max(com_dist_list)
print
print "cavity_radius"
print args.cavity_radius
if args.cavity_radius_from_config:
if n_chains < 2:
print>>sys.stderr, 'WARNING: --cavity-radius-from-config requires at least 2 chains. Skipping setting up cavity'
elif args.debugging_only_heuristic_cavity_radius:
print>>sys.stderr, 'WARNING: Overwriting heuristic cavity with the one from --cavity-radius-from-config'
else:
t_cavity = tb.open_file(args.cavity_radius_from_config,'r')
args.cavity_radius = t_cavity.root.input.potential.cavity_radial.radius[0]
t_cavity.close()
print
print "cavity_radius"
print args.cavity_radius
if args.cavity_radius:
write_cavity_radial(args.cavity_radius)
if args.make_unbound:
if n_chains < 2 or n_chains > 8:
print>>sys.stderr, 'WARNING: --make-unbound requires at least 2 and no more than 8 chains. Skipping separating chains'
elif not args.cavity_radius:
print>>sys.stderr, 'WARNING: --make-unbound requires setting a cavity radius. Skipping separating chains'
else:
print
print "making unbound"
displacement = np.array([[-1.,0.,0.], [1.,0.,0.],
[0.,-1.,0.], [0.,1.,0.],
[0.,0.,-1.], [0.,0.,1.],])
if not has_rl_info: # separate all chains
for j in xrange(n_chains):
first_res, next_first_res = chain_endpts(n_res, chain_first_residue, j)
#com = pos[first_res*3:next_first_res*3,:,0].mean(axis=0)
pos[first_res*3:next_first_res*3,:,0] = (pos[first_res*3:next_first_res*3,:,0] +
displacement[j]*0.5*args.cavity_radius) #- displacement[j]*com
else: # keep receptor and ligand chains together
# move receptor chains
first_res = chain_endpts(n_res, chain_first_residue, 0)[0]
next_first_res = chain_endpts(n_res, chain_first_residue, rl_chains[0]-1)[1]
pick_disp = random.choice([0, 2, 4])
pos[first_res*3:next_first_res*3,:,0] = pos[first_res*3:next_first_res*3,:,0] + displacement[pick_disp]*0.5*args.cavity_radius
# move ligand chains
first_res = chain_endpts(n_res, chain_first_residue, rl_chains[0])[0]
next_first_res = chain_endpts(n_res, chain_first_residue, n_chains-1)[1]
pick_disp = random.choice([1, 3, 5])
pos[first_res*3:next_first_res*3,:,0] = pos[first_res*3:next_first_res*3,:,0] + displacement[pick_disp]*0.5*args.cavity_radius
t.root.input.pos[:] = pos
target = pos.copy()
if args.backbone:
require_affine = True
write_backbone_pair(fasta_seq)
if args.z_flat_bottom:
write_z_flat_bottom(parser,fasta_seq, args.z_flat_bottom)
if args.tension and args.ask_before_using_AFM:
print 'Nope, you cannot pull the protein using two modes. Choose one.'
elif args.tension and not args.ask_before_using_AFM:
write_tension(parser, fasta_seq, args.tension)
elif args.ask_before_using_AFM and not args.tension:
write_AFM(parser, fasta_seq, args.ask_before_using_AFM, args.AFM_time_initial, args.AFM_time_step)
if args.rotamer_interaction:
# must be after write_count_hbond if hbond_coverage is used
write_rotamer(fasta_seq, args.rotamer_interaction, args.rotamer_solve_damping, sc_node_name, pl_node_name)
if args.sidechain_radial:
require_backbone_point = True
write_sidechain_radial(fasta_seq, args.sidechain_radial, args.sidechain_radial_exclude_residues)
if args.membrane_potential:
if args.membrane_thickness is None:
parser.error('--membrane-potential requires --membrane-thickness')
require_backbone_point = True
write_membrane_potential(fasta_seq,
args.membrane_potential,
args.membrane_thickness,
args.membrane_exclude_residues,
args.hbond_exclude_residues)
if args.contact_energies:
require_backbone_point = True
write_contact_energies(parser, fasta_seq, args.contact_energies)
if require_backbone_point:
require_affine = True
write_CB(fasta_seq)
if require_rama:
write_rama_coord()
if require_affine:
write_affine_alignment(len(fasta_seq))
if args.apply_restraint_group_to_each_chain and n_chains > 1:
if has_rl_info:
# receptor chains
first_res = chain_endpts(n_res, chain_first_residue, 0)[0]
next_first_res = chain_endpts(n_res, chain_first_residue, rl_chains[0]-1)[1]
args.restraint_group.append(np.arange(first_res, next_first_res))
# ligand chains
first_res = chain_endpts(n_res, chain_first_residue, rl_chains[0])[0]
next_first_res = chain_endpts(n_res, chain_first_residue, n_chains-1)[1]
args.restraint_group.append(np.arange(first_res, next_first_res))
else:
for i in xrange(n_chains):
first_res, next_first_res = chain_endpts(n_res, chain_first_residue, i)
args.restraint_group.append(np.arange(first_res, next_first_res))
print
print "restraint_group"
print args.restraint_group
if args.restraint_group:
print
print 'Restraint groups (uppercase letters are restrained residues)'
fasta_one_letter = ''.join(one_letter_aa[x] for x in fasta_seq)
print
print "Restraint spring constant: {}".format(args.restraint_spring_constant)
for i,restrained_residues in enumerate(args.restraint_group):
assert np.amax(list(restrained_residues)) < len(fasta_seq)
highlight_residues('group_%i'%i, fasta_seq, restrained_residues)
make_restraint_group(i,set(restrained_residues),pos[:,:,0], args.restraint_spring_constant)
# if we have the necessary information, write pivot_sampler
if require_rama and 'rama_map_pot' in potential:
grp = t.create_group(input, 'pivot_moves')
pivot_atom = potential.rama_coord.id[:]
non_terminal_residue = np.array([not(np.int64(-1).astype(pivot_atom.dtype) in tuple(x))
for x in pivot_atom])
create_array(grp, 'proposal_pot', potential.rama_map_pot.rama_pot[:])
create_array(grp, 'pivot_atom', pivot_atom[non_terminal_residue])
create_array(grp, 'pivot_restype', potential.rama_map_pot.rama_map_id[:][non_terminal_residue])
create_array(grp, 'pivot_range', np.column_stack((grp.pivot_atom[:,4]+1,np.zeros(sum(non_terminal_residue),'i')+n_atom)))
t.close()
# Script entry point: build the Upside configuration file when executed
# directly (no effect when this module is imported).
if __name__ == '__main__':
    main()
| John-Jumper/Upside-MD | py/upside_config.py | Python | gpl-2.0 | 78,297 | [
"CRYSTAL"
] | aec557f35629630267062a9b30d7d2932b0bcf8f4801cd125373210c25e4d8b2 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page.page_set import PageSet
from telemetry.page.page import Page
class Alexa1To10000Page(Page):
  """One page drawn from the Alexa global top-10000 list.

  The smoothness measurement scrolls the page inside a recorded gesture
  interaction so rendering statistics are attributed to the scroll.
  """

  def __init__(self, url, page_set):
    super(Alexa1To10000Page, self).__init__(url=url, page_set=page_set)
    # Force deterministic JavaScript so record/replay produces stable content.
    self.make_javascript_deterministic = True

  def RunSmoothness(self, action_runner):
    # Begin the gesture interaction first, then scroll, then close it —
    # the End() call marks the span telemetry measures.
    scroll_interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll_interaction.End()
class Alexa1To10000PageSet(PageSet):
""" Top 1-10000 Alexa global.
Generated on 2013-09-03 13:59:53.459117 by rmistry using
create_page_set.py.
"""
def __init__(self):
super(Alexa1To10000PageSet, self).__init__(
make_javascript_deterministic=True,
user_agent_type='desktop')
urls_list = [
# Why: #1 in Alexa global
'http://www.facebook.com/',
# Why: #2 in Alexa global
'http://www.google.com/',
# Why: #3 in Alexa global
'http://www.youtube.com/',
# Why: #4 in Alexa global
'http://www.yahoo.com/',
# Why: #5 in Alexa global
'http://baidu.com/',
# Why: #6 in Alexa global
'http://www.amazon.com/',
# Why: #7 in Alexa global
'http://www.wikipedia.org/',
# Why: #8 in Alexa global
'http://www.qq.com/',
# Why: #9 in Alexa global
'http://www.live.com/',
# Why: #10 in Alexa global
'http://www.taobao.com/',
# Why: #11 in Alexa global
'http://www.google.co.in/',
# Why: #12 in Alexa global
'http://www.twitter.com/',
# Why: #13 in Alexa global
'http://www.blogspot.com/',
# Why: #14 in Alexa global
'http://www.linkedin.com/',
# Why: #15 in Alexa global
'http://www.yahoo.co.jp/',
# Why: #16 in Alexa global
'http://www.bing.com/',
# Why: #17 in Alexa global
'http://sina.com.cn/',
# Why: #18 in Alexa global
'http://www.yandex.ru/',
# Why: #19 in Alexa global
'http://www.vk.com/',
# Why: #20 in Alexa global
'http://www.ask.com/',
# Why: #21 in Alexa global
'http://www.ebay.com/',
# Why: #22 in Alexa global
'http://www.wordpress.com/',
# Why: #23 in Alexa global
'http://www.google.de/',
# Why: #24 in Alexa global
'http://www.msn.com/',
# Why: #25 in Alexa global
'http://www.tumblr.com/',
# Why: #26 in Alexa global
'http://163.com/',
# Why: #27 in Alexa global
'http://www.google.com.hk/',
# Why: #28 in Alexa global
'http://www.mail.ru/',
# Why: #29 in Alexa global
'http://www.google.co.uk/',
# Why: #30 in Alexa global
'http://hao123.com/',
# Why: #31 in Alexa global
'http://www.google.com.br/',
# Why: #32 in Alexa global
'http://www.amazon.co.jp/',
# Why: #33 in Alexa global
'http://www.weibo.com/',
# Why: #34 in Alexa global
'http://www.xvideos.com/',
# Why: #35 in Alexa global
'http://www.google.co.jp/',
# Why: #36 in Alexa global
'http://www.microsoft.com/',
# Why: #38 in Alexa global
'http://www.delta-search.com/',
# Why: #39 in Alexa global
'http://www.google.fr/',
# Why: #40 in Alexa global
'http://www.conduit.com/',
# Why: #41 in Alexa global
'http://www.fc2.com/',
# Why: #42 in Alexa global
'http://www.craigslist.org/',
# Why: #43 in Alexa global
'http://www.google.ru/',
# Why: #44 in Alexa global
'http://www.pinterest.com/',
# Why: #45 in Alexa global
'http://www.instagram.com/',
# Why: #46 in Alexa global
'http://www.tmall.com/',
# Why: #47 in Alexa global
'http://www.xhamster.com/',
# Why: #48 in Alexa global
'http://www.odnoklassniki.ru/',
# Why: #49 in Alexa global
'http://www.google.it/',
# Why: #50 in Alexa global
'http://www.sohu.com/',
# Why: #51 in Alexa global
'http://www.paypal.com/',
# Why: #52 in Alexa global
'http://www.babylon.com/',
# Why: #53 in Alexa global
'http://www.google.es/',
# Why: #54 in Alexa global
'http://www.imdb.com/',
# Why: #55 in Alexa global
'http://www.apple.com/',
# Why: #56 in Alexa global
'http://www.amazon.de/',
# Why: #58 in Alexa global
'http://www.bbc.co.uk/',
# Why: #59 in Alexa global
'http://www.adobe.com/',
# Why: #60 in Alexa global
'http://www.soso.com/',
# Why: #61 in Alexa global
'http://www.pornhub.com/',
# Why: #62 in Alexa global
'http://www.google.com.mx/',
# Why: #63 in Alexa global
'http://www.blogger.com/',
# Why: #64 in Alexa global
'http://www.neobux.com/',
# Why: #65 in Alexa global
'http://www.amazon.co.uk/',
# Why: #66 in Alexa global
'http://www.ifeng.com/',
# Why: #67 in Alexa global
'http://www.google.ca/',
# Why: #68 in Alexa global
'http://www.avg.com/',
# Why: #69 in Alexa global
'http://www.go.com/',
# Why: #70 in Alexa global
'http://www.xnxx.com/',
# Why: #71 in Alexa global
'http://www.blogspot.in/',
# Why: #72 in Alexa global
'http://www.alibaba.com/',
# Why: #73 in Alexa global
'http://www.aol.com/',
# Why: #74 in Alexa global
'http://www.buildathome.info/',
# Why: #75 in Alexa global
'http://www.cnn.com/',
# Why: #76 in Alexa global
'http://www.mywebsearch.com/',
# Why: #77 in Alexa global
'http://www.ku6.com/',
# Why: #79 in Alexa global
'http://www.alipay.com/',
# Why: #80 in Alexa global
'http://www.vube.com/',
# Why: #81 in Alexa global
'http://www.google.com.tr/',
# Why: #82 in Alexa global
'http://www.youku.com/',
# Why: #83 in Alexa global
'http://www.redtube.com/',
# Why: #84 in Alexa global
'http://www.dailymotion.com/',
# Why: #85 in Alexa global
'http://www.google.com.au/',
# Why: #86 in Alexa global
'http://www.adf.ly/',
# Why: #87 in Alexa global
'http://www.netflix.com/',
# Why: #88 in Alexa global
'http://www.adcash.com/',
# Why: #89 in Alexa global
'http://www.about.com/',
# Why: #90 in Alexa global
'http://www.google.pl/',
# Why: #91 in Alexa global
'http://www.imgur.com/',
# Why: #92 in Alexa global
'http://www.ebay.de/',
# Why: #93 in Alexa global
'http://www.amazon.fr/',
# Why: #94 in Alexa global
'http://www.flickr.com/',
# Why: #95 in Alexa global
'http://www.thepiratebay.sx/',
# Why: #96 in Alexa global
'http://www.youporn.com/',
# Why: #97 in Alexa global
'http://www.uol.com.br/',
# Why: #98 in Alexa global
'http://www.huffingtonpost.com/',
# Why: #99 in Alexa global
'http://www.stackoverflow.com/',
# Why: #100 in Alexa global
'http://www.jd.com/',
# Why: #101 in Alexa global
'http://t.co/',
# Why: #102 in Alexa global
'http://www.rakuten.co.jp/',
# Why: #103 in Alexa global
'http://www.livejasmin.com/',
# Why: #105 in Alexa global
'http://www.ebay.co.uk/',
# Why: #106 in Alexa global
'http://www.yieldmanager.com/',
# Why: #107 in Alexa global
'http://www.sogou.com/',
# Why: #108 in Alexa global
'http://www.globo.com/',
# Why: #109 in Alexa global
'http://www.softonic.com/',
# Why: #110 in Alexa global
'http://www.cnet.com/',
# Why: #111 in Alexa global
'http://www.livedoor.com/',
# Why: #112 in Alexa global
'http://www.nicovideo.jp/',
# Why: #113 in Alexa global
'http://www.directrev.com/',
# Why: #114 in Alexa global
'http://www.espn.go.com/',
# Why: #115 in Alexa global
'http://www.ameblo.jp/',
# Why: #116 in Alexa global
'http://www.indiatimes.com/',
# Why: #117 in Alexa global
'http://www.wordpress.org/',
# Why: #118 in Alexa global
'http://www.weather.com/',
# Why: #119 in Alexa global
'http://www.pixnet.net/',
# Why: #120 in Alexa global
'http://www.google.com.sa/',
# Why: #122 in Alexa global
'http://www.clkmon.com/',
# Why: #123 in Alexa global
'http://www.reddit.com/',
# Why: #124 in Alexa global
'http://www.amazon.it/',
# Why: #125 in Alexa global
'http://www.google.com.eg/',
# Why: #126 in Alexa global
'http://www.booking.com/',
# Why: #127 in Alexa global
'http://www.google.nl/',
# Why: #128 in Alexa global
'http://www.douban.com/',
# Why: #129 in Alexa global
'http://www.amazon.cn/',
# Why: #130 in Alexa global
'http://www.slideshare.net/',
# Why: #131 in Alexa global
'http://www.google.com.ar/',
# Why: #132 in Alexa global
'http://www.badoo.com/',
# Why: #133 in Alexa global
'http://www.dailymail.co.uk/',
# Why: #134 in Alexa global
'http://www.google.co.th/',
# Why: #135 in Alexa global
'http://www.ask.fm/',
# Why: #136 in Alexa global
'http://www.wikia.com/',
# Why: #137 in Alexa global
'http://www.godaddy.com/',
# Why: #138 in Alexa global
'http://www.google.com.tw/',
# Why: #139 in Alexa global
'http://www.xinhuanet.com/',
# Why: #140 in Alexa global
'http://www.mediafire.com/',
# Why: #141 in Alexa global
'http://www.deviantart.com/',
# Why: #142 in Alexa global
'http://www.google.com.pk/',
# Why: #143 in Alexa global
'http://www.bankofamerica.com/',
# Why: #144 in Alexa global
'http://www.amazon.es/',
# Why: #145 in Alexa global
'http://www.blogfa.com/',
# Why: #146 in Alexa global
'http://www.nytimes.com/',
# Why: #147 in Alexa global
'http://www.4shared.com/',
# Why: #148 in Alexa global
'http://www.google.co.id/',
# Why: #149 in Alexa global
'http://www.youjizz.com/',
# Why: #150 in Alexa global
'http://www.amazonaws.com/',
# Why: #151 in Alexa global
'http://www.tube8.com/',
# Why: #152 in Alexa global
'http://www.kickass.to/',
# Why: #154 in Alexa global
'http://www.livejournal.com/',
# Why: #155 in Alexa global
'http://www.snapdo.com/',
# Why: #156 in Alexa global
'http://www.google.co.za/',
# Why: #158 in Alexa global
'http://www.vimeo.com/',
# Why: #160 in Alexa global
'http://www.wigetmedia.com/',
# Why: #161 in Alexa global
'http://www.yelp.com/',
# Why: #162 in Alexa global
'http://www.outbrain.com/',
# Why: #163 in Alexa global
'http://www.dropbox.com/',
# Why: #164 in Alexa global
'http://www.siteadvisor.com/',
# Why: #165 in Alexa global
'http://www.foxnews.com/',
# Why: #166 in Alexa global
'http://www.renren.com/',
# Why: #167 in Alexa global
'http://www.aliexpress.com/',
# Why: #168 in Alexa global
'http://www.walmart.com/',
# Why: #169 in Alexa global
'http://www.skype.com/',
# Why: #170 in Alexa global
'http://www.ilivid.com/',
# Why: #171 in Alexa global
'http://www.bizcoaching.info/',
# Why: #172 in Alexa global
'http://www.google.cn/',
# Why: #173 in Alexa global
'http://www.wikimedia.org/',
# Why: #174 in Alexa global
'http://people.com.cn/',
# Why: #175 in Alexa global
'http://www.flipkart.com/',
# Why: #176 in Alexa global
'http://www.zedo.com/',
# Why: #177 in Alexa global
'http://tianya.cn/',
# Why: #178 in Alexa global
'http://www.searchnu.com/',
# Why: #179 in Alexa global
'http://www.indeed.com/',
# Why: #180 in Alexa global
'http://www.leboncoin.fr/',
# Why: #181 in Alexa global
'http://www.goo.ne.jp/',
# Why: #182 in Alexa global
'http://www.liveinternet.ru/',
# Why: #183 in Alexa global
'http://www.google.co.ve/',
# Why: #184 in Alexa global
'http://www.56.com/',
# Why: #185 in Alexa global
'http://www.google.com.vn/',
# Why: #186 in Alexa global
'http://www.google.gr/',
# Why: #187 in Alexa global
'http://www.comcast.net/',
# Why: #188 in Alexa global
'http://www.torrentz.eu/',
# Why: #189 in Alexa global
'http://www.etsy.com/',
# Why: #190 in Alexa global
'http://www.orange.fr/',
# Why: #191 in Alexa global
'http://www.systweak.com/',
# Why: #192 in Alexa global
'http://www.onet.pl/',
# Why: #193 in Alexa global
'http://www.wellsfargo.com/',
# Why: #194 in Alexa global
'http://pconline.com.cn/',
# Why: #195 in Alexa global
'http://www.letv.com/',
# Why: #196 in Alexa global
'http://www.goodgamestudios.com/',
# Why: #197 in Alexa global
'http://www.secureserver.net/',
# Why: #198 in Alexa global
'http://www.allegro.pl/',
# Why: #199 in Alexa global
'http://www.themeforest.net/',
# Why: #200 in Alexa global
'http://www.china.com.cn/',
# Why: #201 in Alexa global
'http://www.tripadvisor.com/',
# Why: #202 in Alexa global
'http://www.web.de/',
# Why: #203 in Alexa global
'http://www.answers.com/',
# Why: #204 in Alexa global
'http://www.amazon.ca/',
# Why: #205 in Alexa global
'http://www.mozilla.org/',
# Why: #206 in Alexa global
'http://www.guardian.co.uk/',
# Why: #207 in Alexa global
'http://www.stumbleupon.com/',
# Why: #208 in Alexa global
'http://www.hardsextube.com/',
# Why: #209 in Alexa global
'http://www.espncricinfo.com/',
# Why: #210 in Alexa global
'http://www.gmx.net/',
# Why: #211 in Alexa global
'http://www.photobucket.com/',
# Why: #212 in Alexa global
'http://www.ehow.com/',
# Why: #213 in Alexa global
'http://www.rediff.com/',
# Why: #214 in Alexa global
'http://www.popads.net/',
# Why: #215 in Alexa global
'http://www.wikihow.com/',
# Why: #216 in Alexa global
'http://www.search-results.com/',
# Why: #217 in Alexa global
'http://www.fiverr.com/',
# Why: #218 in Alexa global
'http://www.google.com.ua/',
# Why: #219 in Alexa global
'http://www.files.wordpress.com/',
# Why: #220 in Alexa global
'http://www.onlineaway.net/',
# Why: #221 in Alexa global
'http://www.nbcnews.com/',
# Why: #222 in Alexa global
'http://www.google.com.co/',
# Why: #223 in Alexa global
'http://www.hootsuite.com/',
# Why: #224 in Alexa global
'http://www.4dsply.com/',
# Why: #225 in Alexa global
'http://www.google.ro/',
# Why: #227 in Alexa global
'http://www.sourceforge.net/',
# Why: #228 in Alexa global
'http://www.cnzz.com/',
# Why: #229 in Alexa global
'http://www.java.com/',
# Why: #230 in Alexa global
'http://www.hudong.com/',
# Why: #231 in Alexa global
'http://www.ucoz.ru/',
# Why: #232 in Alexa global
'http://www.tudou.com/',
# Why: #233 in Alexa global
'http://www.addthis.com/',
# Why: #234 in Alexa global
'http://zol.com.cn/',
# Why: #235 in Alexa global
'http://www.google.com.ng/',
# Why: #236 in Alexa global
'http://www.soundcloud.com/',
# Why: #237 in Alexa global
'http://www.onclickads.net/',
# Why: #238 in Alexa global
'http://www.google.com.ph/',
# Why: #239 in Alexa global
'http://www.dmm.co.jp/',
# Why: #240 in Alexa global
'http://www.reference.com/',
# Why: #241 in Alexa global
'http://www.google.be/',
# Why: #242 in Alexa global
'http://www.wp.pl/',
# Why: #243 in Alexa global
'http://www.interbiz.me/',
# Why: #244 in Alexa global
'http://www.beeg.com/',
# Why: #245 in Alexa global
'http://www.rambler.ru/',
# Why: #246 in Alexa global
'http://www.sweetim.com/',
# Why: #247 in Alexa global
'http://www.aweber.com/',
# Why: #248 in Alexa global
'http://www.google.com.my/',
# Why: #249 in Alexa global
'http://www.pandora.com/',
# Why: #250 in Alexa global
'http://www.w3schools.com/',
# Why: #251 in Alexa global
'http://www.pengyou.com/',
# Why: #252 in Alexa global
'http://www.archive.org/',
# Why: #253 in Alexa global
'http://www.qvo6.com/',
# Why: #254 in Alexa global
'http://www.bet365.com/',
# Why: #255 in Alexa global
'http://www.etao.com/',
# Why: #256 in Alexa global
'http://www.lollipop-network.com/',
# Why: #257 in Alexa global
'http://www.qtrax.com/',
# Why: #258 in Alexa global
'http://www.naver.jp/',
# Why: #259 in Alexa global
'http://www.google.se/',
# Why: #260 in Alexa global
'http://www.google.dz/',
# Why: #261 in Alexa global
'http://www.usatoday.com/',
# Why: #262 in Alexa global
'http://www.zillow.com/',
# Why: #263 in Alexa global
'http://www.goal.com/',
# Why: #264 in Alexa global
'http://www.avito.ru/',
# Why: #265 in Alexa global
'http://kaixin001.com/',
# Why: #266 in Alexa global
'http://yesky.com/',
# Why: #267 in Alexa global
'http://www.mobile01.com/',
# Why: #268 in Alexa global
'http://www.soufun.com/',
# Why: #269 in Alexa global
'http://www.tagged.com/',
# Why: #270 in Alexa global
'http://www.warriorforum.com/',
# Why: #271 in Alexa global
'http://www.statcounter.com/',
# Why: #272 in Alexa global
'http://www.google.com.pe/',
# Why: #273 in Alexa global
'http://www.libero.it/',
# Why: #274 in Alexa global
'http://www.thefreedictionary.com/',
# Why: #275 in Alexa global
'http://www.soku.com/',
# Why: #276 in Alexa global
'http://www.incredibar.com/',
# Why: #277 in Alexa global
'http://www.kaskus.co.id/',
# Why: #278 in Alexa global
'http://www.likes.com/',
# Why: #279 in Alexa global
'http://www.weebly.com/',
# Why: #280 in Alexa global
'http://iqiyi.com/',
# Why: #281 in Alexa global
'http://www.pch.com/',
# Why: #282 in Alexa global
'http://www.ameba.jp/',
# Why: #284 in Alexa global
'http://www.samsung.com/',
# Why: #285 in Alexa global
'http://www.linkbucks.com/',
# Why: #286 in Alexa global
'http://www.uploaded.net/',
# Why: #287 in Alexa global
'http://www.bild.de/',
# Why: #288 in Alexa global
'http://www.google.com.bd/',
# Why: #289 in Alexa global
'http://www.google.at/',
# Why: #290 in Alexa global
'http://www.webcrawler.com/',
# Why: #291 in Alexa global
'http://www.t-online.de/',
# Why: #292 in Alexa global
'http://www.iminent.com/',
# Why: #293 in Alexa global
'http://www.google.pt/',
# Why: #294 in Alexa global
'http://www.detik.com/',
# Why: #295 in Alexa global
'http://www.ganji.com/',
# Why: #296 in Alexa global
'http://www.milliyet.com.tr/',
# Why: #297 in Alexa global
'http://www.bleacherreport.com/',
# Why: #298 in Alexa global
'http://www.forbes.com/',
# Why: #299 in Alexa global
'http://www.twoo.com/',
# Why: #300 in Alexa global
'http://www.olx.in/',
# Why: #301 in Alexa global
'http://www.mercadolivre.com.br/',
# Why: #302 in Alexa global
'http://www.hurriyet.com.tr/',
# Why: #303 in Alexa global
'http://www.pof.com/',
# Why: #304 in Alexa global
'http://www.wsj.com/',
# Why: #305 in Alexa global
'http://www.hostgator.com/',
# Why: #306 in Alexa global
'http://www.naver.com/',
# Why: #307 in Alexa global
'http://www.putlocker.com/',
# Why: #308 in Alexa global
'http://www.varzesh3.com/',
# Why: #309 in Alexa global
'http://www.rutracker.org/',
# Why: #311 in Alexa global
'http://www.optmd.com/',
# Why: #312 in Alexa global
'http://www.youm7.com/',
# Why: #313 in Alexa global
'http://www.google.cl/',
# Why: #314 in Alexa global
'http://www.ikea.com/',
# Why: #316 in Alexa global
'http://www.4399.com/',
# Why: #317 in Alexa global
'http://www.salesforce.com/',
# Why: #318 in Alexa global
'http://www.scribd.com/',
# Why: #319 in Alexa global
'http://www.google.com.sg/',
# Why: #320 in Alexa global
'http://it168.com/',
# Why: #321 in Alexa global
'http://www.goodreads.com/',
# Why: #322 in Alexa global
'http://www.target.com/',
# Why: #323 in Alexa global
'http://www.xunlei.com/',
# Why: #324 in Alexa global
'http://www.hulu.com/',
# Why: #325 in Alexa global
'http://www.github.com/',
# Why: #326 in Alexa global
'http://www.hp.com/',
# Why: #327 in Alexa global
'http://www.buzzfeed.com/',
# Why: #328 in Alexa global
'http://www.google.ch/',
# Why: #329 in Alexa global
'http://www.youdao.com/',
# Why: #330 in Alexa global
'http://www.blogspot.com.es/',
# Why: #331 in Alexa global
'http://so.com/',
# Why: #332 in Alexa global
'http://www.ups.com/',
# Why: #333 in Alexa global
'http://www.google.co.kr/',
# Why: #334 in Alexa global
'http://www.extratorrent.com/',
# Why: #335 in Alexa global
'http://www.match.com/',
# Why: #336 in Alexa global
'http://www.seznam.cz/',
# Why: #337 in Alexa global
'http://autohome.com.cn/',
# Why: #338 in Alexa global
'http://www.naukri.com/',
# Why: #339 in Alexa global
'http://www.gmw.cn/',
# Why: #340 in Alexa global
'http://www.drtuber.com/',
# Why: #341 in Alexa global
'http://www.spiegel.de/',
# Why: #342 in Alexa global
'http://www.marca.com/',
# Why: #343 in Alexa global
'http://www.ign.com/',
# Why: #344 in Alexa global
'http://www.domaintools.com/',
# Why: #345 in Alexa global
'http://www.free.fr/',
# Why: #346 in Alexa global
'http://www.telegraph.co.uk/',
# Why: #347 in Alexa global
'http://www.mypcbackup.com/',
# Why: #348 in Alexa global
'http://www.kakaku.com/',
# Why: #349 in Alexa global
'http://www.imageshack.us/',
# Why: #350 in Alexa global
'http://www.reuters.com/',
# Why: #351 in Alexa global
'http://www.ndtv.com/',
# Why: #352 in Alexa global
'http://www.ig.com.br/',
# Why: #353 in Alexa global
'http://www.bestbuy.com/',
# Why: #354 in Alexa global
'http://www.glispa.com/',
# Why: #355 in Alexa global
'http://www.quikr.com/',
# Why: #356 in Alexa global
'http://www.deadlyblessing.com/',
# Why: #357 in Alexa global
'http://www.wix.com/',
# Why: #358 in Alexa global
'http://xcar.com.cn/',
# Why: #359 in Alexa global
'http://paipai.com/',
# Why: #360 in Alexa global
'http://www.ebay.com.au/',
# Why: #361 in Alexa global
'http://www.yandex.ua/',
# Why: #362 in Alexa global
'http://chinanews.com/',
# Why: #363 in Alexa global
'http://www.clixsense.com/',
# Why: #364 in Alexa global
'http://nih.gov/',
# Why: #365 in Alexa global
'http://www.aili.com/',
# Why: #366 in Alexa global
'http://www.zing.vn/',
# Why: #367 in Alexa global
'http://pchome.net/',
# Why: #369 in Alexa global
'http://www.webmd.com/',
# Why: #370 in Alexa global
'http://www.terra.com.br/',
# Why: #371 in Alexa global
'http://pixiv.net/',
# Why: #372 in Alexa global
'http://www.in.com/',
# Why: #373 in Alexa global
'http://csdn.net/',
# Why: #374 in Alexa global
'http://www.pcpop.com/',
# Why: #375 in Alexa global
'http://www.google.co.hu/',
# Why: #376 in Alexa global
'http://www.lnksr.com/',
# Why: #377 in Alexa global
'http://www.jobrapido.com/',
# Why: #378 in Alexa global
'http://inbox.com/',
# Why: #379 in Alexa global
'http://dianping.com/',
# Why: #380 in Alexa global
'http://www.gsmarena.com/',
# Why: #381 in Alexa global
'http://www.mlb.com/',
# Why: #382 in Alexa global
'http://www.clicksor.com/',
# Why: #383 in Alexa global
'http://www.hdfcbank.com/',
# Why: #384 in Alexa global
'http://www.acesse.com/',
# Why: #385 in Alexa global
'http://www.homedepot.com/',
# Why: #386 in Alexa global
'http://www.twitch.tv/',
# Why: #387 in Alexa global
'http://www.morefreecamsecrets.com/',
# Why: #388 in Alexa global
'http://www.groupon.com/',
# Why: #389 in Alexa global
'http://www.lnksdata.com/',
# Why: #390 in Alexa global
'http://enet.com.cn/',
# Why: #391 in Alexa global
'http://www.google.cz/',
# Why: #392 in Alexa global
'http://www.usps.com/',
# Why: #393 in Alexa global
'http://xyxy.net/',
# Why: #394 in Alexa global
'http://www.att.com/',
# Why: #395 in Alexa global
'http://webs.com/',
# Why: #396 in Alexa global
'http://51job.com/',
# Why: #397 in Alexa global
'http://www.mashable.com/',
# Why: #398 in Alexa global
'http://www.yihaodian.com/',
# Why: #399 in Alexa global
'http://taringa.net/',
# Why: #400 in Alexa global
'http://www.fedex.com/',
# Why: #401 in Alexa global
'http://blogspot.co.uk/',
# Why: #402 in Alexa global
'http://www.ck101.com/',
# Why: #403 in Alexa global
'http://www.abcnews.go.com/',
# Why: #404 in Alexa global
'http://www.washingtonpost.com/',
# Why: #405 in Alexa global
'http://www.narod.ru/',
# Why: #406 in Alexa global
'http://www.china.com/',
# Why: #407 in Alexa global
'http://www.doubleclick.com/',
# Why: #409 in Alexa global
'http://www.cam4.com/',
# Why: #410 in Alexa global
'http://www.google.ie/',
# Why: #411 in Alexa global
'http://dangdang.com/',
# Why: #412 in Alexa global
'http://americanexpress.com/',
# Why: #413 in Alexa global
'http://www.disqus.com/',
# Why: #414 in Alexa global
'http://www.ixxx.com/',
# Why: #415 in Alexa global
'http://39.net/',
# Why: #416 in Alexa global
'http://www.isohunt.com/',
# Why: #417 in Alexa global
'http://php.net/',
# Why: #418 in Alexa global
'http://www.exoclick.com/',
# Why: #419 in Alexa global
'http://www.shutterstock.com/',
# Why: #420 in Alexa global
'http://www.dell.com/',
# Why: #421 in Alexa global
'http://www.google.ae/',
# Why: #422 in Alexa global
'http://histats.com/',
# Why: #423 in Alexa global
'http://www.outlook.com/',
# Why: #424 in Alexa global
'http://www.wordreference.com/',
# Why: #425 in Alexa global
'http://sahibinden.com/',
# Why: #426 in Alexa global
'http://www.126.com/',
# Why: #427 in Alexa global
'http://oyodomo.com/',
# Why: #428 in Alexa global
'http://www.gazeta.pl/',
# Why: #429 in Alexa global
'http://www.expedia.com/',
# Why: #430 in Alexa global
'http://cntv.cn/',
# Why: #431 in Alexa global
'http://www.kijiji.ca/',
# Why: #432 in Alexa global
'http://www.myfreecams.com/',
# Why: #433 in Alexa global
'http://rednet.cn/',
# Why: #434 in Alexa global
'http://www.capitalone.com/',
# Why: #435 in Alexa global
'http://moz.com/',
# Why: #436 in Alexa global
'http://qunar.com/',
# Why: #437 in Alexa global
'http://www.taleo.net/',
# Why: #438 in Alexa global
'http://www.google.co.il/',
# Why: #439 in Alexa global
'http://www.microsoftonline.com/',
# Why: #440 in Alexa global
'http://datasrvrs.com/',
# Why: #441 in Alexa global
'http://www.zippyshare.com/',
# Why: #442 in Alexa global
'http://google.no/',
# Why: #444 in Alexa global
'http://justdial.com/',
# Why: #445 in Alexa global
'http://www.2345.com/',
# Why: #446 in Alexa global
'http://adultfriendfinder.com/',
# Why: #447 in Alexa global
'http://www.shaadi.com/',
# Why: #448 in Alexa global
'http://www.mobile.de/',
# Why: #449 in Alexa global
'http://abril.com.br/',
# Why: #450 in Alexa global
'http://empowernetwork.com/',
# Why: #451 in Alexa global
'http://www.icicibank.com/',
# Why: #452 in Alexa global
'http://xe.com/',
# Why: #454 in Alexa global
'http://www.mailchimp.com/',
# Why: #455 in Alexa global
'http://fbcdn.net/',
# Why: #456 in Alexa global
'http://www.ccb.com/',
# Why: #457 in Alexa global
'http://huanqiu.com/',
# Why: #458 in Alexa global
'http://www.seesaa.net/',
# Why: #459 in Alexa global
'http://jimdo.com/',
# Why: #460 in Alexa global
'http://fucked-tube.com/',
# Why: #461 in Alexa global
'http://google.dk/',
# Why: #462 in Alexa global
'http://www.yellowpages.com/',
# Why: #463 in Alexa global
'http://www.constantcontact.com/',
# Why: #464 in Alexa global
'http://www.tinyurl.com/',
# Why: #465 in Alexa global
'http://mysearchresults.com/',
# Why: #466 in Alexa global
'http://www.friv.com/',
# Why: #467 in Alexa global
'http://www.ebay.it/',
# Why: #468 in Alexa global
'http://www.aizhan.com/',
# Why: #469 in Alexa global
'http://accuweather.com/',
# Why: #470 in Alexa global
'http://www.51buy.com/',
# Why: #472 in Alexa global
'http://www.snapdeal.com/',
# Why: #473 in Alexa global
'http://google.az/',
# Why: #474 in Alexa global
'http://pogo.com/',
# Why: #475 in Alexa global
'http://www.adultadworld.com/',
# Why: #476 in Alexa global
'http://www.nifty.com/',
# Why: #477 in Alexa global
'http://bitauto.com/',
# Why: #478 in Alexa global
'http://drudgereport.com/',
# Why: #479 in Alexa global
'http://www.bloomberg.com/',
# Why: #480 in Alexa global
'http://www.vnexpress.net/',
# Why: #481 in Alexa global
'http://eastmoney.com/',
# Why: #482 in Alexa global
'http://www.verizonwireless.com/',
# Why: #483 in Alexa global
'http://pcauto.com.cn/',
# Why: #485 in Alexa global
'http://www.onlinesbi.com/',
# Why: #486 in Alexa global
'http://www.2ch.net/',
# Why: #487 in Alexa global
'http://speedtest.net/',
# Why: #488 in Alexa global
'http://www.largeporntube.com/',
# Why: #489 in Alexa global
'http://www.stackexchange.com/',
# Why: #490 in Alexa global
'http://roblox.com/',
# Why: #492 in Alexa global
'http://pclady.com.cn/',
# Why: #493 in Alexa global
'http://miniclip.com/',
# Why: #495 in Alexa global
'http://www.tmz.com/',
# Why: #496 in Alexa global
'http://google.fi/',
# Why: #497 in Alexa global
'http://ning.com/',
# Why: #498 in Alexa global
'http://monster.com/',
# Why: #499 in Alexa global
'http://www.jrj.com.cn/',
# Why: #500 in Alexa global
'http://www.mihanblog.com/',
# Why: #501 in Alexa global
'http://www.biglobe.ne.jp/',
# Why: #502 in Alexa global
'http://steampowered.com/',
# Why: #503 in Alexa global
'http://www.nuvid.com/',
# Why: #504 in Alexa global
'http://kooora.com/',
# Why: #505 in Alexa global
'http://ebay.in/',
# Why: #506 in Alexa global
'http://mp3skull.com/',
# Why: #507 in Alexa global
'http://www.icbc.com.cn/',
# Why: #508 in Alexa global
'http://blogspot.ru/',
# Why: #509 in Alexa global
'http://duowan.com/',
# Why: #510 in Alexa global
'http://www.blogspot.de/',
# Why: #511 in Alexa global
'http://www.fhserve.com/',
# Why: #512 in Alexa global
'http://moneycontrol.com/',
# Why: #513 in Alexa global
'http://pornerbros.com/',
# Why: #514 in Alexa global
'http://eazel.com/',
# Why: #515 in Alexa global
'http://xgo.com.cn/',
# Why: #516 in Alexa global
'http://daum.net/',
# Why: #517 in Alexa global
'http://www.10086.cn/',
# Why: #518 in Alexa global
'http://lady8844.com/',
# Why: #519 in Alexa global
'http://www.rapidgator.net/',
# Why: #520 in Alexa global
'http://thesun.co.uk/',
# Why: #521 in Alexa global
'http://youtube-mp3.org/',
# Why: #522 in Alexa global
'http://www.v9.com/',
# Why: #523 in Alexa global
'http://www.disney.go.com/',
# Why: #524 in Alexa global
'http://www.homeway.com.cn/',
# Why: #525 in Alexa global
'http://porntube.com/',
# Why: #526 in Alexa global
'http://www.surveymonkey.com/',
# Why: #527 in Alexa global
'http://www.meetup.com/',
# Why: #528 in Alexa global
'http://www.ero-advertising.com/',
# Why: #529 in Alexa global
'http://www.bravotube.net/',
# Why: #530 in Alexa global
'http://appround.biz/',
# Why: #531 in Alexa global
'http://blogspot.it/',
# Why: #532 in Alexa global
'http://ctrip.com/',
# Why: #533 in Alexa global
'http://www.9gag.com/',
# Why: #534 in Alexa global
'http://www.odesk.com/',
# Why: #535 in Alexa global
'http://www.kinopoisk.ru/',
# Why: #536 in Alexa global
'http://www.trulia.com/',
# Why: #537 in Alexa global
'http://www.mercadolibre.com.ar/',
# Why: #538 in Alexa global
'http://www.repubblica.it/',
# Why: #539 in Alexa global
'http://hupu.com/',
# Why: #540 in Alexa global
'http://www.imesh.com/',
# Why: #541 in Alexa global
'http://searchfunmoods.com/',
# Why: #542 in Alexa global
'http://www.backpage.com/',
# Why: #543 in Alexa global
'http://latimes.com/',
# Why: #544 in Alexa global
'http://www.news.com.au/',
# Why: #545 in Alexa global
'http://www.gc.ca/',
# Why: #546 in Alexa global
'http://ce.cn/',
# Why: #547 in Alexa global
'http://www.hubpages.com/',
# Why: #548 in Alexa global
'http://www.clickbank.com/',
# Why: #549 in Alexa global
'http://www.mapquest.com/',
# Why: #550 in Alexa global
'http://www.sweetpacks.com/',
# Why: #551 in Alexa global
'http://www.hypergames.net/',
# Why: #552 in Alexa global
'http://alimama.com/',
# Why: #553 in Alexa global
'http://www.cnblogs.com/',
# Why: #554 in Alexa global
'http://www.vancl.com/',
# Why: #555 in Alexa global
'http://www.bitly.com/',
# Why: #556 in Alexa global
'http://www.tokobagus.com/',
# Why: #557 in Alexa global
'http://www.webmoney.ru/',
# Why: #558 in Alexa global
'http://www.google.sk/',
# Why: #559 in Alexa global
'http://www.shopathome.com/',
# Why: #560 in Alexa global
'http://elpais.com/',
# Why: #561 in Alexa global
'http://www.oneindia.in/',
# Why: #562 in Alexa global
'http://www.codecanyon.net/',
# Why: #563 in Alexa global
'http://www.businessinsider.com/',
# Why: #564 in Alexa global
'http://www.blackhatworld.com/',
# Why: #565 in Alexa global
'http://www.farsnews.com/',
# Why: #566 in Alexa global
'http://www.spankwire.com/',
# Why: #567 in Alexa global
'http://www.mynet.com/',
# Why: #568 in Alexa global
'http://www.sape.ru/',
# Why: #569 in Alexa global
'http://www.bhaskar.com/',
# Why: #570 in Alexa global
'http://www.lenta.ru/',
# Why: #571 in Alexa global
'http://www.gutefrage.net/',
# Why: #572 in Alexa global
'http://www.nba.com/',
# Why: #573 in Alexa global
'http://www.feedly.com/',
# Why: #574 in Alexa global
'http://www.chaturbate.com/',
# Why: #575 in Alexa global
'http://elmundo.es/',
# Why: #576 in Alexa global
'http://www.ad6media.fr/',
# Why: #577 in Alexa global
'http://www.sberbank.ru/',
# Why: #578 in Alexa global
'http://www.lockyourhome.com/',
# Why: #579 in Alexa global
'http://kinox.to/',
# Why: #580 in Alexa global
'http://www.subito.it/',
# Why: #581 in Alexa global
'http://www.rbc.ru/',
# Why: #582 in Alexa global
'http://sfr.fr/',
# Why: #584 in Alexa global
'http://www.skyrock.com/',
# Why: #585 in Alexa global
'http://priceline.com/',
# Why: #586 in Alexa global
'http://www.jabong.com/',
# Why: #587 in Alexa global
'http://www.y8.com/',
# Why: #588 in Alexa global
'http://www.wunderground.com/',
# Why: #589 in Alexa global
'http://mixi.jp/',
# Why: #590 in Alexa global
'http://www.habrahabr.ru/',
# Why: #591 in Alexa global
'http://www.softpedia.com/',
# Why: #592 in Alexa global
'http://www.ancestry.com/',
# Why: #593 in Alexa global
'http://bluehost.com/',
# Why: #594 in Alexa global
'http://www.123rf.com/',
# Why: #595 in Alexa global
'http://lowes.com/',
# Why: #596 in Alexa global
'http://www.free-tv-video-online.me/',
# Why: #597 in Alexa global
'http://tabelog.com/',
# Why: #598 in Alexa global
'http://www.vehnix.com/',
# Why: #599 in Alexa global
'http://55bbs.com/',
# Why: #600 in Alexa global
'http://www.swagbucks.com/',
# Why: #601 in Alexa global
'http://www.speedanalysis.net/',
# Why: #603 in Alexa global
'http://www.virgilio.it/',
# Why: #604 in Alexa global
'http://www.peyvandha.ir/',
# Why: #605 in Alexa global
'http://www.infusionsoft.com/',
# Why: #606 in Alexa global
'http://newegg.com/',
# Why: #608 in Alexa global
'http://www.sulekha.com/',
# Why: #609 in Alexa global
'http://myspace.com/',
# Why: #610 in Alexa global
'http://yxlady.com/',
# Why: #611 in Alexa global
'http://www.haber7.com/',
# Why: #612 in Alexa global
'http://www.w3.org/',
# Why: #613 in Alexa global
'http://squidoo.com/',
# Why: #614 in Alexa global
'http://www.hotels.com/',
# Why: #615 in Alexa global
'http://oracle.com/',
# Why: #616 in Alexa global
'http://fatakat.com/',
# Why: #617 in Alexa global
'http://www.joomla.org/',
# Why: #618 in Alexa global
'http://qidian.com/',
# Why: #619 in Alexa global
'http://hatena.ne.jp/',
# Why: #620 in Alexa global
'http://adbooth.net/',
# Why: #621 in Alexa global
'http://wretch.cc/',
# Why: #622 in Alexa global
'http://www.freelancer.com/',
# Why: #623 in Alexa global
'http://www.typepad.com/',
# Why: #624 in Alexa global
'http://foxsports.com/',
# Why: #625 in Alexa global
'http://www.allrecipes.com/',
# Why: #626 in Alexa global
'http://www.searchengines.ru/',
# Why: #628 in Alexa global
'http://babytree.com/',
# Why: #629 in Alexa global
'http://interia.pl/',
# Why: #630 in Alexa global
'http://xhamstercams.com/',
# Why: #632 in Alexa global
'http://www.verizon.com/',
# Why: #633 in Alexa global
'http://intoday.in/',
# Why: #634 in Alexa global
'http://sears.com/',
# Why: #635 in Alexa global
'http://www.okcupid.com/',
# Why: #636 in Alexa global
'http://6.cn/',
# Why: #637 in Alexa global
'http://kompas.com/',
# Why: #638 in Alexa global
'http://cj.com/',
# Why: #639 in Alexa global
'http://www.4tube.com/',
# Why: #640 in Alexa global
'http://www.chip.de/',
# Why: #641 in Alexa global
'http://force.com/',
# Why: #643 in Alexa global
'http://www.advertserve.com/',
# Why: #644 in Alexa global
'http://maktoob.com/',
# Why: #645 in Alexa global
'http://www.24h.com.vn/',
# Why: #646 in Alexa global
'http://foursquare.com/',
# Why: #647 in Alexa global
'http://cbsnews.com/',
# Why: #648 in Alexa global
'http://pornhublive.com/',
# Why: #649 in Alexa global
'http://www.xda-developers.com/',
# Why: #651 in Alexa global
'http://www.milanuncios.com/',
# Why: #652 in Alexa global
'http://retailmenot.com/',
# Why: #653 in Alexa global
'http://www.keezmovies.com/',
# Why: #654 in Alexa global
'http://nydailynews.com/',
# Why: #655 in Alexa global
'http://h2porn.com/',
# Why: #656 in Alexa global
'http://www.careerbuilder.com/',
# Why: #657 in Alexa global
'http://xing.com/',
# Why: #658 in Alexa global
'http://www.sakura.ne.jp/',
# Why: #659 in Alexa global
'http://citibank.com/',
# Why: #660 in Alexa global
'http://www.linkwithin.com/',
# Why: #661 in Alexa global
'http://www.blogspot.jp/',
# Why: #662 in Alexa global
'http://singlessalad.com/',
# Why: #663 in Alexa global
'http://www.altervista.org/',
# Why: #664 in Alexa global
'http://www.turbobit.net/',
# Why: #665 in Alexa global
'http://www.zoosk.com/',
# Why: #666 in Alexa global
'http://www.digg.com/',
# Why: #667 in Alexa global
'http://hespress.com/',
# Why: #668 in Alexa global
'http://bigpoint.com/',
# Why: #669 in Alexa global
'http://www.yourlust.com/',
# Why: #670 in Alexa global
'http://www.myntra.com/',
# Why: #671 in Alexa global
'http://issuu.com/',
# Why: #672 in Alexa global
'http://macys.com/',
# Why: #673 in Alexa global
'http://google.bg/',
# Why: #674 in Alexa global
'http://github.io/',
# Why: #675 in Alexa global
'http://filestube.com/',
# Why: #677 in Alexa global
'http://cmbchina.com/',
# Why: #678 in Alexa global
'http://irctc.co.in/',
# Why: #679 in Alexa global
'http://filehippo.com/',
# Why: #680 in Alexa global
'http://mop.com/',
# Why: #681 in Alexa global
'http://bodybuilding.com/',
# Why: #682 in Alexa global
'http://www.paidui.com/',
# Why: #683 in Alexa global
'http://zimbio.com/',
# Why: #684 in Alexa global
'http://www.panet.co.il/',
# Why: #685 in Alexa global
'http://www.mgid.com/',
# Why: #686 in Alexa global
'http://www.ya.ru/',
# Why: #687 in Alexa global
'http://probux.com/',
# Why: #688 in Alexa global
'http://haberturk.com/',
# Why: #689 in Alexa global
'http://persianblog.ir/',
# Why: #690 in Alexa global
'http://meituan.com/',
# Why: #691 in Alexa global
'http://www.mercadolibre.com.mx/',
# Why: #692 in Alexa global
'http://ppstream.com/',
# Why: #693 in Alexa global
'http://www.atwiki.jp/',
# Why: #694 in Alexa global
'http://sunporno.com/',
# Why: #695 in Alexa global
'http://vodly.to/',
# Why: #696 in Alexa global
'http://forgeofempires.com/',
# Why: #697 in Alexa global
'http://elance.com/',
# Why: #698 in Alexa global
'http://adscale.de/',
# Why: #699 in Alexa global
'http://vipshop.com/',
# Why: #700 in Alexa global
'http://babycenter.com/',
# Why: #701 in Alexa global
'http://istockphoto.com/',
# Why: #702 in Alexa global
'http://www.commentcamarche.net/',
# Why: #704 in Alexa global
'http://upworthy.com/',
# Why: #705 in Alexa global
'http://www.download.com/',
# Why: #706 in Alexa global
'http://www.so-net.ne.jp/',
# Why: #707 in Alexa global
'http://battle.net/',
# Why: #708 in Alexa global
'http://beva.com/',
# Why: #709 in Alexa global
'http://list-manage.com/',
# Why: #710 in Alexa global
'http://www.corriere.it/',
# Why: #711 in Alexa global
'http://noticias24.com/',
# Why: #712 in Alexa global
'http://www.ucoz.com/',
# Why: #713 in Alexa global
'http://www.porn.com/',
# Why: #714 in Alexa global
'http://www.google.lk/',
# Why: #715 in Alexa global
'http://www.lifehacker.com/',
# Why: #716 in Alexa global
'http://www.today.com/',
# Why: #717 in Alexa global
'http://chinabyte.com/',
# Why: #718 in Alexa global
'http://southwest.com/',
# Why: #719 in Alexa global
'http://www.ca.gov/',
# Why: #720 in Alexa global
'http://nudevista.com/',
# Why: #721 in Alexa global
'http://www.yandex.com.tr/',
# Why: #722 in Alexa global
'http://people.com/',
# Why: #723 in Alexa global
'http://www.docin.com/',
# Why: #724 in Alexa global
'http://www.norton.com/',
# Why: #725 in Alexa global
'http://perfectgirls.net/',
# Why: #726 in Alexa global
'http://www.engadget.com/',
# Why: #727 in Alexa global
'http://www.realtor.com/',
# Why: #728 in Alexa global
'http://www.techcrunch.com/',
# Why: #729 in Alexa global
'http://www.time.com/',
# Why: #730 in Alexa global
'http://indianrail.gov.in/',
# Why: #731 in Alexa global
'http://www.dtiblog.com/',
# Why: #732 in Alexa global
'http://www.way2sms.com/',
# Why: #733 in Alexa global
'http://foodnetwork.com/',
# Why: #735 in Alexa global
'http://subscene.com/',
# Why: #736 in Alexa global
'http://www.worldstarhiphop.com/',
# Why: #737 in Alexa global
'http://tabnak.ir/',
# Why: #738 in Alexa global
'http://weather.com.cn/',
# Why: #739 in Alexa global
'http://aeriagames.com/',
# Why: #741 in Alexa global
'http://leagueoflegends.com/',
# Why: #742 in Alexa global
'http://51.la/',
# Why: #743 in Alexa global
'http://www.facenama.com/',
# Why: #744 in Alexa global
'http://189.cn/',
# Why: #745 in Alexa global
'http://sapo.pt/',
# Why: #746 in Alexa global
'http://www.bitshare.com/',
# Why: #748 in Alexa global
'http://gamespot.com/',
# Why: #749 in Alexa global
'http://cy-pr.com/',
# Why: #750 in Alexa global
'http://kankan.com/',
# Why: #751 in Alexa global
'http://google.co.nz/',
# Why: #752 in Alexa global
'http://www.liveleak.com/',
# Why: #753 in Alexa global
'http://video-one.com/',
# Why: #754 in Alexa global
'http://marktplaats.nl/',
# Why: #755 in Alexa global
'http://elwatannews.com/',
# Why: #756 in Alexa global
'http://www.roulettebotplus.com/',
# Why: #757 in Alexa global
'http://www.adserverplus.com/',
# Why: #758 in Alexa global
'http://www.akhbarak.net/',
# Why: #759 in Alexa global
'http://gumtree.com/',
# Why: #760 in Alexa global
'http://weheartit.com/',
# Why: #761 in Alexa global
'http://www.openadserving.com/',
# Why: #762 in Alexa global
'http://sporx.com/',
# Why: #763 in Alexa global
'http://www.focus.cn/',
# Why: #764 in Alexa global
'http://www.mercadolibre.com.ve/',
# Why: #765 in Alexa global
'http://www.zendesk.com/',
# Why: #766 in Alexa global
'http://www.houzz.com/',
# Why: #767 in Alexa global
'http://asos.com/',
# Why: #768 in Alexa global
'http://www.letitbit.net/',
# Why: #769 in Alexa global
'http://www.geocities.jp/',
# Why: #770 in Alexa global
'http://www.ocn.ne.jp/',
# Why: #771 in Alexa global
'http://quora.com/',
# Why: #772 in Alexa global
'http://www.yandex.kz/',
# Why: #773 in Alexa global
'http://www.mcafee.com/',
# Why: #774 in Alexa global
'http://www.ensonhaber.com/',
# Why: #775 in Alexa global
'http://www.gamefaqs.com/',
# Why: #776 in Alexa global
'http://vk.me/',
# Why: #777 in Alexa global
'http://avast.com/',
# Why: #778 in Alexa global
'http://website-unavailable.com/',
# Why: #779 in Alexa global
'http://www.22find.com/',
# Why: #780 in Alexa global
'http://www.admagnet.net/',
# Why: #781 in Alexa global
'http://rottentomatoes.com/',
# Why: #782 in Alexa global
'http://google.com.kw/',
# Why: #783 in Alexa global
'http://www.cloob.com/',
# Why: #784 in Alexa global
'http://www.nokia.com/',
# Why: #785 in Alexa global
'http://wetter.com/',
# Why: #786 in Alexa global
'http://www.taboola.com/',
# Why: #787 in Alexa global
'http://www.tenpay.com/',
# Why: #788 in Alexa global
'http://www.888.com/',
# Why: #789 in Alexa global
'http://flipora.com/',
# Why: #790 in Alexa global
'http://www.adhitprofits.com/',
# Why: #791 in Alexa global
'http://www.timeanddate.com/',
# Why: #792 in Alexa global
'http://www.as.com/',
# Why: #793 in Alexa global
'http://www.fanpop.com/',
# Why: #794 in Alexa global
'http://informer.com/',
# Why: #795 in Alexa global
'http://www.blogimg.jp/',
# Why: #796 in Alexa global
'http://exblog.jp/',
# Why: #797 in Alexa global
'http://www.over-blog.com/',
# Why: #798 in Alexa global
'http://www.itau.com.br/',
# Why: #799 in Alexa global
'http://balagana.net/',
# Why: #800 in Alexa global
'http://www.ellechina.com/',
# Why: #801 in Alexa global
'http://avazutracking.net/',
# Why: #802 in Alexa global
'http://www.gap.com/',
# Why: #803 in Alexa global
'http://www.examiner.com/',
# Why: #804 in Alexa global
'http://www.vporn.com/',
# Why: #805 in Alexa global
'http://lenovo.com/',
# Why: #806 in Alexa global
'http://www.eonline.com/',
# Why: #807 in Alexa global
'http://r7.com/',
# Why: #808 in Alexa global
'http://majesticseo.com/',
# Why: #809 in Alexa global
'http://immobilienscout24.de/',
# Why: #810 in Alexa global
'http://www.google.kz/',
# Why: #811 in Alexa global
'http://goo.gl/',
# Why: #812 in Alexa global
'http://zwaar.net/',
# Why: #814 in Alexa global
'http://www.bankmellat.ir/',
# Why: #815 in Alexa global
'http://alphaporno.com/',
# Why: #816 in Alexa global
'http://whitepages.com/',
# Why: #817 in Alexa global
'http://viva.co.id/',
# Why: #818 in Alexa global
'http://www.rutor.org/',
# Why: #819 in Alexa global
'http://wiktionary.org/',
# Why: #820 in Alexa global
'http://intuit.com/',
# Why: #821 in Alexa global
'http://www.gismeteo.ru/',
# Why: #822 in Alexa global
'http://dantri.com.vn/',
# Why: #823 in Alexa global
'http://www.xbox.com/',
# Why: #824 in Alexa global
'http://www.myegy.com/',
# Why: #825 in Alexa global
'http://www.xtube.com/',
# Why: #826 in Alexa global
'http://masrawy.com/',
# Why: #827 in Alexa global
'http://www.urbandictionary.com/',
# Why: #828 in Alexa global
'http://agoda.com/',
# Why: #829 in Alexa global
'http://www.ebay.fr/',
# Why: #830 in Alexa global
'http://www.kickstarter.com/',
# Why: #831 in Alexa global
'http://www.6park.com/',
# Why: #832 in Alexa global
'http://www.metacafe.com/',
# Why: #833 in Alexa global
'http://www.yamahaonlinestore.com/',
# Why: #834 in Alexa global
'http://www.anysex.com/',
# Why: #835 in Alexa global
'http://www.azlyrics.com/',
# Why: #836 in Alexa global
'http://www.rt.com/',
# Why: #837 in Alexa global
'http://www.ibm.com/',
# Why: #838 in Alexa global
'http://www.nordstrom.com/',
# Why: #839 in Alexa global
'http://ezinearticles.com/',
# Why: #840 in Alexa global
'http://www.cnbc.com/',
# Why: #841 in Alexa global
'http://redtubelive.com/',
# Why: #842 in Alexa global
'http://clicksvenue.com/',
# Why: #843 in Alexa global
'http://www.tradus.com/',
# Why: #844 in Alexa global
'http://www.gamer.com.tw/',
# Why: #846 in Alexa global
'http://www.m2newmedia.com/',
# Why: #848 in Alexa global
'http://www.custhelp.com/',
# Why: #849 in Alexa global
'http://www.4chan.org/',
# Why: #850 in Alexa global
'http://www.kioskea.net/',
# Why: #851 in Alexa global
'http://yoka.com/',
# Why: #852 in Alexa global
'http://www.7k7k.com/',
# Why: #853 in Alexa global
'http://www.opensiteexplorer.org/',
# Why: #854 in Alexa global
'http://www.musica.com/',
# Why: #855 in Alexa global
'http://www.coupons.com/',
# Why: #856 in Alexa global
'http://cracked.com/',
# Why: #857 in Alexa global
'http://www.caixa.gov.br/',
# Why: #858 in Alexa global
'http://www.skysports.com/',
# Why: #859 in Alexa global
'http://www.kizi.com/',
# Why: #860 in Alexa global
'http://www.getresponse.com/',
# Why: #861 in Alexa global
'http://www.sky.com/',
# Why: #862 in Alexa global
'http://www.marketwatch.com/',
# Why: #863 in Alexa global
'http://www.google.com.ec/',
# Why: #864 in Alexa global
'http://www.cbslocal.com/',
# Why: #865 in Alexa global
'http://www.zhihu.com/',
# Why: #866 in Alexa global
'http://www.888poker.com/',
# Why: #867 in Alexa global
'http://www.digitalpoint.com/',
# Why: #868 in Alexa global
'http://www.blog.163.com/',
# Why: #869 in Alexa global
'http://www.rantsports.com/',
# Why: #870 in Alexa global
'http://videosexarchive.com/',
# Why: #871 in Alexa global
'http://www.who.is/',
# Why: #875 in Alexa global
'http://www.gogetlinks.net/',
# Why: #876 in Alexa global
'http://www.idnes.cz/',
# Why: #877 in Alexa global
'http://www.king.com/',
# Why: #878 in Alexa global
'http://www.say-move.org/',
# Why: #879 in Alexa global
'http://www.motherless.com/',
# Why: #880 in Alexa global
'http://www.npr.org/',
# Why: #881 in Alexa global
'http://www.legacy.com/',
# Why: #882 in Alexa global
'http://www.aljazeera.net/',
# Why: #883 in Alexa global
'http://barnesandnoble.com/',
# Why: #884 in Alexa global
'http://www.overstock.com/',
# Why: #885 in Alexa global
'http://www.drom.ru/',
# Why: #886 in Alexa global
'http://www.weather.gov/',
# Why: #887 in Alexa global
'http://gstatic.com/',
# Why: #888 in Alexa global
'http://www.amung.us/',
# Why: #889 in Alexa global
'http://www.traidnt.net/',
# Why: #890 in Alexa global
'http://www.ovh.net/',
# Why: #891 in Alexa global
'http://www.rtl.de/',
# Why: #892 in Alexa global
'http://howstuffworks.com/',
# Why: #893 in Alexa global
'http://digikala.com/',
# Why: #894 in Alexa global
'http://www.bannersbroker.com/',
# Why: #895 in Alexa global
'http://www.kohls.com/',
# Why: #896 in Alexa global
'http://www.google.com.do/',
# Why: #897 in Alexa global
'http://www.dealfish.co.th/',
# Why: #899 in Alexa global
'http://19lou.com/',
# Why: #900 in Alexa global
'http://www.okwave.jp/',
# Why: #901 in Alexa global
'http://www.ezpowerads.com/',
# Why: #902 in Alexa global
'http://www.lemonde.fr/',
# Why: #904 in Alexa global
'http://www.chexun.com/',
# Why: #905 in Alexa global
'http://folha.uol.com.br/',
# Why: #906 in Alexa global
'http://www.imagebam.com/',
# Why: #907 in Alexa global
'http://viooz.co/',
# Why: #908 in Alexa global
'http://www.prothom-alo.com/',
# Why: #909 in Alexa global
'http://360doc.com/',
# Why: #910 in Alexa global
'http://m-w.com/',
# Why: #912 in Alexa global
'http://fanfiction.net/',
# Why: #914 in Alexa global
'http://semrush.com/',
# Why: #915 in Alexa global
'http://www.mama.cn/',
# Why: #916 in Alexa global
'http://ci123.com/',
# Why: #917 in Alexa global
'http://www.plugrush.com/',
# Why: #918 in Alexa global
'http://www.cafemom.com/',
# Why: #919 in Alexa global
'http://mangareader.net/',
# Why: #920 in Alexa global
'http://haizhangs.com/',
# Why: #921 in Alexa global
'http://cdiscount.com/',
# Why: #922 in Alexa global
'http://zappos.com/',
# Why: #923 in Alexa global
'http://www.manta.com/',
# Why: #924 in Alexa global
'http://www.novinky.cz/',
# Why: #925 in Alexa global
'http://www.hi5.com/',
# Why: #926 in Alexa global
'http://www.blogspot.kr/',
# Why: #927 in Alexa global
'http://www.pr-cy.ru/',
# Why: #928 in Alexa global
'http://movie4k.to/',
# Why: #929 in Alexa global
'http://www.patch.com/',
# Why: #930 in Alexa global
'http://alarabiya.net/',
# Why: #931 in Alexa global
'http://indiamart.com/',
# Why: #932 in Alexa global
'http://www.nhk.or.jp/',
# Why: #933 in Alexa global
'http://cartrailor.com/',
# Why: #934 in Alexa global
'http://almasryalyoum.com/',
# Why: #935 in Alexa global
'http://315che.com/',
# Why: #936 in Alexa global
'http://www.google.by/',
# Why: #937 in Alexa global
'http://tomshardware.com/',
# Why: #938 in Alexa global
'http://minecraft.net/',
# Why: #939 in Alexa global
'http://www.gulfup.com/',
# Why: #940 in Alexa global
'http://www.rr.com/',
# Why: #942 in Alexa global
'http://www.spotify.com/',
# Why: #943 in Alexa global
'http://www.airtel.in/',
# Why: #944 in Alexa global
'http://www.espnfc.com/',
# Why: #945 in Alexa global
'http://sanook.com/',
# Why: #946 in Alexa global
'http://ria.ru/',
# Why: #947 in Alexa global
'http://google.com.qa/',
# Why: #948 in Alexa global
'http://jquery.com/',
# Why: #950 in Alexa global
'http://pinshan.com/',
# Why: #951 in Alexa global
'http://onlylady.com/',
# Why: #952 in Alexa global
'http://www.pornoxo.com/',
# Why: #953 in Alexa global
'http://cookpad.com/',
# Why: #954 in Alexa global
'http://www.pagesjaunes.fr/',
# Why: #955 in Alexa global
'http://www.usmagazine.com/',
# Why: #956 in Alexa global
'http://www.google.lt/',
# Why: #957 in Alexa global
'http://www.nu.nl/',
# Why: #958 in Alexa global
'http://www.hm.com/',
# Why: #959 in Alexa global
'http://fixya.com/',
# Why: #960 in Alexa global
'http://theblaze.com/',
# Why: #961 in Alexa global
'http://cbssports.com/',
# Why: #962 in Alexa global
'http://www.eyny.com/',
# Why: #963 in Alexa global
'http://17173.com/',
# Why: #964 in Alexa global
'http://www.excite.co.jp/',
# Why: #965 in Alexa global
'http://hc360.com/',
# Why: #966 in Alexa global
'http://www.cbs.com/',
# Why: #967 in Alexa global
'http://www.telegraaf.nl/',
# Why: #968 in Alexa global
'http://netlog.com/',
# Why: #969 in Alexa global
'http://voc.com.cn/',
# Why: #970 in Alexa global
'http://slickdeals.net/',
# Why: #971 in Alexa global
'http://www.ldblog.jp/',
# Why: #972 in Alexa global
'http://ruten.com.tw/',
# Why: #973 in Alexa global
'http://yobt.com/',
# Why: #974 in Alexa global
'http://certified-toolbar.com/',
# Why: #975 in Alexa global
'http://miercn.com/',
# Why: #976 in Alexa global
'http://aparat.com/',
# Why: #977 in Alexa global
'http://billdesk.com/',
# Why: #978 in Alexa global
'http://yandex.by/',
# Why: #979 in Alexa global
'http://888casino.com/',
# Why: #980 in Alexa global
'http://twitpic.com/',
# Why: #981 in Alexa global
'http://google.hr/',
# Why: #982 in Alexa global
'http://tubegalore.com/',
# Why: #983 in Alexa global
'http://dhgate.com/',
# Why: #984 in Alexa global
'http://makemytrip.com/',
# Why: #986 in Alexa global
'http://shop.com/',
# Why: #987 in Alexa global
'http://www.nike.com/',
# Why: #988 in Alexa global
'http://kayak.com/',
# Why: #989 in Alexa global
'http://pcbaby.com.cn/',
# Why: #990 in Alexa global
'http://fandango.com/',
# Why: #991 in Alexa global
'http://tutsplus.com/',
# Why: #992 in Alexa global
'http://gotomeeting.com/',
# Why: #994 in Alexa global
'http://shareasale.com/',
# Why: #995 in Alexa global
'http://www.boc.cn/',
# Why: #996 in Alexa global
'http://mpnrs.com/',
# Why: #997 in Alexa global
'http://keepvid.com/',
# Why: #998 in Alexa global
'http://www.lequipe.fr/',
# Why: #999 in Alexa global
'http://namecheap.com/',
# Why: #1000 in Alexa global
'http://doublepimp.com/',
# Why: #1001 in Alexa global
'http://softigloo.com/',
# Why: #1002 in Alexa global
'http://givemesport.com/',
# Why: #1003 in Alexa global
'http://mtime.com/',
# Why: #1004 in Alexa global
'http://letras.mus.br/',
# Why: #1005 in Alexa global
'http://pole-emploi.fr/',
# Why: #1006 in Alexa global
'http://biblegateway.com/',
# Why: #1007 in Alexa global
'http://independent.co.uk/',
# Why: #1009 in Alexa global
'http://e-hentai.org/',
# Why: #1010 in Alexa global
'http://www.gumtree.com.au/',
# Why: #1011 in Alexa global
'http://livestrong.com/',
# Why: #1012 in Alexa global
'http://game321.com/',
# Why: #1014 in Alexa global
'http://www.comcast.com/',
# Why: #1015 in Alexa global
'http://clubpenguin.com/',
# Why: #1016 in Alexa global
'http://rightmove.co.uk/',
# Why: #1017 in Alexa global
'http://steamcommunity.com/',
# Why: #1018 in Alexa global
'http://sockshare.com/',
# Why: #1019 in Alexa global
'http://globalconsumersurvey.com/',
# Why: #1020 in Alexa global
'http://rapidshare.com/',
# Why: #1021 in Alexa global
'http://auto.ru/',
# Why: #1022 in Alexa global
'http://www.staples.com/',
# Why: #1023 in Alexa global
'http://anitube.se/',
# Why: #1024 in Alexa global
'http://rozblog.com/',
# Why: #1025 in Alexa global
'http://reliancenetconnect.co.in/',
# Why: #1026 in Alexa global
'http://credit-agricole.fr/',
# Why: #1027 in Alexa global
'http://exposedwebcams.com/',
# Why: #1028 in Alexa global
'http://www.webalta.ru/',
# Why: #1029 in Alexa global
'http://www.usbank.com/',
# Why: #1030 in Alexa global
'http://www.google.com.ly/',
# Why: #1031 in Alexa global
'http://www.pantip.com/',
# Why: #1032 in Alexa global
'http://aftonbladet.se/',
# Why: #1033 in Alexa global
'http://scoop.it/',
# Why: #1034 in Alexa global
'http://www.mayoclinic.com/',
# Why: #1035 in Alexa global
'http://www.evernote.com/',
# Why: #1036 in Alexa global
'http://www.nyaa.eu/',
# Why: #1037 in Alexa global
'http://www.livingsocial.com/',
# Why: #1038 in Alexa global
'http://www.noaa.gov/',
# Why: #1039 in Alexa global
'http://www.imagefap.com/',
# Why: #1040 in Alexa global
'http://www.abchina.com/',
# Why: #1041 in Alexa global
'http://www.google.rs/',
# Why: #1042 in Alexa global
'http://www.amazon.in/',
# Why: #1043 in Alexa global
'http://www.tnaflix.com/',
# Why: #1044 in Alexa global
'http://www.xici.net/',
# Why: #1045 in Alexa global
'http://www.united.com/',
# Why: #1046 in Alexa global
'http://www.templatemonster.com/',
# Why: #1047 in Alexa global
'http://www.deezer.com/',
# Why: #1048 in Alexa global
'http://www.pixlr.com/',
# Why: #1049 in Alexa global
'http://www.tradedoubler.com/',
# Why: #1050 in Alexa global
'http://www.gumtree.co.za/',
# Why: #1051 in Alexa global
'http://www.r10.net/',
# Why: #1052 in Alexa global
'http://www.kongregate.com/',
# Why: #1053 in Alexa global
'http://www.jeuxvideo.com/',
# Why: #1054 in Alexa global
'http://www.gawker.com/',
# Why: #1055 in Alexa global
'http://chewen.com/',
# Why: #1056 in Alexa global
'http://www.r2games.com/',
# Why: #1057 in Alexa global
'http://www.mayajo.com/',
# Why: #1058 in Alexa global
'http://www.topix.com/',
# Why: #1059 in Alexa global
'http://www.easyhits4u.com/',
# Why: #1060 in Alexa global
'http://www.netteller.com/',
# Why: #1061 in Alexa global
'http://www.ing.nl/',
# Why: #1062 in Alexa global
'http://www.tripadvisor.co.uk/',
# Why: #1063 in Alexa global
'http://www.udn.com/',
# Why: #1064 in Alexa global
'http://www.cheezburger.com/',
# Why: #1065 in Alexa global
'http://www.fotostrana.ru/',
# Why: #1066 in Alexa global
'http://www.bbc.com/',
# Why: #1067 in Alexa global
'http://www.behance.net/',
# Why: #1068 in Alexa global
'http://www.lefigaro.fr/',
# Why: #1069 in Alexa global
'http://www.nikkei.com/',
# Why: #1070 in Alexa global
'http://www.fidelity.com/',
# Why: #1071 in Alexa global
'http://www.baomihua.com/',
# Why: #1072 in Alexa global
'http://www.fool.com/',
# Why: #1073 in Alexa global
'http://www.nairaland.com/',
# Why: #1074 in Alexa global
'http://www.sendspace.com/',
# Why: #1075 in Alexa global
'http://www.woot.com/',
# Why: #1076 in Alexa global
'http://www.travelocity.com/',
# Why: #1077 in Alexa global
'http://www.shopclues.com/',
# Why: #1078 in Alexa global
'http://www.sureonlinefind.com/',
# Why: #1080 in Alexa global
'http://www.gizmodo.com/',
# Why: #1081 in Alexa global
'http://www.hidemyass.com/',
# Why: #1082 in Alexa global
'http://www.o2.pl/',
# Why: #1083 in Alexa global
'http://www.clickbank.net/',
# Why: #1084 in Alexa global
'http://www.fotolia.com/',
# Why: #1085 in Alexa global
'http://www.opera.com/',
# Why: #1086 in Alexa global
'http://www.sabah.com.tr/',
# Why: #1087 in Alexa global
'http://www.n-mobile.net/',
# Why: #1088 in Alexa global
'http://www.chacha.com/',
# Why: #1089 in Alexa global
'http://www.autotrader.com/',
# Why: #1090 in Alexa global
'http://www.anonym.to/',
# Why: #1091 in Alexa global
'http://www.walmart.com.br/',
# Why: #1092 in Alexa global
'http://www.yjc.ir/',
# Why: #1093 in Alexa global
'http://www.autoscout24.de/',
# Why: #1094 in Alexa global
'http://www.gobookee.net/',
# Why: #1096 in Alexa global
'http://www.yaolan.com/',
# Why: #1097 in Alexa global
'http://www.india.com/',
# Why: #1098 in Alexa global
'http://www.tribalfusion.com/',
# Why: #1099 in Alexa global
'http://www.gittigidiyor.com/',
# Why: #1100 in Alexa global
'http://www.otto.de/',
# Why: #1101 in Alexa global
'http://www.adclickxpress.com/',
# Why: #1102 in Alexa global
'http://www.made-in-china.com/',
# Why: #1103 in Alexa global
'http://www.ahram.org.eg/',
# Why: #1104 in Alexa global
'http://www.asriran.com/',
# Why: #1105 in Alexa global
'http://www.blackberry.com/',
# Why: #1106 in Alexa global
'http://www.beytoote.com/',
# Why: #1107 in Alexa global
'http://www.piriform.com/',
# Why: #1108 in Alexa global
'http://www.ilmeteo.it/',
# Why: #1109 in Alexa global
'http://www.att.net/',
# Why: #1110 in Alexa global
'http://www.brainyquote.com/',
# Why: #1111 in Alexa global
'http://www.last.fm/',
# Why: #1112 in Alexa global
'http://www.directadvert.ru/',
# Why: #1113 in Alexa global
'http://www.slate.com/',
# Why: #1114 in Alexa global
'http://www.mangahere.com/',
# Why: #1115 in Alexa global
'http://www.jalan.net/',
# Why: #1116 in Alexa global
'http://www.blog.com/',
# Why: #1117 in Alexa global
'http://www.tuvaro.com/',
# Why: #1118 in Alexa global
'http://www.doc88.com/',
# Why: #1119 in Alexa global
'http://www.mbc.net/',
# Why: #1120 in Alexa global
'http://www.europa.eu/',
# Why: #1121 in Alexa global
'http://www.onlinedown.net/',
# Why: #1122 in Alexa global
'http://www.jcpenney.com/',
# Why: #1123 in Alexa global
'http://www.myplaycity.com/',
# Why: #1124 in Alexa global
'http://www.bahn.de/',
# Why: #1125 in Alexa global
'http://www.laredoute.fr/',
# Why: #1126 in Alexa global
'http://www.alexa.com/',
# Why: #1127 in Alexa global
'http://www.rakuten.ne.jp/',
# Why: #1128 in Alexa global
'http://www.flashx.tv/',
# Why: #1129 in Alexa global
'http://51.com/',
# Why: #1130 in Alexa global
'http://www.mail.com/',
# Why: #1131 in Alexa global
'http://www.costco.com/',
# Why: #1132 in Alexa global
'http://www.mirror.co.uk/',
# Why: #1133 in Alexa global
'http://www.chinadaily.com.cn/',
# Why: #1134 in Alexa global
'http://www.japanpost.jp/',
# Why: #1135 in Alexa global
'http://www.hubspot.com/',
# Why: #1136 in Alexa global
'http://www.tf1.fr/',
# Why: #1137 in Alexa global
'http://www.merdeka.com/',
# Why: #1138 in Alexa global
'http://www.nypost.com/',
# Why: #1139 in Alexa global
'http://www.1mall.com/',
# Why: #1140 in Alexa global
'http://www.wmtransfer.com/',
# Why: #1141 in Alexa global
'http://www.pcmag.com/',
# Why: #1142 in Alexa global
'http://www.univision.com/',
# Why: #1143 in Alexa global
'http://www.nationalgeographic.com/',
# Why: #1144 in Alexa global
'http://www.sourtimes.org/',
# Why: #1145 in Alexa global
'http://www.iciba.com/',
# Why: #1146 in Alexa global
'http://www.petardas.com/',
# Why: #1147 in Alexa global
'http://www.wmmail.ru/',
# Why: #1148 in Alexa global
'http://www.light-dark.net/',
# Why: #1149 in Alexa global
'http://www.ultimate-guitar.com/',
# Why: #1150 in Alexa global
'http://www.koramgame.com/',
# Why: #1151 in Alexa global
'http://www.megavod.fr/',
# Why: #1152 in Alexa global
'http://www.smh.com.au/',
# Why: #1153 in Alexa global
'http://www.ticketmaster.com/',
# Why: #1154 in Alexa global
'http://www.admin5.com/',
# Why: #1155 in Alexa global
'http://get-a-fuck-tonight.com/',
# Why: #1156 in Alexa global
'http://www.eenadu.net/',
# Why: #1157 in Alexa global
'http://www.argos.co.uk/',
# Why: #1159 in Alexa global
'http://www.nipic.com/',
# Why: #1160 in Alexa global
'http://www.google.iq/',
# Why: #1161 in Alexa global
'http://www.alhea.com/',
# Why: #1162 in Alexa global
'http://www.citrixonline.com/',
# Why: #1163 in Alexa global
'http://www.girlsgogames.com/',
# Why: #1164 in Alexa global
'http://www.fanatik.com.tr/',
# Why: #1165 in Alexa global
'http://www.yahoo-mbga.jp/',
# Why: #1166 in Alexa global
'http://www.google.tn/',
# Why: #1167 in Alexa global
'http://www.usaa.com/',
# Why: #1168 in Alexa global
'http://www.earthlink.net/',
# Why: #1169 in Alexa global
'http://www.ryanair.com/',
# Why: #1170 in Alexa global
'http://www.city-data.com/',
# Why: #1171 in Alexa global
'http://www.lloydstsb.co.uk/',
# Why: #1173 in Alexa global
'http://www.pornsharia.com/',
# Why: #1174 in Alexa global
'http://www.blogspot.tw/',
# Why: #1175 in Alexa global
'http://www.baixing.com/',
# Why: #1176 in Alexa global
'http://www.all-free-download.com/',
# Why: #1177 in Alexa global
'http://www.qianyan001.com/',
# Why: #1178 in Alexa global
'http://www.hellporno.com/',
# Why: #1179 in Alexa global
'http://www.pornmd.com/',
# Why: #1180 in Alexa global
'http://www.conferenceplus.com/',
# Why: #1181 in Alexa global
'http://www.docstoc.com/',
# Why: #1182 in Alexa global
'http://www.christian-dogma.com/',
# Why: #1183 in Alexa global
'http://www.sinaimg.cn/',
# Why: #1184 in Alexa global
'http://www.dmoz.org/',
# Why: #1185 in Alexa global
'http://www.perezhilton.com/',
# Why: #1186 in Alexa global
'http://www.mega.co.nz/',
# Why: #1187 in Alexa global
'http://www.pchome.com.tw/',
# Why: #1188 in Alexa global
'http://www.zazzle.com/',
# Why: #1189 in Alexa global
'http://www.echoroukonline.com/',
# Why: #1190 in Alexa global
'http://www.ea.com/',
# Why: #1191 in Alexa global
'http://www.yiqifa.com/',
# Why: #1193 in Alexa global
'http://www.mysearchdial.com/',
# Why: #1194 in Alexa global
'http://www.hotwire.com/',
# Why: #1195 in Alexa global
'http://www.ninemsn.com.au/',
# Why: #1196 in Alexa global
'http://www.tablica.pl/',
# Why: #1197 in Alexa global
'http://www.brazzers.com/',
# Why: #1198 in Alexa global
'http://www.americanas.com.br/',
# Why: #1199 in Alexa global
'http://www.extremetube.com/',
# Why: #1200 in Alexa global
'http://www.zynga.com/',
# Why: #1201 in Alexa global
'http://www.buscape.com.br/',
# Why: #1202 in Alexa global
'http://www.t-mobile.com/',
# Why: #1204 in Alexa global
'http://www.portaldosites.com/',
# Why: #1205 in Alexa global
'http://www.businessweek.com/',
# Why: #1206 in Alexa global
'http://www.feedburner.com/',
# Why: #1207 in Alexa global
'http://www.contenko.com/',
# Why: #1208 in Alexa global
'http://www.homeshop18.com/',
# Why: #1209 in Alexa global
'http://www.bmi.ir/',
# Why: #1210 in Alexa global
'http://www.wwe.com/',
# Why: #1211 in Alexa global
'http://www.adult-empire.com/',
# Why: #1212 in Alexa global
'http://www.nfl.com/',
# Why: #1213 in Alexa global
'http://www.globososo.com/',
# Why: #1214 in Alexa global
'http://www.sfgate.com/',
# Why: #1215 in Alexa global
'http://www.mmotraffic.com/',
# Why: #1216 in Alexa global
'http://www.zalando.de/',
# Why: #1217 in Alexa global
'http://www.warthunder.com/',
# Why: #1218 in Alexa global
'http://www.icloud.com/',
# Why: #1219 in Alexa global
'http://www.xiami.com/',
# Why: #1220 in Alexa global
'http://www.newsmax.com/',
# Why: #1221 in Alexa global
'http://www.solarmovie.so/',
# Why: #1222 in Alexa global
'http://www.junglee.com/',
# Why: #1223 in Alexa global
'http://www.discovercard.com/',
# Why: #1224 in Alexa global
'http://www.hh.ru/',
# Why: #1225 in Alexa global
'http://www.searchengineland.com/',
# Why: #1226 in Alexa global
'http://www.labanquepostale.fr/',
# Why: #1227 in Alexa global
'http://www.51cto.com/',
# Why: #1228 in Alexa global
'http://www.appledaily.com.tw/',
# Why: #1229 in Alexa global
'http://www.fling.com/',
# Why: #1230 in Alexa global
'http://www.liveperson.net/',
# Why: #1231 in Alexa global
'http://www.sulit.com.ph/',
# Why: #1232 in Alexa global
'http://www.tinypic.com/',
# Why: #1233 in Alexa global
'http://www.meilishuo.com/',
# Why: #1234 in Alexa global
'http://googleadservices.com/',
# Why: #1235 in Alexa global
'http://www.boston.com/',
# Why: #1236 in Alexa global
'http://www.chron.com/',
# Why: #1237 in Alexa global
'http://www.breitbart.com/',
# Why: #1238 in Alexa global
'http://www.youjizzlive.com/',
# Why: #1239 in Alexa global
'http://www.commbank.com.au/',
# Why: #1240 in Alexa global
'http://www.axisbank.com/',
# Why: #1241 in Alexa global
'http://www.wired.com/',
# Why: #1242 in Alexa global
'http://www.trialpay.com/',
# Why: #1243 in Alexa global
'http://www.berniaga.com/',
# Why: #1244 in Alexa global
'http://cnmo.com/',
# Why: #1245 in Alexa global
'http://www.tunein.com/',
# Why: #1246 in Alexa global
'http://www.hotfile.com/',
# Why: #1247 in Alexa global
'http://www.dubizzle.com/',
# Why: #1248 in Alexa global
'http://www.olx.com.br/',
# Why: #1249 in Alexa global
'http://haxiu.com/',
# Why: #1250 in Alexa global
'http://www.zulily.com/',
# Why: #1251 in Alexa global
'http://www.infolinks.com/',
# Why: #1252 in Alexa global
'http://www.yourgirlfriends.com/',
# Why: #1253 in Alexa global
'http://www.logmein.com/',
# Why: #1255 in Alexa global
'http://www.irs.gov/',
# Why: #1256 in Alexa global
'http://www.noticiadeldia.com/',
# Why: #1257 in Alexa global
'http://www.nbcsports.com/',
# Why: #1258 in Alexa global
'http://www.holasearch.com/',
# Why: #1259 in Alexa global
'http://www.wo.com.cn/',
# Why: #1260 in Alexa global
'http://www.indianexpress.com/',
# Why: #1261 in Alexa global
'http://www.depositfiles.com/',
# Why: #1262 in Alexa global
'http://www.elfagr.org/',
# Why: #1263 in Alexa global
'http://himado.in/',
# Why: #1264 in Alexa global
'http://www.lumosity.com/',
# Why: #1265 in Alexa global
'http://www.mbank.com.pl/',
# Why: #1266 in Alexa global
'http://www.primewire.ag/',
# Why: #1267 in Alexa global
'http://www.dreamstime.com/',
# Why: #1268 in Alexa global
'http://sootoo.com/',
# Why: #1269 in Alexa global
'http://www.souq.com/',
# Why: #1270 in Alexa global
'http://www.weblio.jp/',
# Why: #1272 in Alexa global
'http://www.craigslist.ca/',
# Why: #1273 in Alexa global
'http://www.zara.com/',
# Why: #1274 in Alexa global
'http://www.cheshi.com.cn/',
# Why: #1275 in Alexa global
'http://www.groupon.it/',
# Why: #1276 in Alexa global
'http://www.mangafox.me/',
# Why: #1277 in Alexa global
'http://www.casino.com/',
# Why: #1278 in Alexa global
'http://www.armorgames.com/',
# Why: #1279 in Alexa global
'http://www.zanox.com/',
# Why: #1280 in Alexa global
'http://www.finn.no/',
# Why: #1281 in Alexa global
'http://www.qihoo.com/',
# Why: #1282 in Alexa global
'http://www.toysrus.com/',
# Why: #1283 in Alexa global
'http://www.airasia.com/',
# Why: #1284 in Alexa global
'http://www.dafont.com/',
# Why: #1285 in Alexa global
'http://www.tvmuse.eu/',
# Why: #1286 in Alexa global
'http://www.pnc.com/',
# Why: #1287 in Alexa global
'http://www.donanimhaber.com/',
# Why: #1288 in Alexa global
'http://cnbeta.com/',
# Why: #1289 in Alexa global
'http://www.prntscr.com/',
# Why: #1290 in Alexa global
'http://www.cox.net/',
# Why: #1291 in Alexa global
'http://www.bloglovin.com/',
# Why: #1292 in Alexa global
'http://www.picmonkey.com/',
# Why: #1293 in Alexa global
'http://www.zoho.com/',
# Why: #1294 in Alexa global
'http://www.glassdoor.com/',
# Why: #1295 in Alexa global
'http://www.myfitnesspal.com/',
# Why: #1296 in Alexa global
'http://www.change.org/',
# Why: #1297 in Alexa global
'http://www.aa.com/',
# Why: #1298 in Alexa global
'http://www.playstation.com/',
# Why: #1300 in Alexa global
'http://www.b1.org/',
# Why: #1301 in Alexa global
'http://www.correios.com.br/',
# Why: #1302 in Alexa global
'http://www.hindustantimes.com/',
# Why: #1303 in Alexa global
'http://www.softlayer.com/',
# Why: #1304 in Alexa global
'http://www.imagevenue.com/',
# Why: #1305 in Alexa global
'http://www.windowsphone.com/',
# Why: #1306 in Alexa global
'http://www.wikimapia.org/',
# Why: #1307 in Alexa global
'http://www.transfermarkt.de/',
# Why: #1308 in Alexa global
'http://www.dict.cc/',
# Why: #1309 in Alexa global
'http://www.blocket.se/',
# Why: #1310 in Alexa global
'http://www.lacaixa.es/',
# Why: #1311 in Alexa global
'http://www.hilton.com/',
# Why: #1312 in Alexa global
'http://www.mtv.com/',
# Why: #1313 in Alexa global
'http://www.cbc.ca/',
# Why: #1314 in Alexa global
'http://www.msn.ca/',
# Why: #1315 in Alexa global
'http://www.box.com/',
# Why: #1316 in Alexa global
'http://www.szn.cz/',
# Why: #1317 in Alexa global
'http://www.haodf.com/',
# Why: #1318 in Alexa global
'http://www.monsterindia.com/',
# Why: #1319 in Alexa global
'http://www.okezone.com/',
# Why: #1320 in Alexa global
'http://www.entertainment-factory.com/',
# Why: #1321 in Alexa global
'http://www.linternaute.com/',
# Why: #1322 in Alexa global
'http://www.break.com/',
# Why: #1323 in Alexa global
'http://www.ustream.tv/',
# Why: #1324 in Alexa global
'http://www.songspk.name/',
# Why: #1325 in Alexa global
'http://www.bilibili.tv/',
# Why: #1326 in Alexa global
'http://www.avira.com/',
# Why: #1327 in Alexa global
'http://www.thehindu.com/',
# Why: #1328 in Alexa global
'http://www.watchmygf.com/',
# Why: #1329 in Alexa global
'http://www.google.co.ma/',
# Why: #1330 in Alexa global
'http://www.nick.com/',
# Why: #1331 in Alexa global
'http://www.sp.gov.br/',
# Why: #1332 in Alexa global
'http://www.zeobit.com/',
# Why: #1333 in Alexa global
'http://www.sprint.com/',
# Why: #1334 in Alexa global
'http://www.khabaronline.ir/',
# Why: #1335 in Alexa global
'http://www.magentocommerce.com/',
# Why: #1336 in Alexa global
'http://www.hsbc.co.uk/',
# Why: #1337 in Alexa global
'http://www.trafficholder.com/',
# Why: #1338 in Alexa global
'http://www.gamestop.com/',
# Why: #1339 in Alexa global
'http://www.cartoonnetwork.com/',
# Why: #1340 in Alexa global
'http://www.fifa.com/',
# Why: #1341 in Alexa global
'http://www.ebay.ca/',
# Why: #1342 in Alexa global
'http://www.vatanim.com.tr/',
# Why: #1343 in Alexa global
'http://www.qvc.com/',
# Why: #1344 in Alexa global
'http://www.marriott.com/',
# Why: #1345 in Alexa global
'http://www.eventbrite.com/',
# Why: #1346 in Alexa global
'http://www.gi-akademie.com/',
# Why: #1347 in Alexa global
'http://www.intel.com/',
# Why: #1348 in Alexa global
'http://www.oschina.net/',
# Why: #1349 in Alexa global
'http://www.dojki.com/',
# Why: #1350 in Alexa global
'http://www.thechive.com/',
# Why: #1351 in Alexa global
'http://www.viadeo.com/',
# Why: #1352 in Alexa global
'http://www.walgreens.com/',
# Why: #1353 in Alexa global
'http://www.leo.org/',
# Why: #1354 in Alexa global
'http://www.statscrop.com/',
# Why: #1355 in Alexa global
'http://www.brothersoft.com/',
# Why: #1356 in Alexa global
'http://www.allocine.fr/',
# Why: #1357 in Alexa global
'http://www.slutload.com/',
# Why: #1358 in Alexa global
'http://www.google.com.gt/',
# Why: #1359 in Alexa global
'http://www.santabanta.com/',
# Why: #1360 in Alexa global
'http://www.stardoll.com/',
# Why: #1361 in Alexa global
'http://www.polyvore.com/',
# Why: #1362 in Alexa global
'http://www.focus.de/',
# Why: #1363 in Alexa global
'http://www.duckduckgo.com/',
# Why: #1364 in Alexa global
'http://www.funshion.com/',
# Why: #1365 in Alexa global
'http://www.marieclairechina.com/',
# Why: #1366 in Alexa global
'http://www.internethaber.com/',
# Why: #1367 in Alexa global
'http://www.worldoftanks.ru/',
# Why: #1369 in Alexa global
'http://www.1und1.de/',
# Why: #1370 in Alexa global
'http://www.anyporn.com/',
# Why: #1371 in Alexa global
'http://www.17u.cn/',
# Why: #1372 in Alexa global
'http://www.cars.com/',
# Why: #1373 in Alexa global
'http://www.asg.to/',
# Why: #1374 in Alexa global
'http://www.alice.it/',
# Why: #1375 in Alexa global
'http://www.hongkiat.com/',
# Why: #1376 in Alexa global
'http://www.bhphotovideo.com/',
# Why: #1377 in Alexa global
'http://www.bdnews24.com/',
# Why: #1378 in Alexa global
'http://sdo.com/',
# Why: #1379 in Alexa global
'http://www.cerdas.com/',
# Why: #1380 in Alexa global
'http://www.clarin.com/',
# Why: #1381 in Alexa global
'http://www.victoriassecret.com/',
# Why: #1382 in Alexa global
'http://www.instructables.com/',
# Why: #1383 in Alexa global
'http://www.state.gov/',
# Why: #1384 in Alexa global
'http://www.agame.com/',
# Why: #1385 in Alexa global
'http://www.xiaomi.com/',
# Why: #1386 in Alexa global
'http://esporte.uol.com.br/',
# Why: #1387 in Alexa global
'http://www.adfoc.us/',
# Why: #1388 in Alexa global
'http://www.telekom.com/',
# Why: #1389 in Alexa global
'http://www.skycn.com/',
# Why: #1390 in Alexa global
'http://www.orbitz.com/',
# Why: #1391 in Alexa global
'http://www.nhl.com/',
# Why: #1392 in Alexa global
'http://www.vistaprint.com/',
# Why: #1393 in Alexa global
'http://trklnks.com/',
# Why: #1394 in Alexa global
'http://www.basecamp.com/',
# Why: #1395 in Alexa global
'http://www.hot-sex-tube.com/',
# Why: #1396 in Alexa global
'http://www.incredibar-search.com/',
# Why: #1397 in Alexa global
'http://www.qingdaonews.com/',
# Why: #1398 in Alexa global
'http://www.sabq.org/',
# Why: #1399 in Alexa global
'http://www.nasa.gov/',
# Why: #1400 in Alexa global
'http://www.dx.com/',
# Why: #1401 in Alexa global
'http://www.addmefast.com/',
# Why: #1402 in Alexa global
'http://www.yepi.com/',
# Why: #1403 in Alexa global
'http://www.xxx-ok.com/',
# Why: #1405 in Alexa global
'http://www.sex.com/',
# Why: #1406 in Alexa global
'http://www.food.com/',
# Why: #1407 in Alexa global
'http://www.freeones.com/',
# Why: #1408 in Alexa global
'http://www.tesco.com/',
# Why: #1409 in Alexa global
'http://www.a10.com/',
# Why: #1410 in Alexa global
'http://www.mynavi.jp/',
# Why: #1411 in Alexa global
'http://www.abc.net.au/',
# Why: #1412 in Alexa global
'http://www.internetdownloadmanager.com/',
# Why: #1413 in Alexa global
'http://www.seowhy.com/',
# Why: #1414 in Alexa global
'http://114so.cn/',
# Why: #1415 in Alexa global
'http://www.otomoto.pl/',
# Why: #1416 in Alexa global
'http://www.idealo.de/',
# Why: #1417 in Alexa global
'http://www.laposte.net/',
# Why: #1418 in Alexa global
'http://www.eroprofile.com/',
# Why: #1419 in Alexa global
'http://www.bbb.org/',
# Why: #1420 in Alexa global
'http://www.gnavi.co.jp/',
# Why: #1421 in Alexa global
'http://www.tiu.ru/',
# Why: #1422 in Alexa global
'http://www.blogsky.com/',
# Why: #1423 in Alexa global
'http://www.bigfishgames.com/',
# Why: #1424 in Alexa global
'http://www.weiphone.com/',
# Why: #1425 in Alexa global
'http://www.livescore.com/',
# Why: #1426 in Alexa global
'http://www.tubepleasure.com/',
# Why: #1427 in Alexa global
'http://www.net.cn/',
# Why: #1428 in Alexa global
'http://www.jagran.com/',
# Why: #1429 in Alexa global
'http://www.livestream.com/',
# Why: #1430 in Alexa global
'http://stagram.com/',
# Why: #1431 in Alexa global
'http://www.vine.co/',
# Why: #1432 in Alexa global
'http://www.olx.com.pk/',
# Why: #1433 in Alexa global
'http://www.edmunds.com/',
# Why: #1434 in Alexa global
'http://www.banglanews24.com/',
# Why: #1435 in Alexa global
'http://www.reverso.net/',
# Why: #1436 in Alexa global
'http://www.stargames.at/',
# Why: #1437 in Alexa global
'http://www.postimg.org/',
# Why: #1438 in Alexa global
'http://www.overthumbs.com/',
# Why: #1439 in Alexa global
'http://www.iteye.com/',
# Why: #1440 in Alexa global
'http://www.yify-torrents.com/',
# Why: #1441 in Alexa global
'http://www.forexfactory.com/',
# Why: #1442 in Alexa global
'http://www.jugem.jp/',
# Why: #1443 in Alexa global
'http://www.hefei.cc/',
# Why: #1444 in Alexa global
'http://www.thefreecamsecret.com/',
# Why: #1445 in Alexa global
'http://www.sponichi.co.jp/',
# Why: #1446 in Alexa global
'http://www.lanacion.com.ar/',
# Why: #1447 in Alexa global
'http://www.jeu-a-telecharger.com/',
# Why: #1448 in Alexa global
'http://www.spartoo.com/',
# Why: #1449 in Alexa global
'http://www.adv-adserver.com/',
# Why: #1450 in Alexa global
'http://www.asus.com/',
# Why: #1451 in Alexa global
'http://www.91.com/',
# Why: #1452 in Alexa global
'http://www.wimbledon.com/',
# Why: #1454 in Alexa global
'http://www.yam.com/',
# Why: #1455 in Alexa global
'http://www.grooveshark.com/',
# Why: #1456 in Alexa global
'http://www.tdcanadatrust.com/',
# Why: #1457 in Alexa global
'http://www.lovetime.com/',
# Why: #1458 in Alexa global
'http://www.iltalehti.fi/',
# Why: #1459 in Alexa global
'http://www.alnaddy.com/',
# Why: #1460 in Alexa global
'http://www.bb.com.br/',
# Why: #1461 in Alexa global
'http://www.msn.co.jp/',
# Why: #1462 in Alexa global
'http://www.tebyan.net/',
# Why: #1463 in Alexa global
'http://www.redbox.com/',
# Why: #1464 in Alexa global
'http://www.filecrop.com/',
# Why: #1465 in Alexa global
'http://www.aliyun.com/',
# Why: #1466 in Alexa global
'http://www.21cn.com/',
# Why: #1467 in Alexa global
'http://www.news24.com/',
# Why: #1468 in Alexa global
'http://www.infowars.com/',
# Why: #1469 in Alexa global
'http://www.thetaoofbadass.com/',
# Why: #1470 in Alexa global
'http://www.juegos.com/',
# Why: #1471 in Alexa global
'http://www.p5w.net/',
# Why: #1472 in Alexa global
'http://www.vg.no/',
# Why: #1473 in Alexa global
'http://www.discovery.com/',
# Why: #1474 in Alexa global
'http://www.gazzetta.it/',
# Why: #1475 in Alexa global
'http://www.tvguide.com/',
# Why: #1476 in Alexa global
'http://www.khabarfarsi.com/',
# Why: #1477 in Alexa global
'http://www.bradesco.com.br/',
# Why: #1478 in Alexa global
'http://www.autotrader.co.uk/',
# Why: #1479 in Alexa global
'http://www.wetransfer.com/',
# Why: #1480 in Alexa global
'http://jinti.com/',
# Why: #1481 in Alexa global
'http://www.xhamsterhq.com/',
# Why: #1482 in Alexa global
'http://www.appround.net/',
# Why: #1483 in Alexa global
'http://lotour.com/',
# Why: #1484 in Alexa global
'http://www.reverbnation.com/',
# Why: #1485 in Alexa global
'http://www.thedailybeast.com/',
# Why: #1486 in Alexa global
'http://www.vente-privee.com/',
# Why: #1487 in Alexa global
'http://www.subscribe.ru/',
# Why: #1488 in Alexa global
'http://www.clickjogos.uol.com.br/',
# Why: #1489 in Alexa global
'http://www.marketgid.com/',
# Why: #1490 in Alexa global
'http://www.super.cz/',
# Why: #1491 in Alexa global
'http://www.jvzoo.com/',
# Why: #1492 in Alexa global
'http://www.shine.com/',
# Why: #1493 in Alexa global
'http://www.screencast.com/',
# Why: #1494 in Alexa global
'http://www.picofile.com/',
# Why: #1495 in Alexa global
'http://www.manoramaonline.com/',
# Why: #1496 in Alexa global
'http://www.kbb.com/',
# Why: #1497 in Alexa global
'http://www.seasonvar.ru/',
# Why: #1498 in Alexa global
'http://www.android.com/',
# Why: #1499 in Alexa global
'http://www.egrana.com.br/',
# Why: #1501 in Alexa global
'http://www.ettoday.net/',
# Why: #1502 in Alexa global
'http://www.webstatsdomain.net/',
# Why: #1503 in Alexa global
'http://www.haberler.com/',
# Why: #1504 in Alexa global
'http://www.vesti.ru/',
# Why: #1505 in Alexa global
'http://www.fastpic.ru/',
# Why: #1506 in Alexa global
'http://www.dpreview.com/',
# Why: #1507 in Alexa global
'http://www.google.si/',
# Why: #1508 in Alexa global
'http://www.ouedkniss.com/',
# Why: #1509 in Alexa global
'http://www.crackle.com/',
# Why: #1510 in Alexa global
'http://www.chefkoch.de/',
# Why: #1511 in Alexa global
'http://www.mogujie.com/',
# Why: #1513 in Alexa global
'http://www.brassring.com/',
# Why: #1514 in Alexa global
'http://www.govome.com/',
# Why: #1515 in Alexa global
'http://www.copyscape.com/',
# Why: #1516 in Alexa global
'http://www.minecraftforum.net/',
# Why: #1517 in Alexa global
'http://www.mit.edu/',
# Why: #1518 in Alexa global
'http://www.cvs.com/',
# Why: #1519 in Alexa global
'http://www.timesjobs.com/',
# Why: #1520 in Alexa global
'http://www.ksl.com/',
# Why: #1521 in Alexa global
'http://www.verizon.net/',
# Why: #1522 in Alexa global
'http://www.direct.gov.uk/',
# Why: #1523 in Alexa global
'http://www.miralinks.ru/',
# Why: #1524 in Alexa global
'http://www.elheddaf.com/',
# Why: #1525 in Alexa global
'http://www.stockphoto9.com/',
# Why: #1526 in Alexa global
'http://www.ashemaletube.com/',
# Why: #1527 in Alexa global
'http://www.dmm.com/',
# Why: #1528 in Alexa global
'http://www.abckj123.com/',
# Why: #1529 in Alexa global
'http://www.smzdm.com/',
# Why: #1530 in Alexa global
'http://www.china.cn/',
# Why: #1531 in Alexa global
'http://www.cox.com/',
# Why: #1532 in Alexa global
'http://www.welt.de/',
# Why: #1533 in Alexa global
'http://www.guyspy.com/',
# Why: #1534 in Alexa global
'http://www.makeuseof.com/',
# Why: #1535 in Alexa global
'http://www.tiscali.it/',
# Why: #1536 in Alexa global
'http://www.178.com/',
# Why: #1537 in Alexa global
'http://www.metrolyrics.com/',
# Why: #1538 in Alexa global
'http://www.vsuch.com/',
# Why: #1539 in Alexa global
'http://www.seosprint.net/',
# Why: #1540 in Alexa global
'http://www.samanyoluhaber.com/',
# Why: #1541 in Alexa global
'http://www.garanti.com.tr/',
# Why: #1542 in Alexa global
'http://www.chicagotribune.com/',
# Why: #1543 in Alexa global
'http://www.hinet.net/',
# Why: #1544 in Alexa global
'http://www.kp.ru/',
# Why: #1545 in Alexa global
'http://www.chomikuj.pl/',
# Why: #1546 in Alexa global
'http://www.nk.pl/',
# Why: #1547 in Alexa global
'http://www.webhostingtalk.com/',
# Why: #1548 in Alexa global
'http://www.dnaindia.com/',
# Why: #1550 in Alexa global
'http://www.programme-tv.net/',
# Why: #1551 in Alexa global
'http://www.ievbz.com/',
# Why: #1552 in Alexa global
'http://www.mysql.com/',
# Why: #1553 in Alexa global
'http://www.perfectmoney.is/',
# Why: #1554 in Alexa global
'http://www.liveundnackt.com/',
# Why: #1555 in Alexa global
'http://www.flippa.com/',
# Why: #1556 in Alexa global
'http://www.vevo.com/',
# Why: #1557 in Alexa global
'http://www.jappy.de/',
# Why: #1558 in Alexa global
'http://www.bidvertiser.com/',
# Why: #1559 in Alexa global
'http://www.bankmandiri.co.id/',
# Why: #1560 in Alexa global
'http://www.letour.fr/',
# Why: #1561 in Alexa global
'http://www.yr.no/',
# Why: #1562 in Alexa global
'http://www.suning.com/',
# Why: #1563 in Alexa global
'http://www.nosub.tv/',
# Why: #1564 in Alexa global
'http://www.delicious.com/',
# Why: #1565 in Alexa global
'http://www.pornpoly.com/',
# Why: #1566 in Alexa global
'http://www.echo.msk.ru/',
# Why: #1567 in Alexa global
'http://www.coingeneration.com/',
# Why: #1568 in Alexa global
'http://www.shutterfly.com/',
# Why: #1569 in Alexa global
'http://www.royalbank.com/',
# Why: #1570 in Alexa global
'http://www.techradar.com/',
# Why: #1571 in Alexa global
'http://www.114la.com/',
# Why: #1572 in Alexa global
'http://www.bizrate.com/',
# Why: #1573 in Alexa global
'http://www.srvey.net/',
# Why: #1574 in Alexa global
'http://www.heavy-r.com/',
# Why: #1575 in Alexa global
'http://www.telexfree.com/',
# Why: #1576 in Alexa global
'http://www.lego.com/',
# Why: #1577 in Alexa global
'http://www.battlefield.com/',
# Why: #1578 in Alexa global
'http://www.shahrekhabar.com/',
# Why: #1579 in Alexa global
'http://www.tuenti.com/',
# Why: #1580 in Alexa global
'http://www.bookmyshow.com/',
# Why: #1581 in Alexa global
'http://www.gamme.com.tw/',
# Why: #1582 in Alexa global
'http://www.ft.com/',
# Why: #1583 in Alexa global
'http://www.prweb.com/',
# Why: #1584 in Alexa global
'http://www.1337x.org/',
# Why: #1585 in Alexa global
'http://www.networkedblogs.com/',
# Why: #1586 in Alexa global
'http://www.pbskids.org/',
# Why: #1587 in Alexa global
'http://aipai.com/',
# Why: #1588 in Alexa global
'http://www.jang.com.pk/',
# Why: #1589 in Alexa global
'http://www.dribbble.com/',
# Why: #1590 in Alexa global
'http://www.ezdownloadpro.info/',
# Why: #1591 in Alexa global
'http://www.gonzoxxxmovies.com/',
# Why: #1592 in Alexa global
'http://www.aufeminin.com/',
# Why: #1594 in Alexa global
'http://www.6pm.com/',
# Why: #1596 in Alexa global
'http://www.azet.sk/',
# Why: #1597 in Alexa global
'http://www.trustedoffer.com/',
# Why: #1598 in Alexa global
'http://www.simplyhired.com/',
# Why: #1599 in Alexa global
'http://www.adserverpub.com/',
# Why: #1600 in Alexa global
'http://www.privalia.com/',
# Why: #1601 in Alexa global
'http://www.bedbathandbeyond.com/',
# Why: #1602 in Alexa global
'http://www.yyets.com/',
# Why: #1603 in Alexa global
'http://verycd.com/',
# Why: #1604 in Alexa global
'http://www.sbnation.com/',
# Why: #1605 in Alexa global
'http://www.blogspot.nl/',
# Why: #1606 in Alexa global
'http://www.ikariam.com/',
# Why: #1607 in Alexa global
'http://www.sitepoint.com/',
# Why: #1608 in Alexa global
'http://www.gazeta.ru/',
# Why: #1609 in Alexa global
'http://www.tataindicom.com/',
# Why: #1610 in Alexa global
'http://chekb.com/',
# Why: #1611 in Alexa global
'http://www.literotica.com/',
# Why: #1612 in Alexa global
'http://www.ah-me.com/',
# Why: #1613 in Alexa global
'http://eztv.it/',
# Why: #1614 in Alexa global
'http://www.onliner.by/',
# Why: #1615 in Alexa global
'http://pptv.com/',
# Why: #1616 in Alexa global
'http://www.macrumors.com/',
# Why: #1617 in Alexa global
'http://www.xvideo-jp.com/',
# Why: #1618 in Alexa global
'http://www.state.tx.us/',
# Why: #1619 in Alexa global
'http://www.jamnews.ir/',
# Why: #1620 in Alexa global
'http://etoro.com/',
# Why: #1621 in Alexa global
'http://www.ny.gov/',
# Why: #1622 in Alexa global
'http://www.searchenginewatch.com/',
# Why: #1623 in Alexa global
'http://www.google.co.cr/',
# Why: #1624 in Alexa global
'http://www.td.com/',
# Why: #1625 in Alexa global
'http://www.ahrefs.com/',
# Why: #1626 in Alexa global
'http://www.337.com/',
# Why: #1627 in Alexa global
'http://www.klout.com/',
# Why: #1628 in Alexa global
'http://www.ebay.es/',
# Why: #1629 in Alexa global
'http://www.theverge.com/',
# Why: #1631 in Alexa global
'http://www.kapook.com/',
# Why: #1632 in Alexa global
'http://www.barclays.co.uk/',
# Why: #1634 in Alexa global
'http://nuomi.com/',
# Why: #1635 in Alexa global
'http://www.index-of-mp3s.com/',
# Why: #1636 in Alexa global
'http://www.ohfreesex.com/',
# Why: #1637 in Alexa global
'http://www.mts.ru/',
# Why: #1638 in Alexa global
'http://www.itmedia.co.jp/',
# Why: #1639 in Alexa global
'http://www.instantcheckmate.com/',
# Why: #1640 in Alexa global
'http://www.sport.es/',
# Why: #1641 in Alexa global
'http://www.sitescout.com/',
# Why: #1642 in Alexa global
'http://www.irr.ru/',
# Why: #1643 in Alexa global
'http://tuniu.com/',
# Why: #1644 in Alexa global
'http://www.startimes.com/',
# Why: #1645 in Alexa global
'http://www.tvn24.pl/',
# Why: #1646 in Alexa global
'http://www.kenh14.vn/',
# Why: #1647 in Alexa global
'http://www.myvideo.de/',
# Why: #1648 in Alexa global
'http://www.speedbit.com/',
# Why: #1649 in Alexa global
'http://www.aljazeera.com/',
# Why: #1650 in Alexa global
'http://www.pudelek.pl/',
# Why: #1651 in Alexa global
'http://www.mmgp.ru/',
# Why: #1652 in Alexa global
'http://www.empflix.com/',
# Why: #1653 in Alexa global
'http://www.tigerdirect.com/',
# Why: #1655 in Alexa global
'http://www.elegantthemes.com/',
# Why: #1657 in Alexa global
'http://www.ted.com/',
# Why: #1658 in Alexa global
'http://www.carview.co.jp/',
# Why: #1659 in Alexa global
'http://www.down1oads.com/',
# Why: #1660 in Alexa global
'http://www.bancobrasil.com.br/',
# Why: #1661 in Alexa global
'http://www.qip.ru/',
# Why: #1662 in Alexa global
'http://www.nikkeibp.co.jp/',
# Why: #1663 in Alexa global
'http://www.fapdu.com/',
# Why: #1664 in Alexa global
'http://www.softango.com/',
# Why: #1665 in Alexa global
'http://www.ap.org/',
# Why: #1666 in Alexa global
'http://www.meteofrance.com/',
# Why: #1667 in Alexa global
'http://www.gentenocturna.com/',
# Why: #1668 in Alexa global
'http://www.2ch-c.net/',
# Why: #1669 in Alexa global
'http://www.orf.at/',
# Why: #1670 in Alexa global
'http://www.maybank2u.com.my/',
# Why: #1671 in Alexa global
'http://www.minecraftwiki.net/',
# Why: #1672 in Alexa global
'http://www.tv.com/',
# Why: #1673 in Alexa global
'http://www.orkut.com/',
# Why: #1674 in Alexa global
'http://www.adp.com/',
# Why: #1675 in Alexa global
'http://www.woorank.com/',
# Why: #1676 in Alexa global
'http://www.imagetwist.com/',
# Why: #1677 in Alexa global
'http://www.pastebin.com/',
# Why: #1678 in Alexa global
'http://www.airtel.com/',
# Why: #1679 in Alexa global
'http://www.ew.com/',
# Why: #1680 in Alexa global
'http://www.forever21.com/',
# Why: #1681 in Alexa global
'http://www.adam4adam.com/',
# Why: #1682 in Alexa global
'http://www.voyages-sncf.com/',
# Why: #1683 in Alexa global
'http://www.nextag.com/',
# Why: #1684 in Alexa global
'http://www.usnews.com/',
# Why: #1685 in Alexa global
'http://www.dinamalar.com/',
# Why: #1686 in Alexa global
'http://www.impress.co.jp/',
# Why: #1687 in Alexa global
'http://www.virginmedia.com/',
# Why: #1688 in Alexa global
'http://www.investopedia.com/',
# Why: #1689 in Alexa global
'http://www.seekingalpha.com/',
# Why: #1690 in Alexa global
'http://www.jumponhottie.com/',
# Why: #1691 in Alexa global
'http://www.national-lottery.co.uk/',
# Why: #1692 in Alexa global
'http://www.mobifiesta.com/',
# Why: #1693 in Alexa global
'http://www.kapanlagi.com/',
# Why: #1694 in Alexa global
'http://www.segundamano.es/',
# Why: #1695 in Alexa global
'http://gfan.com/',
# Why: #1696 in Alexa global
'http://www.xdating.com/',
# Why: #1697 in Alexa global
'http://www.ynet.com/',
# Why: #1698 in Alexa global
'http://www.medu.ir/',
# Why: #1699 in Alexa global
'http://www.hsn.com/',
# Why: #1700 in Alexa global
'http://www.newsru.com/',
# Why: #1701 in Alexa global
'http://www.minus.com/',
# Why: #1702 in Alexa global
'http://www.sitetalk.com/',
# Why: #1703 in Alexa global
'http://www.aarp.org/',
# Why: #1704 in Alexa global
'http://www.clickpaid.com/',
# Why: #1705 in Alexa global
'http://www.panoramio.com/',
# Why: #1706 in Alexa global
'http://www.webcamo.com/',
# Why: #1707 in Alexa global
'http://www.yobt.tv/',
# Why: #1708 in Alexa global
'http://www.slutfinder.com/',
# Why: #1709 in Alexa global
'http://www.freelotto.com/',
# Why: #1710 in Alexa global
'http://www.mudah.my/',
# Why: #1711 in Alexa global
'http://www.toptenreviews.com/',
# Why: #1712 in Alexa global
'http://www.caisse-epargne.fr/',
# Why: #1713 in Alexa global
'http://www.wimp.com/',
# Why: #1714 in Alexa global
'http://www.woothemes.com/',
# Why: #1715 in Alexa global
'http://www.css-tricks.com/',
# Why: #1716 in Alexa global
'http://www.coolmath-games.com/',
# Why: #1717 in Alexa global
'http://www.tagu.com.ar/',
# Why: #1718 in Alexa global
'http://www.sheknows.com/',
# Why: #1719 in Alexa global
'http://www.advancedfileoptimizer.com/',
# Why: #1720 in Alexa global
'http://www.drupal.org/',
# Why: #1721 in Alexa global
'http://www.centrum.cz/',
# Why: #1722 in Alexa global
'http://www.charter.net/',
# Why: #1724 in Alexa global
'http://adxhosting.net/',
# Why: #1725 in Alexa global
'http://www.squarespace.com/',
# Why: #1726 in Alexa global
'http://www.trademe.co.nz/',
# Why: #1727 in Alexa global
'http://www.sitesell.com/',
# Why: #1728 in Alexa global
'http://www.birthrecods.com/',
# Why: #1729 in Alexa global
'http://www.megashare.info/',
# Why: #1730 in Alexa global
'http://www.freepornvs.com/',
# Why: #1731 in Alexa global
'http://www.isna.ir/',
# Why: #1732 in Alexa global
'http://www.ziddu.com/',
# Why: #1733 in Alexa global
'http://www.airtelforum.com/',
# Why: #1734 in Alexa global
'http://www.justin.tv/',
# Why: #1735 in Alexa global
'http://www.01net.com/',
# Why: #1736 in Alexa global
'http://www.ed.gov/',
# Why: #1737 in Alexa global
'http://www.no-ip.com/',
# Why: #1738 in Alexa global
'http://www.nikkansports.com/',
# Why: #1739 in Alexa global
'http://www.smashingmagazine.com/',
# Why: #1740 in Alexa global
'http://www.salon.com/',
# Why: #1741 in Alexa global
'http://www.nmisr.com/',
# Why: #1742 in Alexa global
'http://www.wanggou.com/',
# Why: #1743 in Alexa global
'http://www.bayt.com/',
# Why: #1744 in Alexa global
'http://www.codeproject.com/',
# Why: #1745 in Alexa global
'http://www.downloadha.com/',
# Why: #1746 in Alexa global
'http://www.local.com/',
# Why: #1747 in Alexa global
'http://www.abola.pt/',
# Why: #1748 in Alexa global
'http://www.delta-homes.com/',
# Why: #1749 in Alexa global
'http://www.filmweb.pl/',
# Why: #1750 in Alexa global
'http://www.gov.uk/',
# Why: #1751 in Alexa global
'http://www.worldoftanks.eu/',
# Why: #1752 in Alexa global
'http://www.ads-id.com/',
# Why: #1753 in Alexa global
'http://www.sergey-mavrodi.com/',
# Why: #1754 in Alexa global
'http://www.pornoid.com/',
# Why: #1755 in Alexa global
'http://www.freakshare.com/',
# Why: #1756 in Alexa global
'http://www.51fanli.com/',
# Why: #1757 in Alexa global
'http://www.bankrate.com/',
# Why: #1758 in Alexa global
'http://www.grindtv.com/',
# Why: #1759 in Alexa global
'http://www.webmasterworld.com/',
# Why: #1760 in Alexa global
'http://www.torrentz.in/',
# Why: #1761 in Alexa global
'http://www.bwin.com/',
# Why: #1762 in Alexa global
'http://www.watchtower.com/',
# Why: #1763 in Alexa global
'http://www.payza.com/',
# Why: #1764 in Alexa global
'http://www.eol.cn/',
# Why: #1765 in Alexa global
'http://www.anz.com/',
# Why: #1766 in Alexa global
'http://www.vagalume.com.br/',
# Why: #1767 in Alexa global
'http://www.ozon.ru/',
# Why: #1768 in Alexa global
'http://www.cnr.cn/',
# Why: #1769 in Alexa global
'http://www.tonicmovies.com/',
# Why: #1771 in Alexa global
'http://www.arbeitsagentur.de/',
# Why: #1772 in Alexa global
'http://www.graphicriver.net/',
# Why: #1773 in Alexa global
'http://www.theweathernetwork.com/',
# Why: #1774 in Alexa global
'http://www.samsclub.com/',
# Why: #1775 in Alexa global
'http://www.tribunnews.com/',
# Why: #1776 in Alexa global
'http://www.soldonsmart.com/',
# Why: #1777 in Alexa global
'http://www.tut.by/',
# Why: #1778 in Alexa global
'http://www.voila.fr/',
# Why: #1779 in Alexa global
'http://www.doctissimo.fr/',
# Why: #1780 in Alexa global
'http://www.sueddeutsche.de/',
# Why: #1781 in Alexa global
'http://www.mamba.ru/',
# Why: #1782 in Alexa global
'http://www.kmart.com/',
# Why: #1783 in Alexa global
'http://www.noticias.uol.com.br/',
# Why: #1784 in Alexa global
'http://www.abc.es/',
# Why: #1785 in Alexa global
'http://www.manager.co.th/',
# Why: #1786 in Alexa global
'http://www.spokeo.com/',
# Why: #1787 in Alexa global
'http://www.apache.org/',
# Why: #1788 in Alexa global
'http://www.tdbank.com/',
# Why: #1789 in Alexa global
'http://www.asklaila.com/',
# Why: #1790 in Alexa global
'http://admin5.net/',
# Why: #1791 in Alexa global
'http://www.rtve.es/',
# Why: #1792 in Alexa global
'http://www.ynet.co.il/',
# Why: #1793 in Alexa global
'http://www.infospace.com/',
# Why: #1794 in Alexa global
'http://yimg.com/',
# Why: #1795 in Alexa global
'http://www.torcache.net/',
# Why: #1796 in Alexa global
'http://www.zap2it.com/',
# Why: #1797 in Alexa global
'http://www.smallseotools.com/',
# Why: #1798 in Alexa global
'http://www.privatbank.ua/',
# Why: #1799 in Alexa global
'http://www.nnm-club.ru/',
# Why: #1800 in Alexa global
'http://www.payoneer.com/',
# Why: #1801 in Alexa global
'http://www.bidorbuy.co.za/',
# Why: #1802 in Alexa global
'http://www.islamweb.net/',
# Why: #1803 in Alexa global
'http://www.juicyads.com/',
# Why: #1804 in Alexa global
'http://www.vid2c.com/',
# Why: #1805 in Alexa global
'http://rising.cn/',
# Why: #1806 in Alexa global
'http://www.dnsrsearch.com/',
# Why: #1807 in Alexa global
'http://www.the-bux.net/',
# Why: #1808 in Alexa global
'http://www.yaplakal.com/',
# Why: #1809 in Alexa global
'http://www.ex.ua/',
# Why: #1810 in Alexa global
'http://www.mtsindia.in/',
# Why: #1811 in Alexa global
'http://www.reclameaqui.com.br/',
# Why: #1812 in Alexa global
'http://www.postbank.de/',
# Why: #1813 in Alexa global
'http://www.gogvo.com/',
# Why: #1814 in Alexa global
'http://www.bearshare.net/',
# Why: #1815 in Alexa global
'http://www.socialsex.com/',
# Why: #1816 in Alexa global
'http://www.yebhi.com/',
# Why: #1817 in Alexa global
'http://www.mktmobi.com/',
# Why: #1818 in Alexa global
'http://www.hotpepper.jp/',
# Why: #1819 in Alexa global
'http://www.dfiles.eu/',
# Why: #1820 in Alexa global
'http://www.citibank.co.in/',
# Why: #1821 in Alexa global
'http://gamersky.com/',
# Why: #1822 in Alexa global
'http://www.kotaku.com/',
# Why: #1823 in Alexa global
'http://www.teamviewer.com/',
# Why: #1824 in Alexa global
'http://www.kwejk.pl/',
# Why: #1825 in Alexa global
'http://www.hamariweb.com/',
# Why: #1826 in Alexa global
'http://www.tom.com/',
# Why: #1827 in Alexa global
'http://www.gayromeo.com/',
# Why: #1828 in Alexa global
'http://www.sony.com/',
# Why: #1829 in Alexa global
'http://www.westpac.com.au/',
# Why: #1830 in Alexa global
'http://www.gtmetrix.com/',
# Why: #1831 in Alexa global
'http://www.shorouknews.com/',
# Why: #1832 in Alexa global
'http://www.xl.pt/',
# Why: #1833 in Alexa global
'http://www.networksolutions.com/',
# Why: #1834 in Alexa global
'http://www.500px.com/',
# Why: #1835 in Alexa global
'http://www.ypmate.com/',
# Why: #1836 in Alexa global
'http://www.indowebster.com/',
# Why: #1837 in Alexa global
'http://www.sports.ru/',
# Why: #1838 in Alexa global
'http://www.netshoes.com.br/',
# Why: #1839 in Alexa global
'http://familydoctor.com.cn/',
# Why: #1840 in Alexa global
'http://www.dfiles.ru/',
# Why: #1841 in Alexa global
'http://www.cpasbien.me/',
# Why: #1842 in Alexa global
'http://www.webgame.web.id/',
# Why: #1843 in Alexa global
'http://www.tuto4pc.com/',
# Why: #1844 in Alexa global
'http://www.poponclick.com/',
# Why: #1845 in Alexa global
'http://www.complex.com/',
# Why: #1846 in Alexa global
'http://www.sakshi.com/',
# Why: #1847 in Alexa global
'http://www.infobae.com/',
# Why: #1848 in Alexa global
'http://www.allabout.co.jp/',
# Why: #1849 in Alexa global
'http://www.sify.com/',
# Why: #1850 in Alexa global
'http://www.4pda.ru/',
# Why: #1851 in Alexa global
'http://www.starsue.net/',
# Why: #1852 in Alexa global
'http://www.newgrounds.com/',
# Why: #1853 in Alexa global
'http://www.mehrnews.com/',
# Why: #1854 in Alexa global
'http://www.depositphotos.com/',
# Why: #1855 in Alexa global
'http://www.keek.com/',
# Why: #1856 in Alexa global
'http://www.indeed.co.in/',
# Why: #1857 in Alexa global
'http://www.stanford.edu/',
# Why: #1858 in Alexa global
'http://www.hepsiburada.com/',
# Why: #1859 in Alexa global
'http://www.20minutos.es/',
# Why: #1860 in Alexa global
'http://www.paper.li/',
# Why: #1861 in Alexa global
'http://www.prizee.com/',
# Why: #1862 in Alexa global
'http://www.xlovecam.com/',
# Why: #1863 in Alexa global
'http://www.criteo.com/',
# Why: #1864 in Alexa global
'http://www.endlessmatches.com/',
# Why: #1865 in Alexa global
'http://www.dyndns.org/',
# Why: #1866 in Alexa global
'http://www.lightinthebox.com/',
# Why: #1867 in Alexa global
'http://www.easyjet.com/',
# Why: #1869 in Alexa global
'http://www.vice.com/',
# Why: #1870 in Alexa global
'http://tiexue.net/',
# Why: #1871 in Alexa global
'http://www.monstermarketplace.com/',
# Why: #1872 in Alexa global
'http://www.mojang.com/',
# Why: #1873 in Alexa global
'http://www.cams.com/',
# Why: #1874 in Alexa global
'http://www.pingdom.com/',
# Why: #1875 in Alexa global
'http://www.askmen.com/',
# Why: #1876 in Alexa global
'http://www.list-manage1.com/',
# Why: #1878 in Alexa global
'http://www.express.com.pk/',
# Why: #1879 in Alexa global
'http://www.priceminister.com/',
# Why: #1880 in Alexa global
'http://www.duba.com/',
# Why: #1881 in Alexa global
'http://www.meinestadt.de/',
# Why: #1882 in Alexa global
'http://www.mediatakeout.com/',
# Why: #1883 in Alexa global
'http://www.w3school.com.cn/',
# Why: #1884 in Alexa global
'http://www.terere.info/',
# Why: #1885 in Alexa global
'http://www.streamate.com/',
# Why: #1886 in Alexa global
'http://www.garmin.com/',
# Why: #1887 in Alexa global
'http://www.a-telecharger.com/',
# Why: #1888 in Alexa global
'http://www.vipzona.info/',
# Why: #1889 in Alexa global
'http://www.coffetube.com/',
# Why: #1890 in Alexa global
'http://www.discuz.net/',
# Why: #1891 in Alexa global
'http://www.directv.com/',
# Why: #1892 in Alexa global
'http://www.foreningssparbanken.se/',
# Why: #1893 in Alexa global
'http://www.fatwallet.com/',
# Why: #1894 in Alexa global
'http://www.mackolik.com/',
# Why: #1895 in Alexa global
'http://www.megacinema.fr/',
# Why: #1896 in Alexa global
'http://www.chess.com/',
# Why: #1897 in Alexa global
'http://www.suntrust.com/',
# Why: #1898 in Alexa global
'http://www.investing.com/',
# Why: #1899 in Alexa global
'http://www.whois.com/',
# Why: #1900 in Alexa global
'http://www.dummies.com/',
# Why: #1901 in Alexa global
'http://www.yinyuetai.com/',
# Why: #1902 in Alexa global
'http://www.mihandownload.com/',
# Why: #1903 in Alexa global
'http://www.freapp.com/',
# Why: #1904 in Alexa global
'http://www.theage.com.au/',
# Why: #1905 in Alexa global
'http://www.audible.com/',
# Why: #1906 in Alexa global
'http://www.hangame.co.jp/',
# Why: #1907 in Alexa global
'http://www.hotelurbano.com.br/',
# Why: #1908 in Alexa global
'http://www.vatgia.com/',
# Why: #1909 in Alexa global
'http://www.wizard101.com/',
# Why: #1910 in Alexa global
'http://www.ceneo.pl/',
# Why: #1911 in Alexa global
'http://1ting.com/',
# Why: #1912 in Alexa global
'http://www.meetic.fr/',
# Why: #1913 in Alexa global
'http://www.cardekho.com/',
# Why: #1914 in Alexa global
'http://www.tripadvisor.it/',
# Why: #1915 in Alexa global
'http://www.dhl.com/',
# Why: #1916 in Alexa global
'http://www.aibang.com/',
# Why: #1917 in Alexa global
'http://www.asp.net/',
# Why: #1918 in Alexa global
'http://www.toing.com.br/',
# Why: #1920 in Alexa global
'http://zhubajie.com/',
# Why: #1921 in Alexa global
'http://www.telecomitalia.it/',
# Why: #1922 in Alexa global
'http://www.claro-search.com/',
# Why: #1923 in Alexa global
'http://www.nickjr.com/',
# Why: #1924 in Alexa global
'http://www.iconfinder.com/',
# Why: #1925 in Alexa global
'http://www.mobile9.com/',
# Why: #1926 in Alexa global
'http://www.mainichi.jp/',
# Why: #1927 in Alexa global
'http://www.cisco.com/',
# Why: #1928 in Alexa global
'http://www.cpanel.net/',
# Why: #1929 in Alexa global
'http://www.indiegogo.com/',
# Why: #1930 in Alexa global
'http://www.egotastic.com/',
# Why: #1931 in Alexa global
'http://www.hforcare.com/',
# Why: #1932 in Alexa global
'http://www.pbs.org/',
# Why: #1933 in Alexa global
'http://www.realestate.com.au/',
# Why: #1934 in Alexa global
'http://www.abv.bg/',
# Why: #1935 in Alexa global
'http://www.drugs.com/',
# Why: #1936 in Alexa global
'http://www.bt.com/',
# Why: #1937 in Alexa global
'http://www.wildberries.ru/',
# Why: #1938 in Alexa global
'http://www.edreams.it/',
# Why: #1939 in Alexa global
'http://www.statigr.am/',
# Why: #1940 in Alexa global
'http://www.prestashop.com/',
# Why: #1941 in Alexa global
'http://www.adxite.com/',
# Why: #1942 in Alexa global
'http://www.birthdaypeoms.com/',
# Why: #1943 in Alexa global
'http://www.exbii.com/',
# Why: #1944 in Alexa global
'http://www.blogmura.com/',
# Why: #1945 in Alexa global
'http://www.sciencedirect.com/',
# Why: #1946 in Alexa global
'http://www.sanspo.com/',
# Why: #1947 in Alexa global
'http://www.nextmedia.com/',
# Why: #1948 in Alexa global
'http://www.tvoyauda4a.ru/',
# Why: #1949 in Alexa global
'http://tangdou.com/',
# Why: #1950 in Alexa global
'http://www.blackboard.com/',
# Why: #1951 in Alexa global
'http://qiyou.com/',
# Why: #1952 in Alexa global
'http://www.youth.cn/',
# Why: #1953 in Alexa global
'http://www.prezentacya.ru/',
# Why: #1954 in Alexa global
'http://www.clicrbs.com.br/',
# Why: #1955 in Alexa global
'http://www.wayfair.com/',
# Why: #1956 in Alexa global
'http://www.xvideos-field.com/',
# Why: #1957 in Alexa global
'http://www.national.com.au/',
# Why: #1958 in Alexa global
'http://www.friendfeed.com/',
# Why: #1959 in Alexa global
'http://www.plurk.com/',
# Why: #1960 in Alexa global
'http://www.lolmake.com/',
# Why: #1961 in Alexa global
'http://www.b9dm.com/',
# Why: #1962 in Alexa global
'http://www.afkarnews.ir/',
# Why: #1963 in Alexa global
'http://www.dhl.de/',
# Why: #1964 in Alexa global
'http://www.championat.com/',
# Why: #1965 in Alexa global
'http://www.moviefone.com/',
# Why: #1966 in Alexa global
'http://www.popcash.net/',
# Why: #1967 in Alexa global
'http://www.cliphunter.com/',
# Why: #1968 in Alexa global
'http://www.sharebeast.com/',
# Why: #1969 in Alexa global
'http://www.wowhead.com/',
# Why: #1970 in Alexa global
'http://www.firstpost.com/',
# Why: #1971 in Alexa global
'http://www.lloydstsb.com/',
# Why: #1972 in Alexa global
'http://www.fazenda.gov.br/',
# Why: #1973 in Alexa global
'http://www.lonelyplanet.com/',
# Why: #1974 in Alexa global
'http://www.freenet.de/',
# Why: #1975 in Alexa global
'http://www.justanswer.com/',
# Why: #1977 in Alexa global
'http://www.qiwi.com/',
# Why: #1978 in Alexa global
'http://www.shufuni.com/',
# Why: #1979 in Alexa global
'http://www.drive2.ru/',
# Why: #1980 in Alexa global
'http://www.slando.ua/',
# Why: #1981 in Alexa global
'http://www.caribbeancom.com/',
# Why: #1982 in Alexa global
'http://www.uniblue.com/',
# Why: #1983 in Alexa global
'http://www.real.com/',
# Why: #1984 in Alexa global
'http://www.addictinggames.com/',
# Why: #1985 in Alexa global
'http://www.wnd.com/',
# Why: #1986 in Alexa global
'http://www.col3negoriginal.org/',
# Why: #1987 in Alexa global
'http://www.loltrk.com/',
# Why: #1988 in Alexa global
'http://www.videodownloadconverter.com/',
# Why: #1989 in Alexa global
'http://www.google.lv/',
# Why: #1990 in Alexa global
'http://www.seriesyonkis.com/',
# Why: #1991 in Alexa global
'http://www.ryushare.com/',
# Why: #1992 in Alexa global
'http://s1979.com/',
# Why: #1993 in Alexa global
'http://www.cheapoair.com/',
# Why: #1994 in Alexa global
'http://www.plala.or.jp/',
# Why: #1995 in Alexa global
'http://www.submarino.com.br/',
# Why: #1996 in Alexa global
'http://www.topface.com/',
# Why: #1998 in Alexa global
'http://www.hotelscombined.com/',
# Why: #1999 in Alexa global
'http://www.whatismyipaddress.com/',
# Why: #2000 in Alexa global
'http://www.z6.com/',
# Why: #2001 in Alexa global
'http://www.sozcu.com.tr/',
# Why: #2002 in Alexa global
'http://www.sonymobile.com/',
# Why: #2003 in Alexa global
'http://www.planetminecraft.com/',
# Why: #2004 in Alexa global
'http://www.optimum.net/',
# Why: #2005 in Alexa global
'http://www.google.com.pr/',
# Why: #2006 in Alexa global
'http://mthai.com/',
# Why: #2007 in Alexa global
'http://www.onlinecreditcenter6.com/',
# Why: #2008 in Alexa global
'http://www.tharunaya.co.uk/',
# Why: #2009 in Alexa global
'http://www.sfimg.com/',
# Why: #2010 in Alexa global
'http://www.natwest.com/',
# Why: #2011 in Alexa global
'http://www.zergnet.com/',
# Why: #2012 in Alexa global
'http://www.alotporn.com/',
# Why: #2013 in Alexa global
'http://www.urbanspoon.com/',
# Why: #2014 in Alexa global
'http://www.punishtube.com/',
# Why: #2015 in Alexa global
'http://www.proboards.com/',
# Why: #2016 in Alexa global
'http://www.betfair.com/',
# Why: #2017 in Alexa global
'http://www.iltasanomat.fi/',
# Why: #2018 in Alexa global
'http://www.ssisurveys.com/',
# Why: #2019 in Alexa global
'http://www.mapion.co.jp/',
# Why: #2020 in Alexa global
'http://www.harvard.edu/',
# Why: #2021 in Alexa global
'http://www.blic.rs/',
# Why: #2022 in Alexa global
'http://www.clicksia.com/',
# Why: #2023 in Alexa global
'http://www.skillpages.com/',
# Why: #2024 in Alexa global
'http://www.mobilewap.com/',
# Why: #2025 in Alexa global
'http://www.fiducia.de/',
# Why: #2026 in Alexa global
'http://www.torntvz.org/',
# Why: #2027 in Alexa global
'http://www.leparisien.fr/',
# Why: #2028 in Alexa global
'http://anjuke.com/',
# Why: #2029 in Alexa global
'http://www.rabobank.nl/',
# Why: #2030 in Alexa global
'http://www.sport.pl/',
# Why: #2031 in Alexa global
'http://www.schwab.com/',
# Why: #2032 in Alexa global
'http://www.buenastareas.com/',
# Why: #2033 in Alexa global
'http://www.befuck.com/',
# Why: #2034 in Alexa global
'http://www.smart-search.com/',
# Why: #2035 in Alexa global
'http://www.ivi.ru/',
# Why: #2036 in Alexa global
'http://www.2z.cn/',
# Why: #2037 in Alexa global
'http://www.dvdvideosoft.com/',
# Why: #2038 in Alexa global
'http://www.ubi.com/',
# Why: #2039 in Alexa global
'http://makepolo.com/',
# Why: #2040 in Alexa global
'http://www.1and1.com/',
# Why: #2041 in Alexa global
'http://www.anipo.jp/',
# Why: #2042 in Alexa global
'http://www.pcworld.com/',
# Why: #2043 in Alexa global
'http://www.caf.fr/',
# Why: #2044 in Alexa global
'http://www.fnb.co.za/',
# Why: #2045 in Alexa global
'http://www.vanguardngr.com/',
# Why: #2046 in Alexa global
'http://www.floozycity.com/',
# Why: #2047 in Alexa global
'http://www.ubuntu.com/',
# Why: #2048 in Alexa global
'http://www.my-link.pro/',
# Why: #2049 in Alexa global
'http://www.daily.co.jp/',
# Why: #2050 in Alexa global
'http://www.centurylink.com/',
# Why: #2051 in Alexa global
'http://www.slashdot.org/',
# Why: #2052 in Alexa global
'http://www.mirrorcreator.com/',
# Why: #2053 in Alexa global
'http://www.rutube.ru/',
# Why: #2054 in Alexa global
'http://www.tubeplus.me/',
# Why: #2055 in Alexa global
'http://www.kicker.de/',
# Why: #2056 in Alexa global
'http://www.unibet.com/',
# Why: #2057 in Alexa global
'http://www.pornyaz.com/',
# Why: #2058 in Alexa global
'http://www.learntotradethemarket.com/',
# Why: #2059 in Alexa global
'http://www.tokyo-porn-tube.com/',
# Why: #2060 in Alexa global
'http://www.luvcow.com/',
# Why: #2061 in Alexa global
'http://www.i.ua/',
# Why: #2062 in Alexa global
'http://www.ole.com.ar/',
# Why: #2063 in Alexa global
'http://www.redfin.com/',
# Why: #2064 in Alexa global
'http://www.cnki.net/',
# Why: #2065 in Alexa global
'http://www.2shared.com/',
# Why: #2066 in Alexa global
'http://www.infibeam.com/',
# Why: #2067 in Alexa global
'http://www.zdnet.com/',
# Why: #2068 in Alexa global
'http://www.fishki.net/',
# Why: #2069 in Alexa global
'http://msn.com.cn/',
# Why: #2070 in Alexa global
'http://www.ukr.net/',
# Why: #2071 in Alexa global
'http://www.scol.com.cn/',
# Why: #2072 in Alexa global
'http://www.jiameng.com/',
# Why: #2073 in Alexa global
'http://www.utorrent.com/',
# Why: #2074 in Alexa global
'http://www.elkhabar.com/',
# Why: #2075 in Alexa global
'http://www.anime44.com/',
# Why: #2076 in Alexa global
'http://www.societegenerale.fr/',
# Why: #2077 in Alexa global
'http://www.livememe.com/',
# Why: #2078 in Alexa global
'http://www.warning.or.kr/',
# Why: #2079 in Alexa global
'http://www.startertv.fr/',
# Why: #2080 in Alexa global
'http://www.pingomatic.com/',
# Why: #2081 in Alexa global
'http://www.indeed.co.uk/',
# Why: #2082 in Alexa global
'http://www.dpstream.net/',
# Why: #2083 in Alexa global
'http://www.mundodeportivo.com/',
# Why: #2084 in Alexa global
'http://www.gravatar.com/',
# Why: #2085 in Alexa global
'http://www.ip138.com/',
# Why: #2086 in Alexa global
'http://www.zcool.com.cn/',
# Why: #2087 in Alexa global
'http://www.yandex.net/',
# Why: #2088 in Alexa global
'http://www.barbie.com/',
# Why: #2089 in Alexa global
'http://www.wattpad.com/',
# Why: #2090 in Alexa global
'http://www.dzwww.com/',
# Why: #2091 in Alexa global
'http://www.technorati.com/',
# Why: #2092 in Alexa global
'http://meishichina.com/',
# Why: #2093 in Alexa global
'http://www.russianpost.ru/',
# Why: #2094 in Alexa global
'http://www.kboing.com.br/',
# Why: #2095 in Alexa global
'http://www.lzjl.com/',
# Why: #2096 in Alexa global
'http://www.newsnow.co.uk/',
# Why: #2097 in Alexa global
'http://www.dw.de/',
# Why: #2098 in Alexa global
'http://www.inetglobal.com/',
# Why: #2099 in Alexa global
'http://www.tripadvisor.in/',
# Why: #2100 in Alexa global
'http://www.ashleyrnadison.com/',
# Why: #2101 in Alexa global
'http://www.rapgenius.com/',
# Why: #2102 in Alexa global
'http://www.xuite.net/',
# Why: #2103 in Alexa global
'http://www.nowvideo.eu/',
# Why: #2104 in Alexa global
'http://www.search.us.com/',
# Why: #2105 in Alexa global
'http://www.usagc.org/',
# Why: #2106 in Alexa global
'http://www.santander.co.uk/',
# Why: #2107 in Alexa global
'http://www.99acres.com/',
# Why: #2108 in Alexa global
'http://www.bigcartel.com/',
# Why: #2109 in Alexa global
'http://www.haivl.com/',
# Why: #2110 in Alexa global
'http://www.jsfiddle.net/',
# Why: #2111 in Alexa global
'http://www.io9.com/',
# Why: #2112 in Alexa global
'http://www.lg.com/',
# Why: #2113 in Alexa global
'http://www.veoh.com/',
# Why: #2114 in Alexa global
'http://www.dafiti.com.br/',
# Why: #2115 in Alexa global
'http://www.heise.de/',
# Why: #2117 in Alexa global
'http://www.wikispaces.com/',
# Why: #2118 in Alexa global
'http://www.google.com.bo/',
# Why: #2119 in Alexa global
'http://www.skyscrapercity.com/',
# Why: #2120 in Alexa global
'http://www.zaobao.com/',
# Why: #2121 in Alexa global
'http://www.pirateproxy.net/',
# Why: #2122 in Alexa global
'http://www.muyzorras.com/',
# Why: #2123 in Alexa global
'http://www.iza.ne.jp/',
# Why: #2124 in Alexa global
'http://www.entrepreneur.com/',
# Why: #2125 in Alexa global
'http://www.sxc.hu/',
# Why: #2126 in Alexa global
'http://www.superuser.com/',
# Why: #2127 in Alexa global
'http://www.jb51.net/',
# Why: #2128 in Alexa global
'http://www.bitsnoop.com/',
# Why: #2129 in Alexa global
'http://www.index.hu/',
# Why: #2130 in Alexa global
'http://www.tubexclips.com/',
# Why: #2131 in Alexa global
'http://www.symantec.com/',
# Why: #2132 in Alexa global
'http://www.sedo.com/',
# Why: #2133 in Alexa global
'http://www.gongchang.com/',
# Why: #2134 in Alexa global
'http://www.haibao.cn/',
# Why: #2135 in Alexa global
'http://www.newsmth.net/',
# Why: #2136 in Alexa global
'http://srclick.ru/',
# Why: #2137 in Alexa global
'http://www.bomnegocio.com/',
# Why: #2138 in Alexa global
'http://www.omegle.com/',
# Why: #2139 in Alexa global
'http://www.sweetpacks-search.com/',
# Why: #2140 in Alexa global
'http://www.000webhost.com/',
# Why: #2141 in Alexa global
'http://www.rencontreshard.com/',
# Why: #2142 in Alexa global
'http://www.jumei.com/',
# Why: #2143 in Alexa global
'http://www.acfun.tv/',
# Why: #2144 in Alexa global
'http://www.celebuzz.com/',
# Why: #2145 in Alexa global
'http://www.el-balad.com/',
# Why: #2146 in Alexa global
'http://www.wajam.com/',
# Why: #2147 in Alexa global
'http://www.zoopla.co.uk/',
# Why: #2148 in Alexa global
'http://sc4888.com/',
# Why: #2149 in Alexa global
'http://www.mobileaziende.it/',
# Why: #2150 in Alexa global
'http://www.officialsurvey.org/',
# Why: #2151 in Alexa global
'http://googleapis.com/',
# Why: #2152 in Alexa global
'http://www.mufg.jp/',
# Why: #2153 in Alexa global
'http://www.jobsdb.com/',
# Why: #2154 in Alexa global
'http://www.yahoo.com.cn/',
# Why: #2155 in Alexa global
'http://www.google.com.sv/',
# Why: #2156 in Alexa global
'http://www.freejobalert.com/',
# Why: #2157 in Alexa global
'http://www.walla.co.il/',
# Why: #2158 in Alexa global
'http://www.hollywoodreporter.com/',
# Why: #2159 in Alexa global
'http://www.shop-pro.jp/',
# Why: #2160 in Alexa global
'http://www.inc.com/',
# Why: #2161 in Alexa global
'http://www.bbandt.com/',
# Why: #2162 in Alexa global
'http://www.williamhill.com/',
# Why: #2163 in Alexa global
'http://www.jeu.info/',
# Why: #2164 in Alexa global
'http://www.vrbo.com/',
# Why: #2165 in Alexa global
'http://www.arabseed.com/',
# Why: #2166 in Alexa global
'http://www.spielaffe.de/',
# Why: #2167 in Alexa global
'http://www.wykop.pl/',
# Why: #2168 in Alexa global
'http://www.name.com/',
# Why: #2169 in Alexa global
'http://www.web-opinions.com/',
# Why: #2170 in Alexa global
'http://www.ehowenespanol.com/',
# Why: #2171 in Alexa global
'http://www.uuzu.com/',
# Why: #2173 in Alexa global
'http://www.cafepress.com/',
# Why: #2174 in Alexa global
'http://www.beeline.ru/',
# Why: #2175 in Alexa global
'http://www.searchenginejournal.com/',
# Why: #2176 in Alexa global
'http://mafengwo.cn/',
# Why: #2177 in Alexa global
'http://www.webex.com/',
# Why: #2178 in Alexa global
'http://www.zerohedge.com/',
# Why: #2179 in Alexa global
'http://www.cityads.ru/',
# Why: #2180 in Alexa global
'http://www.columbia.edu/',
# Why: #2181 in Alexa global
'http://jia.com/',
# Why: #2182 in Alexa global
'http://www.tistory.com/',
# Why: #2183 in Alexa global
'http://www.100bestbuy.com/',
# Why: #2184 in Alexa global
'http://www.realitykings.com/',
# Why: #2185 in Alexa global
'http://www.shopify.com/',
# Why: #2186 in Alexa global
'http://www.gametop.com/',
# Why: #2187 in Alexa global
'http://www.eharmony.com/',
# Why: #2188 in Alexa global
'http://www.ngoisao.net/',
# Why: #2189 in Alexa global
'http://www.angieslist.com/',
# Why: #2190 in Alexa global
'http://www.grotal.com/',
# Why: #2191 in Alexa global
'http://www.manhunt.net/',
# Why: #2192 in Alexa global
'http://www.adslgate.com/',
# Why: #2193 in Alexa global
'http://www.demotywatory.pl/',
# Why: #2194 in Alexa global
'http://www.enfemenino.com/',
# Why: #2195 in Alexa global
'http://www.yallakora.com/',
# Why: #2196 in Alexa global
'http://www.careesma.in/',
# Why: #2197 in Alexa global
'http://www.draugiem.lv/',
# Why: #2198 in Alexa global
'http://www.greatandhra.com/',
# Why: #2199 in Alexa global
'http://www.lifescript.com/',
# Why: #2201 in Alexa global
'http://www.androidcentral.com/',
# Why: #2202 in Alexa global
'http://www.wiley.com/',
# Why: #2203 in Alexa global
'http://www.alot.com/',
# Why: #2204 in Alexa global
'http://www.10010.com/',
# Why: #2205 in Alexa global
'http://www.next.co.uk/',
# Why: #2206 in Alexa global
'http://115.com/',
# Why: #2207 in Alexa global
'http://www.omgpm.com/',
# Why: #2208 in Alexa global
'http://www.mycalendarbook.com/',
# Why: #2209 in Alexa global
'http://www.playxn.com/',
# Why: #2210 in Alexa global
'http://www.niksalehi.com/',
# Why: #2211 in Alexa global
'http://www.serviporno.com/',
# Why: #2212 in Alexa global
'http://www.poste.it/',
# Why: #2213 in Alexa global
'http://kimiss.com/',
# Why: #2214 in Alexa global
'http://www.bearshare.com/',
# Why: #2215 in Alexa global
'http://www.clickpoint.com/',
# Why: #2216 in Alexa global
'http://www.seek.com.au/',
# Why: #2217 in Alexa global
'http://www.bab.la/',
# Why: #2218 in Alexa global
'http://www.ads8.com/',
# Why: #2219 in Alexa global
'http://www.viewster.com/',
# Why: #2220 in Alexa global
'http://www.ideacellular.com/',
# Why: #2221 in Alexa global
'http://www.tympanus.net/',
# Why: #2222 in Alexa global
'http://www.wwwblogto.com/',
# Why: #2223 in Alexa global
'http://www.tblop.com/',
# Why: #2224 in Alexa global
'http://elong.com/',
# Why: #2225 in Alexa global
'http://www.funnyordie.com/',
# Why: #2226 in Alexa global
'http://www.radikal.ru/',
# Why: #2227 in Alexa global
'http://www.rk.com/',
# Why: #2228 in Alexa global
'http://www.alarab.net/',
# Why: #2229 in Alexa global
'http://www.willhaben.at/',
# Why: #2230 in Alexa global
'http://www.infoseek.co.jp/',
# Why: #2231 in Alexa global
'http://www.beyond.com/',
# Why: #2232 in Alexa global
'http://www.punchng.com/',
# Why: #2233 in Alexa global
'http://www.viglink.com/',
# Why: #2234 in Alexa global
'http://www.microsoftstore.com/',
# Why: #2235 in Alexa global
'http://www.tripleclicks.com/',
# Why: #2236 in Alexa global
'http://www.m1905.com/',
# Why: #2237 in Alexa global
'http://www.ofreegames.com/',
# Why: #2238 in Alexa global
'http://www.s2d6.com/',
# Why: #2239 in Alexa global
'http://www.360buy.com/',
# Why: #2240 in Alexa global
'http://www.rakuten.com/',
# Why: #2241 in Alexa global
'http://www.evite.com/',
# Why: #2242 in Alexa global
'http://www.kompasiana.com/',
# Why: #2243 in Alexa global
'http://www.dailycaller.com/',
# Why: #2246 in Alexa global
'http://www.holidaycheck.de/',
# Why: #2248 in Alexa global
'http://www.imvu.com/',
# Why: #2249 in Alexa global
'http://www.unfranchise.com.tw/',
# Why: #2250 in Alexa global
'http://www.nate.com/',
# Why: #2251 in Alexa global
'http://fnac.com/',
# Why: #2252 in Alexa global
'http://www.htc.com/',
# Why: #2253 in Alexa global
'http://www.savenkeep.com/',
# Why: #2254 in Alexa global
'http://www.alfabank.ru/',
# Why: #2255 in Alexa global
'http://www.zaycev.net/',
# Why: #2256 in Alexa global
'http://www.vidtomp3.com/',
# Why: #2257 in Alexa global
'http://www.eluniversal.com.mx/',
# Why: #2258 in Alexa global
'http://haiwainet.cn/',
# Why: #2259 in Alexa global
'http://www.theatlantic.com/',
# Why: #2260 in Alexa global
'http://www.gamigo.de/',
# Why: #2261 in Alexa global
'http://www.lolking.net/',
# Why: #2262 in Alexa global
'http://www.wer-kennt-wen.de/',
# Why: #2263 in Alexa global
'http://www.stern.de/',
# Why: #2264 in Alexa global
'http://sport1.de/',
# Why: #2265 in Alexa global
'http://www.goalunited.org/',
# Why: #2266 in Alexa global
'http://www.discogs.com/',
# Why: #2267 in Alexa global
'http://www.whirlpool.net.au/',
# Why: #2268 in Alexa global
'http://www.savefrom.net/',
# Why: #2269 in Alexa global
'http://www.eurosport.fr/',
# Why: #2270 in Alexa global
'http://www.juegosjuegos.com/',
# Why: #2271 in Alexa global
'http://www.open24news.tv/',
# Why: #2272 in Alexa global
'http://www.zozo.jp/',
# Why: #2273 in Alexa global
'http://sinaapp.com/',
# Why: #2274 in Alexa global
'http://www.fuq.com/',
# Why: #2275 in Alexa global
'http://www.index.hr/',
# Why: #2276 in Alexa global
'http://www.realpopbid.com/',
# Why: #2277 in Alexa global
'http://www.rollingstone.com/',
# Why: #2278 in Alexa global
'http://www.globaltestmarket.com/',
# Why: #2279 in Alexa global
'http://www.seopult.ru/',
# Why: #2280 in Alexa global
'http://www.wumii.com/',
# Why: #2281 in Alexa global
'http://www.ford.com/',
# Why: #2282 in Alexa global
'http://www.cabelas.com/',
# Why: #2283 in Alexa global
'http://www.securepaynet.net/',
# Why: #2284 in Alexa global
'http://www.zhibo8.cc/',
# Why: #2285 in Alexa global
'http://www.jiji.com/',
# Why: #2286 in Alexa global
'http://www.gezinti.com/',
# Why: #2287 in Alexa global
'http://www.meb.gov.tr/',
# Why: #2288 in Alexa global
'http://www.classifiedads.com/',
# Why: #2289 in Alexa global
'http://www.kitco.com/',
# Why: #2290 in Alexa global
'http://www.incredimail.com/',
# Why: #2291 in Alexa global
'http://www.esmas.com/',
# Why: #2292 in Alexa global
'http://www.soccerway.com/',
# Why: #2293 in Alexa global
'http://www.rivals.com/',
# Why: #2294 in Alexa global
'http://www.prezi.com/',
# Why: #2295 in Alexa global
'http://www.shopping.com/',
# Why: #2296 in Alexa global
'http://www.superjob.ru/',
# Why: #2297 in Alexa global
'http://chinaacc.com/',
# Why: #2298 in Alexa global
'http://www.amoureux.com/',
# Why: #2299 in Alexa global
'http://www.mysmartprice.com/',
# Why: #2300 in Alexa global
'http://www.eleconomista.es/',
# Why: #2301 in Alexa global
'http://www.mercola.com/',
# Why: #2302 in Alexa global
'http://www.imlive.com/',
# Why: #2303 in Alexa global
'http://www.teacup.com/',
# Why: #2304 in Alexa global
'http://www.modelmayhem.com/',
# Why: #2305 in Alexa global
'http://www.nic.ru/',
# Why: #2306 in Alexa global
'http://www.brazzersnetwork.com/',
# Why: #2307 in Alexa global
'http://www.everything.org.uk/',
# Why: #2308 in Alexa global
'http://www.bhg.com/',
# Why: #2309 in Alexa global
'http://www.longhoo.net/',
# Why: #2311 in Alexa global
'http://www.superpages.com/',
# Why: #2312 in Alexa global
'http://www.tny.cz/',
# Why: #2313 in Alexa global
'http://www.yourfilezone.com/',
# Why: #2314 in Alexa global
'http://www.tuan800.com/',
# Why: #2315 in Alexa global
'http://www.streev.com/',
# Why: #2316 in Alexa global
'http://www.sedty.com/',
# Why: #2317 in Alexa global
'http://www.bol.uol.com.br/',
# Why: #2318 in Alexa global
'http://www.boxofficemojo.com/',
# Why: #2319 in Alexa global
'http://www.hollyscoop.com/',
# Why: #2320 in Alexa global
'http://www.safecart.com/',
# Why: #2321 in Alexa global
'http://www.almogaz.com/',
# Why: #2322 in Alexa global
'http://www.cashnhits.com/',
# Why: #2323 in Alexa global
'http://www.wetplace.com/',
# Why: #2324 in Alexa global
'http://www.freepik.com/',
# Why: #2325 in Alexa global
'http://www.rarbg.com/',
# Why: #2326 in Alexa global
'http://www.xxxbunker.com/',
# Why: #2327 in Alexa global
'http://www.prchecker.info/',
# Why: #2328 in Alexa global
'http://www.halifax-online.co.uk/',
# Why: #2329 in Alexa global
'http://www.trafficfactory.biz/',
# Why: #2330 in Alexa global
'http://www.telecinco.es/',
# Why: #2331 in Alexa global
'http://www.searchtermresults.com/',
# Why: #2332 in Alexa global
'http://www.unam.mx/',
# Why: #2333 in Alexa global
'http://www.akhbar-elwatan.com/',
# Why: #2335 in Alexa global
'http://lynda.com/',
# Why: #2336 in Alexa global
'http://www.yougetlaid.com/',
# Why: #2337 in Alexa global
'http://www.smart.com.au/',
# Why: #2338 in Alexa global
'http://www.advfn.com/',
# Why: #2339 in Alexa global
'http://www.unicredit.it/',
# Why: #2340 in Alexa global
'http://www.zomato.com/',
# Why: #2341 in Alexa global
'http://www.flirt.com/',
# Why: #2342 in Alexa global
'http://netease.com/',
# Why: #2343 in Alexa global
'http://www.bnpparibas.net/',
# Why: #2344 in Alexa global
'http://www.elcomercio.pe/',
# Why: #2345 in Alexa global
'http://www.mathrubhumi.com/',
# Why: #2346 in Alexa global
'http://www.koyotesoft.com/',
# Why: #2347 in Alexa global
'http://www.filmix.net/',
# Why: #2348 in Alexa global
'http://www.xnxxhdtube.com/',
# Why: #2349 in Alexa global
'http://www.ennaharonline.com/',
# Why: #2350 in Alexa global
'http://www.junbi-tracker.com/',
# Why: #2351 in Alexa global
'http://www.buzzdock.com/',
# Why: #2352 in Alexa global
'http://www.emirates.com/',
# Why: #2353 in Alexa global
'http://wikiwiki.jp/',
# Why: #2354 in Alexa global
'http://www.vivanuncios.com.mx/',
# Why: #2355 in Alexa global
'http://www.infojobs.net/',
# Why: #2356 in Alexa global
'http://www.smi2.ru/',
# Why: #2357 in Alexa global
'http://www.lotterypost.com/',
# Why: #2358 in Alexa global
'http://www.bandcamp.com/',
# Why: #2359 in Alexa global
'http://www.ekstrabladet.dk/',
# Why: #2360 in Alexa global
'http://www.nownews.com/',
# Why: #2361 in Alexa global
'http://www.bc.vc/',
# Why: #2362 in Alexa global
'http://www.google.com.af/',
# Why: #2364 in Alexa global
'http://www.ulmart.ru/',
# Why: #2365 in Alexa global
'http://www.estadao.com.br/',
# Why: #2366 in Alexa global
'http://www.politico.com/',
# Why: #2367 in Alexa global
'http://kl688.com/',
# Why: #2368 in Alexa global
'http://www.resellerclub.com/',
# Why: #2369 in Alexa global
'http://www.whois.net/',
# Why: #2370 in Alexa global
'http://www.seobuilding.ru/',
# Why: #2371 in Alexa global
'http://www.t411.me/',
# Why: #2372 in Alexa global
'http://googlesyndication.com/',
# Why: #2373 in Alexa global
'http://delfi.lt/',
# Why: #2374 in Alexa global
'http://www.eqla3.com/',
# Why: #2375 in Alexa global
'http://www.ali213.net/',
# Why: #2376 in Alexa global
'http://www.jma.go.jp/',
# Why: #2377 in Alexa global
'http://www.xvideos.jp/',
# Why: #2378 in Alexa global
'http://www.fanpage.it/',
# Why: #2379 in Alexa global
'http://www.uptobox.com/',
# Why: #2380 in Alexa global
'http://www.shinobi.jp/',
# Why: #2381 in Alexa global
'http://www.google.jo/',
# Why: #2382 in Alexa global
'http://cncn.com/',
# Why: #2383 in Alexa global
'http://www.sme.sk/',
# Why: #2384 in Alexa global
'http://www.kinozal.tv/',
# Why: #2385 in Alexa global
'http://www.ceconline.com/',
# Why: #2386 in Alexa global
'http://www.billboard.com/',
# Why: #2387 in Alexa global
'http://www.citi.com/',
# Why: #2388 in Alexa global
'http://www.naughtyamerica.com/',
# Why: #2389 in Alexa global
'http://www.classmates.com/',
# Why: #2390 in Alexa global
'http://www.coursera.org/',
# Why: #2391 in Alexa global
'http://www.pingan.com/',
# Why: #2392 in Alexa global
'http://www.voanews.com/',
# Why: #2393 in Alexa global
'http://www.tankionline.com/',
# Why: #2394 in Alexa global
'http://www.jetblue.com/',
# Why: #2395 in Alexa global
'http://www.spainshtranslation.com/',
# Why: #2396 in Alexa global
'http://www.ebookbrowse.com/',
# Why: #2397 in Alexa global
'http://www.met-art.com/',
# Why: #2398 in Alexa global
'http://www.megafon.ru/',
# Why: #2399 in Alexa global
'http://www.quibids.com/',
# Why: #2400 in Alexa global
'http://www.prcm.jp/',
# Why: #2401 in Alexa global
'http://www.smartfren.com/',
# Why: #2402 in Alexa global
'http://www.cleartrip.com/',
# Why: #2403 in Alexa global
'http://www.pixmania.com/',
# Why: #2405 in Alexa global
'http://www.vivastreet.com/',
# Why: #2406 in Alexa global
'http://www.thegfnetwork.com/',
# Why: #2407 in Alexa global
'http://www.paytm.com/',
# Why: #2408 in Alexa global
'http://www.meinsextagebuch.net/',
# Why: #2409 in Alexa global
'http://www.memecenter.com/',
# Why: #2410 in Alexa global
'http://www.ixbt.com/',
# Why: #2411 in Alexa global
'http://www.dagbladet.no/',
# Why: #2412 in Alexa global
'http://www.basecamphq.com/',
# Why: #2413 in Alexa global
'http://www.chinatimes.com/',
# Why: #2414 in Alexa global
'http://www.bubblews.com/',
# Why: #2415 in Alexa global
'http://www.xtool.ru/',
# Why: #2416 in Alexa global
'http://yoho.cn/',
# Why: #2417 in Alexa global
'http://www.opodo.co.uk/',
# Why: #2418 in Alexa global
'http://www.hattrick.org/',
# Why: #2419 in Alexa global
'http://www.zopim.com/',
# Why: #2420 in Alexa global
'http://www.aol.co.uk/',
# Why: #2421 in Alexa global
'http://www.gazzetta.gr/',
# Why: #2422 in Alexa global
'http://www.18andabused.com/',
# Why: #2423 in Alexa global
'http://www.panasonic.jp/',
# Why: #2424 in Alexa global
'http://www.mcssl.com/',
# Why: #2425 in Alexa global
'http://www.economist.com/',
# Why: #2426 in Alexa global
'http://www.zeit.de/',
# Why: #2427 in Alexa global
'http://www.google.com.uy/',
# Why: #2428 in Alexa global
'http://www.pinoy-ako.info/',
# Why: #2429 in Alexa global
'http://www.lazada.co.id/',
# Why: #2430 in Alexa global
'http://www.filgoal.com/',
# Why: #2431 in Alexa global
'http://www.rozetka.com.ua/',
# Why: #2432 in Alexa global
'http://www.almesryoon.com/',
# Why: #2433 in Alexa global
'http://www.csmonitor.com/',
# Why: #2434 in Alexa global
'http://www.bizjournals.com/',
# Why: #2435 in Alexa global
'http://www.rackspace.com/',
# Why: #2436 in Alexa global
'http://www.webgozar.com/',
# Why: #2437 in Alexa global
'http://www.opencart.com/',
# Why: #2438 in Alexa global
'http://www.mediaplex.com/',
# Why: #2439 in Alexa global
'http://www.deutsche-bank.de/',
# Why: #2440 in Alexa global
'http://www.similarsites.com/',
# Why: #2441 in Alexa global
'http://www.sotmarket.ru/',
# Why: #2442 in Alexa global
'http://www.chatzum.com/',
# Why: #2443 in Alexa global
'http://www.huffingtonpost.co.uk/',
# Why: #2444 in Alexa global
'http://www.carwale.com/',
# Why: #2445 in Alexa global
'http://www.memez.com/',
# Why: #2446 in Alexa global
'http://www.hostmonster.com/',
# Why: #2447 in Alexa global
'http://www.muzofon.com/',
# Why: #2448 in Alexa global
'http://www.elephanttube.com/',
# Why: #2449 in Alexa global
'http://www.crunchbase.com/',
# Why: #2450 in Alexa global
'http://www.imhonet.ru/',
# Why: #2451 in Alexa global
'http://www.lusongsong.com/',
# Why: #2452 in Alexa global
'http://www.filmesonlinegratis.net/',
# Why: #2453 in Alexa global
'http://www.giaoduc.net.vn/',
# Why: #2454 in Alexa global
'http://www.manhub.com/',
# Why: #2455 in Alexa global
'http://www.tatadocomo.com/',
# Why: #2458 in Alexa global
'http://www.realitatea.net/',
# Why: #2459 in Alexa global
'http://www.freemp3x.com/',
# Why: #2460 in Alexa global
'http://www.freemail.hu/',
# Why: #2461 in Alexa global
'http://www.ganool.com/',
# Why: #2462 in Alexa global
'http://www.feedreader.com/',
# Why: #2463 in Alexa global
'http://www.sportsdirect.com/',
# Why: #2464 in Alexa global
'http://www.videolan.org/',
# Why: #2465 in Alexa global
'http://www.watchseries.lt/',
# Why: #2466 in Alexa global
'http://www.rotapost.ru/',
# Why: #2467 in Alexa global
'http://www.nwolb.com/',
# Why: #2468 in Alexa global
'http://www.searchquotes.com/',
# Why: #2469 in Alexa global
'http://www.kaspersky.com/',
# Why: #2470 in Alexa global
'http://www.go2cloud.org/',
# Why: #2471 in Alexa global
'http://www.grepolis.com/',
# Why: #2472 in Alexa global
'http://fh21.com.cn/',
# Why: #2473 in Alexa global
'http://www.profit-partner.ru/',
# Why: #2475 in Alexa global
'http://www.articlesbase.com/',
# Why: #2476 in Alexa global
'http://www.dns-shop.ru/',
# Why: #2477 in Alexa global
'http://www.radikal.com.tr/',
# Why: #2478 in Alexa global
'http://www.justjared.com/',
# Why: #2479 in Alexa global
'http://www.lancenet.com.br/',
# Why: #2480 in Alexa global
'http://www.mangapanda.com/',
# Why: #2481 in Alexa global
'http://www.theglobeandmail.com/',
# Why: #2483 in Alexa global
'http://www.ecollege.com/',
# Why: #2484 in Alexa global
'http://www.myanimelist.net/',
# Why: #2485 in Alexa global
'http://www.immoral.jp/',
# Why: #2486 in Alexa global
'http://www.fotomac.com.tr/',
# Why: #2487 in Alexa global
'http://imanhua.com/',
# Why: #2488 in Alexa global
'http://www.travelzoo.com/',
# Why: #2489 in Alexa global
'http://www.jjwxc.net/',
# Why: #2490 in Alexa global
'http://www.q.gs/',
# Why: #2491 in Alexa global
'http://www.naaptol.com/',
# Why: #2492 in Alexa global
'http://www.sambaporno.com/',
# Why: #2493 in Alexa global
'http://www.macrojuegos.com/',
# Why: #2494 in Alexa global
'http://www.ooo-sex.com/',
# Why: #2495 in Alexa global
'http://www.fab.com/',
# Why: #2496 in Alexa global
'http://www.roflzone.com/',
# Why: #2497 in Alexa global
'http://www.searchcompletion.com/',
# Why: #2498 in Alexa global
'http://www.jezebel.com/',
# Why: #2499 in Alexa global
'http://bizdec.ru/',
# Why: #2500 in Alexa global
'http://www.torrentino.com/',
# Why: #2501 in Alexa global
'http://www.multitran.ru/',
# Why: #2502 in Alexa global
'http://www.tune-up.com/',
# Why: #2503 in Alexa global
'http://www.sparkpeople.com/',
# Why: #2505 in Alexa global
'http://www.desi-tashan.com/',
# Why: #2506 in Alexa global
'http://www.mashreghnews.ir/',
# Why: #2507 in Alexa global
'http://www.talktalk.co.uk/',
# Why: #2508 in Alexa global
'http://www.hinkhoj.com/',
# Why: #2509 in Alexa global
'http://www.20minutes.fr/',
# Why: #2510 in Alexa global
'http://www.sulia.com/',
# Why: #2511 in Alexa global
'http://www.icims.com/',
# Why: #2512 in Alexa global
'http://www.dizi-mag.com/',
# Why: #2513 in Alexa global
'http://www.webaslan.com/',
# Why: #2514 in Alexa global
'http://www.en.wordpress.com/',
# Why: #2515 in Alexa global
'http://www.funmoods.com/',
# Why: #2516 in Alexa global
'http://www.softgozar.com/',
# Why: #2517 in Alexa global
'http://www.starwoodhotels.com/',
# Why: #2518 in Alexa global
'http://www.studiopress.com/',
# Why: #2519 in Alexa global
'http://www.click.in/',
# Why: #2520 in Alexa global
'http://www.meetcheap.com/',
# Why: #2521 in Alexa global
'http://www.angel-live.com/',
# Why: #2522 in Alexa global
'http://www.beforeitsnews.com/',
# Why: #2524 in Alexa global
'http://www.trello.com/',
# Why: #2525 in Alexa global
'http://www.icontact.com/',
# Why: #2526 in Alexa global
'http://www.prlog.org/',
# Why: #2527 in Alexa global
'http://www.incentria.com/',
# Why: #2528 in Alexa global
'http://www.bouyguestelecom.fr/',
# Why: #2529 in Alexa global
'http://www.dstv.com/',
# Why: #2530 in Alexa global
'http://www.arstechnica.com/',
# Why: #2531 in Alexa global
'http://www.diigo.com/',
# Why: #2532 in Alexa global
'http://www.consumers-research.com/',
# Why: #2533 in Alexa global
'http://www.metaffiliation.com/',
# Why: #2534 in Alexa global
'http://www.telekom.de/',
# Why: #2535 in Alexa global
'http://www.izlesene.com/',
# Why: #2536 in Alexa global
'http://www.newsit.gr/',
# Why: #2537 in Alexa global
'http://www.fuckingawesome.com/',
# Why: #2538 in Alexa global
'http://www.osym.gov.tr/',
# Why: #2539 in Alexa global
'http://www.svyaznoy.ru/',
# Why: #2540 in Alexa global
'http://www.watchfreemovies.ch/',
# Why: #2541 in Alexa global
'http://www.gumtree.pl/',
# Why: #2542 in Alexa global
'http://www.sportbox.ru/',
# Why: #2543 in Alexa global
'http://www.reserverunessai.com/',
# Why: #2544 in Alexa global
'http://www.hsbc.com.hk/',
# Why: #2546 in Alexa global
'http://www.cricbuzz.com/',
# Why: #2547 in Alexa global
'http://www.djelfa.info/',
# Why: #2548 in Alexa global
'http://www.nouvelobs.com/',
# Why: #2549 in Alexa global
'http://www.aruba.it/',
# Why: #2550 in Alexa global
'http://www.homes.com/',
# Why: #2551 in Alexa global
'http://www.allezleslions.com/',
# Why: #2552 in Alexa global
'http://www.orkut.com.br/',
# Why: #2553 in Alexa global
'http://www.aionfreetoplay.com/',
# Why: #2554 in Alexa global
'http://www.academia.edu/',
# Why: #2555 in Alexa global
'http://www.blogosfera.uol.com.br/',
# Why: #2556 in Alexa global
'http://www.consumerreports.org/',
# Why: #2557 in Alexa global
'http://www.ilsole24ore.com/',
# Why: #2558 in Alexa global
'http://www.sephora.com/',
# Why: #2559 in Alexa global
'http://www.lds.org/',
# Why: #2560 in Alexa global
'http://vmall.com/',
# Why: #2561 in Alexa global
'http://www.ultimasnoticias.com.ve/',
# Why: #2562 in Alexa global
'http://www.healthgrades.com/',
# Why: #2563 in Alexa global
'http://www.imgbox.com/',
# Why: #2564 in Alexa global
'http://www.dlsite.com/',
# Why: #2565 in Alexa global
'http://www.whitesmoke.com/',
# Why: #2566 in Alexa global
'http://www.thenextweb.com/',
# Why: #2567 in Alexa global
'http://www.qire123.com/',
# Why: #2568 in Alexa global
'http://www.peeplo.com/',
# Why: #2569 in Alexa global
'http://www.chitika.com/',
# Why: #2570 in Alexa global
'http://www.alwafd.org/',
# Why: #2571 in Alexa global
'http://www.phonearena.com/',
# Why: #2572 in Alexa global
'http://www.ovh.com/',
# Why: #2573 in Alexa global
'http://www.tusfiles.net/',
# Why: #2574 in Alexa global
'http://www.18schoolgirlz.com/',
# Why: #2575 in Alexa global
'http://www.bongacams.com/',
# Why: #2576 in Alexa global
'http://www.home.pl/',
# Why: #2577 in Alexa global
'http://www.footmercato.net/',
# Why: #2579 in Alexa global
'http://www.sprashivai.ru/',
# Why: #2580 in Alexa global
'http://www.megafilmeshd.net/',
# Why: #2581 in Alexa global
'http://www.premium-display.com/',
# Why: #2582 in Alexa global
'http://www.clickey.com/',
# Why: #2584 in Alexa global
'http://www.tokyo-tube.com/',
# Why: #2585 in Alexa global
'http://www.watch32.com/',
# Why: #2586 in Alexa global
'http://www.pornolab.net/',
# Why: #2587 in Alexa global
'http://www.timewarnercable.com/',
# Why: #2588 in Alexa global
'http://www.naturalnews.com/',
# Why: #2589 in Alexa global
'http://www.afimet.com/',
# Why: #2590 in Alexa global
'http://www.telderi.ru/',
# Why: #2591 in Alexa global
'http://www.ioffer.com/',
# Why: #2592 in Alexa global
'http://www.lapatilla.com/',
# Why: #2593 in Alexa global
'http://www.livetv.ru/',
# Why: #2594 in Alexa global
'http://www.cloudflare.com/',
# Why: #2595 in Alexa global
'http://www.lupoporno.com/',
# Why: #2597 in Alexa global
'http://www.nhaccuatui.com/',
# Why: #2598 in Alexa global
'http://www.thepostgame.com/',
# Why: #2599 in Alexa global
'http://www.ipage.com/',
# Why: #2600 in Alexa global
'http://www.banesconline.com/',
# Why: #2601 in Alexa global
'http://www.cdc.gov/',
# Why: #2602 in Alexa global
'http://www.adonweb.ru/',
# Why: #2603 in Alexa global
'http://www.zone-telechargement.com/',
# Why: #2604 in Alexa global
'http://www.intellicast.com/',
# Why: #2605 in Alexa global
'http://www.uloz.to/',
# Why: #2606 in Alexa global
'http://www.pikabu.ru/',
# Why: #2607 in Alexa global
'http://www.megogo.net/',
# Why: #2608 in Alexa global
'http://www.wenxuecity.com/',
# Why: #2609 in Alexa global
'http://www.xml-sitemaps.com/',
# Why: #2610 in Alexa global
'http://www.webdunia.com/',
# Why: #2611 in Alexa global
'http://www.justhost.com/',
# Why: #2612 in Alexa global
'http://www.starbucks.com/',
# Why: #2613 in Alexa global
'http://www.wargaming.net/',
# Why: #2614 in Alexa global
'http://www.hugedomains.com/',
# Why: #2615 in Alexa global
'http://magicbricks.com/',
# Why: #2616 in Alexa global
'http://gigporno.com/',
# Why: #2617 in Alexa global
'http://www.rikunabi.com/',
# Why: #2618 in Alexa global
'http://www.51auto.com/',
# Why: #2619 in Alexa global
'http://www.warriorplus.com/',
# Why: #2620 in Alexa global
'http://www.gudvin.tv/',
# Why: #2621 in Alexa global
'http://www.bigmir.net/',
# Why: #2622 in Alexa global
'http://twipple.jp/',
# Why: #2623 in Alexa global
'http://www.ansa.it/',
# Why: #2624 in Alexa global
'http://www.standardbank.co.za/',
# Why: #2625 in Alexa global
'http://www.toshiba.com/',
# Why: #2626 in Alexa global
'http://www.xinnet.com/',
# Why: #2627 in Alexa global
'http://www.geico.com/',
# Why: #2629 in Alexa global
'http://www.funnyjunk.com/',
# Why: #2630 in Alexa global
'http://affaritaliani.it/',
# Why: #2631 in Alexa global
'http://www.cityheaven.net/',
# Why: #2632 in Alexa global
'http://www.tubewolf.com/',
# Why: #2633 in Alexa global
'http://www.google.org/',
# Why: #2634 in Alexa global
'http://www.ad.nl/',
# Why: #2635 in Alexa global
'http://www.tutorialspoint.com/',
# Why: #2638 in Alexa global
'http://www.uidai.gov.in/',
# Why: #2639 in Alexa global
'http://www.everydayhealth.com/',
# Why: #2640 in Alexa global
'http://www.jzip.com/',
# Why: #2641 in Alexa global
'http://www.lolspotsarticles.com/',
# Why: #2642 in Alexa global
'http://www.ana.co.jp/',
# Why: #2643 in Alexa global
'http://www.rueducommerce.fr/',
# Why: #2644 in Alexa global
'http://www.lvmama.com/',
# Why: #2645 in Alexa global
'http://www.roboform.com/',
# Why: #2646 in Alexa global
'http://www.zoznam.sk/',
# Why: #2647 in Alexa global
'http://www.livesmi.com/',
# Why: #2648 in Alexa global
'http://www.die-boersenformel.com/',
# Why: #2649 in Alexa global
'http://www.watchcartoononline.com/',
# Why: #2650 in Alexa global
'http://www.abclocal.go.com/',
# Why: #2651 in Alexa global
'http://www.techrepublic.com/',
# Why: #2652 in Alexa global
'http://www.just-fuck.com/',
# Why: #2653 in Alexa global
'http://www.camster.com/',
# Why: #2654 in Alexa global
'http://www.akairan.com/',
# Why: #2655 in Alexa global
'http://www.yeslibertin.com/',
# Why: #2656 in Alexa global
'http://www.abc.go.com/',
# Why: #2657 in Alexa global
'http://www.searchtherightwords.com/',
# Why: #2658 in Alexa global
'http://www.scotiabank.com/',
# Why: #2659 in Alexa global
'http://www.justclick.ru/',
# Why: #2660 in Alexa global
'http://www.douguo.com/',
# Why: #2661 in Alexa global
'http://www.discover.com/',
# Why: #2662 in Alexa global
'http://www.britishairways.com/',
# Why: #2663 in Alexa global
'http://www.mobafire.com/',
# Why: #2664 in Alexa global
'http://www.gi-akademie.ning.com/',
# Why: #2666 in Alexa global
'http://www.desirulez.net/',
# Why: #2667 in Alexa global
'http://www.qiushibaike.com/',
# Why: #2668 in Alexa global
'http://www.moonbasa.com/',
# Why: #2669 in Alexa global
'http://www.all.biz/',
# Why: #2670 in Alexa global
'http://www.tbs.co.jp/',
# Why: #2671 in Alexa global
'http://www.springer.com/',
# Why: #2672 in Alexa global
'http://www.emai.com/',
# Why: #2673 in Alexa global
'http://www.deadspin.com/',
# Why: #2674 in Alexa global
'http://www.hulkshare.com/',
# Why: #2675 in Alexa global
'http://www.fast-torrent.ru/',
# Why: #2676 in Alexa global
'http://www.oriflame.com/',
# Why: #2677 in Alexa global
'http://www.imgchili.net/',
# Why: #2678 in Alexa global
'http://www.mega-juegos.mx/',
# Why: #2679 in Alexa global
'http://www.gyazo.com/',
# Why: #2680 in Alexa global
'http://www.persianv.com/',
# Why: #2681 in Alexa global
'http://www.adk2.com/',
# Why: #2682 in Alexa global
'http://www.ingbank.pl/',
# Why: #2683 in Alexa global
'http://www.nationalconsumercenter.com/',
# Why: #2684 in Alexa global
'http://www.xxxkinky.com/',
# Why: #2685 in Alexa global
'http://www.mywot.com/',
# Why: #2686 in Alexa global
'http://www.gaymaletube.com/',
# Why: #2687 in Alexa global
'http://www.1tv.ru/',
# Why: #2688 in Alexa global
'http://www.manutd.com/',
# Why: #2689 in Alexa global
'http://www.merchantcircle.com/',
# Why: #2691 in Alexa global
'http://www.canalblog.com/',
# Why: #2692 in Alexa global
'http://www.capitalone360.com/',
# Why: #2693 in Alexa global
'http://www.tlbb8.com/',
# Why: #2694 in Alexa global
'http://www.softonic.fr/',
# Why: #2695 in Alexa global
'http://www.ccavenue.com/',
# Why: #2696 in Alexa global
'http://www.vector.co.jp/',
# Why: #2697 in Alexa global
'http://www.tyroodr.com/',
# Why: #2698 in Alexa global
'http://exam8.com/',
# Why: #2699 in Alexa global
'http://www.allmusic.com/',
# Why: #2700 in Alexa global
'http://www.stubhub.com/',
# Why: #2701 in Alexa global
'http://www.arcor.de/',
# Why: #2702 in Alexa global
'http://www.yolasite.com/',
# Why: #2703 in Alexa global
'http://www.haraj.com.sa/',
# Why: #2704 in Alexa global
'http://www.mypopup.ir/',
# Why: #2705 in Alexa global
'http://www.memurlar.net/',
# Why: #2706 in Alexa global
'http://www.smugmug.com/',
# Why: #2707 in Alexa global
'http://www.filefactory.com/',
# Why: #2708 in Alexa global
'http://www.fantasti.cc/',
# Why: #2709 in Alexa global
'http://www.bokra.net/',
# Why: #2710 in Alexa global
'http://www.goarticles.com/',
# Why: #2711 in Alexa global
'http://www.empowernetwork.com/2Se8w/',
# Why: #2712 in Alexa global
'http://www.moneysavingexpert.com/',
# Why: #2713 in Alexa global
'http://www.donga.com/',
# Why: #2714 in Alexa global
'http://www.lastminute.com/',
# Why: #2715 in Alexa global
'http://www.xkcd.com/',
# Why: #2716 in Alexa global
'http://www.sou300.com/',
# Why: #2717 in Alexa global
'http://www.magnovideo.com/',
# Why: #2718 in Alexa global
'http://www.inquirer.net/',
# Why: #2719 in Alexa global
'http://www.phoenix.edu/',
# Why: #2721 in Alexa global
'http://www.videogenesis.com/',
# Why: #2722 in Alexa global
'http://www.thestar.com/',
# Why: #2723 in Alexa global
'http://www.tripadvisor.es/',
# Why: #2724 in Alexa global
'http://www.blankrefer.com/',
# Why: #2725 in Alexa global
'http://www.yle.fi/',
# Why: #2726 in Alexa global
'http://www.beamtele.com/',
# Why: #2727 in Alexa global
'http://www.oanda.com/',
# Why: #2728 in Alexa global
'http://www.yaplog.jp/',
# Why: #2729 in Alexa global
'http://www.iheart.com/',
# Why: #2730 in Alexa global
'http://www.google.co.tz/',
# Why: #2731 in Alexa global
'http://www.stargazete.com/',
# Why: #2732 in Alexa global
'http://www.bossip.com/',
# Why: #2733 in Alexa global
'http://www.defaultsear.ch/',
# Why: #2734 in Alexa global
'http://www.thaiseoboard.com/',
# Why: #2735 in Alexa global
'http://www.qinbei.com/',
# Why: #2736 in Alexa global
'http://www.ninisite.com/',
# Why: #2737 in Alexa global
'http://www.j.gs/',
# Why: #2738 in Alexa global
'http://www.xinmin.cn/',
# Why: #2739 in Alexa global
'http://www.nos.nl/',
# Why: #2740 in Alexa global
'http://www.qualtrics.com/',
# Why: #2741 in Alexa global
'http://www.kommersant.ru/',
# Why: #2743 in Alexa global
'http://www.urban-rivals.com/',
# Why: #2744 in Alexa global
'http://www.computerbild.de/',
# Why: #2745 in Alexa global
'http://www.fararu.com/',
# Why: #2746 in Alexa global
'http://www.menshealth.com/',
# Why: #2747 in Alexa global
'http://www.jobstreet.com/',
# Why: #2749 in Alexa global
'http://www.rbcroyalbank.com/',
# Why: #2750 in Alexa global
'http://www.inmotionhosting.com/',
# Why: #2751 in Alexa global
'http://www.surveyrouter.com/',
# Why: #2752 in Alexa global
'http://www.kankanews.com/',
# Why: #2753 in Alexa global
'http://www.aol.de/',
# Why: #2754 in Alexa global
'http://www.bol.com/',
# Why: #2755 in Alexa global
'http://www.datpiff.com/',
# Why: #2757 in Alexa global
'http://mplife.com/',
# Why: #2758 in Alexa global
'http://www.sale-fire.com/',
# Why: #2759 in Alexa global
'http://www.inbox.lv/',
# Why: #2760 in Alexa global
'http://www.offeratum.com/',
# Why: #2761 in Alexa global
'http://www.pandora.tv/',
# Why: #2762 in Alexa global
'http://www.eltiempo.com/',
# Why: #2763 in Alexa global
'http://www.indiarailinfo.com/',
# Why: #2764 in Alexa global
'http://www.solidtrustpay.com/',
# Why: #2765 in Alexa global
'http://www.warthunder.ru/',
# Why: #2766 in Alexa global
'http://www.kuronekoyamato.co.jp/',
# Why: #2767 in Alexa global
'http://www.novamov.com/',
# Why: #2768 in Alexa global
'http://www.folkd.com/',
# Why: #2769 in Alexa global
'http://www.envato.com/',
# Why: #2770 in Alexa global
'http://www.wetpaint.com/',
# Why: #2771 in Alexa global
'http://www.tempo.co/',
# Why: #2772 in Alexa global
'http://www.howtogeek.com/',
# Why: #2773 in Alexa global
'http://www.foundationapi.com/',
# Why: #2774 in Alexa global
'http://www.zjol.com.cn/',
# Why: #2775 in Alexa global
'http://www.care2.com/',
# Why: #2776 in Alexa global
'http://www.bendibao.com/',
# Why: #2777 in Alexa global
'http://www.mazika2day.com/',
# Why: #2779 in Alexa global
'http://www.asda.com/',
# Why: #2780 in Alexa global
'http://www.nowvideo.ch/',
# Why: #2781 in Alexa global
'http://www.hiapk.com/',
# Why: #2782 in Alexa global
'http://17u.com/',
# Why: #2783 in Alexa global
'http://www.tutu.ru/',
# Why: #2784 in Alexa global
'http://www.ncdownloader.com/',
# Why: #2785 in Alexa global
'http://www.warez-bb.org/',
# Why: #2786 in Alexa global
'http://www.jsoftj.com/',
# Why: #2787 in Alexa global
'http://www.batepapo.uol.com.br/',
# Why: #2788 in Alexa global
'http://www.xmarks.com/',
# Why: #2789 in Alexa global
'http://www.36kr.com/',
# Why: #2790 in Alexa global
'http://www.runetki.com/',
# Why: #2791 in Alexa global
'http://www.quoka.de/',
# Why: #2792 in Alexa global
'http://www.heureka.cz/',
# Why: #2793 in Alexa global
'http://www.sbisec.co.jp/',
# Why: #2794 in Alexa global
'http://www.monografias.com/',
# Why: #2796 in Alexa global
'http://www.zhenai.com/',
# Why: #2797 in Alexa global
'http://www.4porn.com/',
# Why: #2798 in Alexa global
'http://www.antena3.com/',
# Why: #2799 in Alexa global
'http://lintas.me/',
# Why: #2800 in Alexa global
'http://www.seroundtable.com/',
# Why: #2802 in Alexa global
'http://www.e1.ru/',
# Why: #2803 in Alexa global
'http://www.berkeley.edu/',
# Why: #2804 in Alexa global
'http://www.officedepot.com/',
# Why: #2805 in Alexa global
'http://www.myflorida.com/',
# Why: #2806 in Alexa global
'http://www.parispornmovies.com/',
# Why: #2807 in Alexa global
'http://www.uniqlo.com/',
# Why: #2808 in Alexa global
'http://www.topky.sk/',
# Why: #2809 in Alexa global
'http://www.lumovies.com/',
# Why: #2810 in Alexa global
'http://www.buysellads.com/',
# Why: #2811 in Alexa global
'http://www.stirileprotv.ro/',
# Why: #2812 in Alexa global
'http://www.scottrade.com/',
# Why: #2813 in Alexa global
'http://www.tiboo.cn/',
# Why: #2814 in Alexa global
'http://www.mmtrends.net/',
# Why: #2815 in Alexa global
'http://www.wholesale-dress.net/',
# Why: #2816 in Alexa global
'http://www.metacritic.com/',
# Why: #2817 in Alexa global
'http://www.pichunter.com/',
# Why: #2818 in Alexa global
'http://www.moneybookers.com/',
# Why: #2819 in Alexa global
'http://www.idealista.com/',
# Why: #2820 in Alexa global
'http://www.buzzle.com/',
# Why: #2821 in Alexa global
'http://www.rcom.co.in/',
# Why: #2822 in Alexa global
'http://www.weightwatchers.com/',
# Why: #2823 in Alexa global
'http://www.itv.com/',
# Why: #2824 in Alexa global
'http://www.inilah.com/',
# Why: #2825 in Alexa global
'http://www.vic.gov.au/',
# Why: #2826 in Alexa global
'http://www.prom.ua/',
# Why: #2827 in Alexa global
'http://www.with2.net/',
# Why: #2828 in Alexa global
'http://www.suumo.jp/',
# Why: #2830 in Alexa global
'http://www.doodle.com/',
# Why: #2831 in Alexa global
'http://www.trafficbroker.com/',
# Why: #2832 in Alexa global
'http://www.h33t.com/',
# Why: #2833 in Alexa global
'http://www.avaaz.org/',
# Why: #2834 in Alexa global
'http://www.maultalk.com/',
# Why: #2835 in Alexa global
'http://www.bmo.com/',
# Why: #2836 in Alexa global
'http://www.nerdbux.com/',
# Why: #2837 in Alexa global
'http://www.abnamro.nl/',
# Why: #2838 in Alexa global
'http://www.didigames.com/',
# Why: #2839 in Alexa global
'http://www.pornorama.com/',
# Why: #2840 in Alexa global
'http://www.forumotion.com/',
# Why: #2841 in Alexa global
'http://www.woman.ru/',
# Why: #2843 in Alexa global
'http://www.thaivisa.com/',
# Why: #2844 in Alexa global
'http://www.lexpress.fr/',
# Why: #2845 in Alexa global
'http://www.forumcommunity.net/',
# Why: #2846 in Alexa global
'http://www.regions.com/',
# Why: #2847 in Alexa global
'http://www.sf-express.com/',
# Why: #2848 in Alexa global
'http://www.donkeymails.com/',
# Why: #2849 in Alexa global
'http://www.clubic.com/',
# Why: #2850 in Alexa global
'http://www.aucfan.com/',
# Why: #2851 in Alexa global
'http://www.enterfactory.com/',
# Why: #2852 in Alexa global
'http://www.yandex.com/',
# Why: #2853 in Alexa global
'http://www.iherb.com/',
# Why: #2854 in Alexa global
'http://www.in.gr/',
# Why: #2855 in Alexa global
'http://www.olx.pt/',
# Why: #2856 in Alexa global
'http://www.fbdownloader.com/',
# Why: #2857 in Alexa global
'http://www.autoscout24.it/',
# Why: #2858 in Alexa global
'http://www.siteground.com/',
# Why: #2859 in Alexa global
'http://www.psicofxp.com/',
# Why: #2860 in Alexa global
'http://www.persiangig.com/',
# Why: #2861 in Alexa global
'http://www.metroer.com/',
# Why: #2862 in Alexa global
'http://www.tokopedia.com/',
# Why: #2863 in Alexa global
'http://www.seccam.info/',
# Why: #2864 in Alexa global
'http://www.sport-express.ru/',
# Why: #2865 in Alexa global
'http://www.vodafone.it/',
# Why: #2866 in Alexa global
'http://www.blekko.com/',
# Why: #2867 in Alexa global
'http://www.entekhab.ir/',
# Why: #2868 in Alexa global
'http://www.expressen.se/',
# Why: #2869 in Alexa global
'http://www.zalando.fr/',
# Why: #2870 in Alexa global
'http://525j.com.cn/',
# Why: #2871 in Alexa global
'http://www.hawaaworld.com/',
# Why: #2872 in Alexa global
'http://www.freeonlinegames.com/',
# Why: #2873 in Alexa global
'http://www.google.com.lb/',
# Why: #2874 in Alexa global
'http://www.oricon.co.jp/',
# Why: #2875 in Alexa global
'http://www.apple.com.cn/',
# Why: #2876 in Alexa global
'http://www.ab-in-den-urlaub.de/',
# Why: #2877 in Alexa global
'http://www.android4tw.com/',
# Why: #2879 in Alexa global
'http://www.alriyadh.com/',
# Why: #2880 in Alexa global
'http://www.drugstore.com/',
# Why: #2881 in Alexa global
'http://www.iobit.com/',
# Why: #2882 in Alexa global
'http://www.rei.com/',
# Why: #2883 in Alexa global
'http://www.racing-games.com/',
# Why: #2884 in Alexa global
'http://www.mommyfucktube.com/',
# Why: #2885 in Alexa global
'http://www.pideo.net/',
# Why: #2886 in Alexa global
'http://www.gogoanime.com/',
# Why: #2887 in Alexa global
'http://www.avaxho.me/',
# Why: #2888 in Alexa global
'http://www.christianmingle.com/',
# Why: #2889 in Alexa global
'http://www.activesearchresults.com/',
# Why: #2890 in Alexa global
'http://www.trendsonline.biz/',
# Why: #2891 in Alexa global
'http://www.planetsuzy.org/',
# Why: #2892 in Alexa global
'http://www.rubias19.com/',
# Why: #2893 in Alexa global
'http://www.cleverbridge.com/',
# Why: #2894 in Alexa global
'http://www.jeevansathi.com/',
# Why: #2895 in Alexa global
'http://www.washingtontimes.com/',
# Why: #2896 in Alexa global
'http://www.lcl.fr/',
# Why: #2897 in Alexa global
'http://www.98ia.com/',
# Why: #2899 in Alexa global
'http://www.mercadolibre.com.co/',
# Why: #2900 in Alexa global
'http://www.caijing.com.cn/',
# Why: #2902 in Alexa global
'http://www.n-tv.de/',
# Why: #2903 in Alexa global
'http://www.divyabhaskar.co.in/',
# Why: #2905 in Alexa global
'http://www.airbnb.com/',
# Why: #2907 in Alexa global
'http://www.mybrowserbar.com/',
# Why: #2908 in Alexa global
'http://www.travian.com/',
# Why: #2909 in Alexa global
'http://www.autoblog.com/',
# Why: #2910 in Alexa global
'http://www.blesk.cz/',
# Why: #2911 in Alexa global
'http://www.playboy.com/',
# Why: #2912 in Alexa global
'http://www.p30download.com/',
# Why: #2913 in Alexa global
'http://www.pazienti.net/',
# Why: #2914 in Alexa global
'http://www.uast.ac.ir/',
# Why: #2915 in Alexa global
'http://www.logsoku.com/',
# Why: #2916 in Alexa global
'http://www.zedge.net/',
# Why: #2917 in Alexa global
'http://www.creditmutuel.fr/',
# Why: #2918 in Alexa global
'http://www.absa.co.za/',
# Why: #2919 in Alexa global
'http://www.milliyet.tv/',
# Why: #2920 in Alexa global
'http://www.jiathis.com/',
# Why: #2921 in Alexa global
'http://www.liverpoolfc.tv/',
# Why: #2922 in Alexa global
'http://www.104.com.tw/',
# Why: #2923 in Alexa global
'http://www.dospy.com/',
# Why: #2924 in Alexa global
'http://www.ems.com.cn/',
# Why: #2925 in Alexa global
'http://www.calameo.com/',
# Why: #2926 in Alexa global
'http://www.netsuite.com/',
# Why: #2927 in Alexa global
'http://www.angelfire.com/',
# Why: #2929 in Alexa global
'http://www.snagajob.com/',
# Why: #2930 in Alexa global
'http://www.hollywoodlife.com/',
# Why: #2931 in Alexa global
'http://www.techtudo.com.br/',
# Why: #2932 in Alexa global
'http://www.payserve.com/',
# Why: #2933 in Alexa global
'http://www.portalnet.cl/',
# Why: #2934 in Alexa global
'http://www.worldadult-videos.info/',
# Why: #2935 in Alexa global
'http://www.indianpornvideos.com/',
# Why: #2936 in Alexa global
'http://www.france24.com/',
# Why: #2937 in Alexa global
'http://www.discuss.com.hk/',
# Why: #2938 in Alexa global
'http://www.theplanet.com/',
# Why: #2939 in Alexa global
'http://www.advego.ru/',
# Why: #2940 in Alexa global
'http://www.dion.ne.jp/',
# Why: #2941 in Alexa global
'http://starbaby.cn/',
# Why: #2942 in Alexa global
'http://www.eltiempo.es/',
# Why: #2943 in Alexa global
'http://www.55tuan.com/',
# Why: #2944 in Alexa global
'http://www.snopes.com/',
# Why: #2945 in Alexa global
'http://www.startnow.com/',
# Why: #2946 in Alexa global
'http://www.tucarro.com/',
# Why: #2947 in Alexa global
'http://www.skyscanner.net/',
# Why: #2948 in Alexa global
'http://www.wchonline.com/',
# Why: #2949 in Alexa global
'http://www.gaadi.com/',
# Why: #2950 in Alexa global
'http://www.lindaikeji.blogspot.com/',
# Why: #2952 in Alexa global
'http://www.keywordblocks.com/',
# Why: #2953 in Alexa global
'http://www.apsense.com/',
# Why: #2954 in Alexa global
'http://www.avangate.com/',
# Why: #2955 in Alexa global
'http://www.gandul.info/',
# Why: #2956 in Alexa global
'http://www.google.com.gh/',
# Why: #2957 in Alexa global
'http://www.mybigcommerce.com/',
# Why: #2958 in Alexa global
'http://www.homeaway.com/',
# Why: #2959 in Alexa global
'http://www.wikitravel.org/',
# Why: #2960 in Alexa global
'http://www.etxt.ru/',
# Why: #2961 in Alexa global
'http://www.zerx.ru/',
# Why: #2962 in Alexa global
'http://www.sidereel.com/',
# Why: #2963 in Alexa global
'http://www.edreams.es/',
# Why: #2964 in Alexa global
'http://www.india-forums.com/',
# Why: #2966 in Alexa global
'http://www.infonews.com/',
# Why: #2967 in Alexa global
'http://www.zoominfo.com/',
# Why: #2968 in Alexa global
'http://www.stylebistro.com/',
# Why: #2969 in Alexa global
'http://www.dominos.com/',
# Why: #2970 in Alexa global
'http://591hx.com/',
# Why: #2971 in Alexa global
'http://www.authorize.net/',
# Why: #2972 in Alexa global
'http://www.61baobao.com/',
# Why: #2973 in Alexa global
'http://www.digitalspy.co.uk/',
# Why: #2974 in Alexa global
'http://www.godvine.com/',
# Why: #2975 in Alexa global
'http://www.rednowtube.com/',
# Why: #2976 in Alexa global
'http://www.sony.jp/',
# Why: #2977 in Alexa global
'http://www.appbank.net/',
# Why: #2978 in Alexa global
'http://www.woozgo.fr/',
# Why: #2979 in Alexa global
'http://www.expireddomains.net/',
# Why: #2980 in Alexa global
'http://www.my-uq.com/',
# Why: #2981 in Alexa global
'http://www.peliculasyonkis.com/',
# Why: #2982 in Alexa global
'http://www.forumfree.it/',
# Why: #2983 in Alexa global
'http://www.shangdu.com/',
# Why: #2984 in Alexa global
'http://www.startmyripple.com/',
# Why: #2985 in Alexa global
'http://www.hottube.me/',
# Why: #2986 in Alexa global
'http://www.members.webs.com/',
# Why: #2987 in Alexa global
'http://www.blick.ch/',
# Why: #2988 in Alexa global
'http://www.google.cm/',
# Why: #2989 in Alexa global
'http://iautos.cn/',
# Why: #2990 in Alexa global
'http://www.tomtom.com/',
# Why: #2992 in Alexa global
'http://www.rzd.ru/',
# Why: #2993 in Alexa global
'http://www.opensooq.com/',
# Why: #2995 in Alexa global
'http://www.pizzahut.com/',
# Why: #2996 in Alexa global
'http://www.marksandspencer.com/',
# Why: #2997 in Alexa global
'http://www.filenuke.com/',
# Why: #2998 in Alexa global
'http://www.filelist.ro/',
# Why: #2999 in Alexa global
'http://www.akharinnews.com/',
# Why: #3000 in Alexa global
'http://www.etrade.com/',
# Why: #3002 in Alexa global
'http://www.planetromeo.com/',
# Why: #3003 in Alexa global
'http://www.wpbeginner.com/',
# Why: #3004 in Alexa global
'http://www.bancomercantil.com/',
# Why: #3005 in Alexa global
'http://www.pastdate.com/',
# Why: #3006 in Alexa global
'http://www.webutation.net/',
# Why: #3007 in Alexa global
'http://www.mywebgrocer.com/',
# Why: #3008 in Alexa global
'http://www.mobile.ir/',
# Why: #3009 in Alexa global
'http://www.seemorgh.com/',
# Why: #3010 in Alexa global
'http://www.nhs.uk/',
# Why: #3011 in Alexa global
'http://www.google.ba/',
# Why: #3012 in Alexa global
'http://ileehoo.com/',
# Why: #3013 in Alexa global
'http://www.seobook.com/',
# Why: #3014 in Alexa global
'http://www.wetteronline.de/',
# Why: #3015 in Alexa global
'http://www.happy-porn.com/',
# Why: #3016 in Alexa global
'http://www.theonion.com/',
# Why: #3017 in Alexa global
'http://www.webnode.com/',
# Why: #3018 in Alexa global
'http://www.svaiza.com/',
# Why: #3019 in Alexa global
'http://www.newsbomb.gr/',
# Why: #3020 in Alexa global
'http://www.t88u.com/',
# Why: #3021 in Alexa global
'http://www.tsn.ca/',
# Why: #3022 in Alexa global
'http://www.unity3d.com/',
# Why: #3023 in Alexa global
'http://www.nseindia.com/',
# Why: #3024 in Alexa global
'http://www.juegosdiarios.com/',
# Why: #3025 in Alexa global
'http://www.genieo.com/',
# Why: #3026 in Alexa global
'http://www.kelkoo.com/',
# Why: #3027 in Alexa global
'http://gome.com.cn/',
# Why: #3028 in Alexa global
'http://www.shabdkosh.com/',
# Why: #3029 in Alexa global
'http://www.tecmundo.com.br/',
# Why: #3030 in Alexa global
'http://www.chinaunix.net/',
# Why: #3031 in Alexa global
'http://pchouse.com.cn/',
# Why: #3032 in Alexa global
'http://www.goo-net.com/',
# Why: #3033 in Alexa global
'http://www.asana.com/',
# Why: #3035 in Alexa global
'http://www.hdporn.in/',
# Why: #3036 in Alexa global
'http://www.bannersbroker.com/user/adpubcombo_dashboard/',
# Why: #3037 in Alexa global
'http://www.virtapay.com/',
# Why: #3038 in Alexa global
'http://www.jobdiagnosis.com/',
# Why: #3039 in Alexa global
'http://guokr.com/',
# Why: #3040 in Alexa global
'http://www.clickpoint.it/',
# Why: #3041 in Alexa global
'http://3dmgame.com/',
# Why: #3042 in Alexa global
'http://www.ashleymadison.com/',
# Why: #3043 in Alexa global
'http://www.utsprofitads.com/',
# Why: #3044 in Alexa global
'http://www.google.ee/',
# Why: #3045 in Alexa global
'http://www.365jia.cn/',
# Why: #3046 in Alexa global
'http://www.oyunskor.com/',
# Why: #3047 in Alexa global
'http://www.metro.co.uk/',
# Why: #3048 in Alexa global
'http://www.ebaumsworld.com/',
# Why: #3049 in Alexa global
'http://www.realsimple.com/',
# Why: #3050 in Alexa global
'http://www.3file.info/',
# Why: #3051 in Alexa global
'http://www.xcams.com/',
# Why: #3052 in Alexa global
'http://www.cyberforum.ru/',
# Why: #3053 in Alexa global
'http://www.babble.com/',
# Why: #3054 in Alexa global
'http://www.lidl.de/',
# Why: #3055 in Alexa global
'http://www.pixer.mobi/',
# Why: #3056 in Alexa global
'http://www.yell.com/',
# Why: #3057 in Alexa global
'http://www.alnilin.com/',
# Why: #3058 in Alexa global
'http://www.lurkmore.to/',
# Why: #3059 in Alexa global
'http://www.olx.co.za/',
# Why: #3060 in Alexa global
'http://www.eorezo.com/',
# Why: #3061 in Alexa global
'http://www.baby.ru/',
# Why: #3062 in Alexa global
'http://www.xdf.cn/',
# Why: #3063 in Alexa global
'http://www.redporntube.com/',
# Why: #3064 in Alexa global
'http://www.extabit.com/',
# Why: #3065 in Alexa global
'http://www.wayn.com/',
# Why: #3066 in Alexa global
'http://www.gaana.com/',
# Why: #3067 in Alexa global
'http://www.islamicfinder.org/',
# Why: #3068 in Alexa global
'http://www.venturebeat.com/',
# Why: #3069 in Alexa global
'http://www.played.to/',
# Why: #3070 in Alexa global
'http://www.alrakoba.net/',
# Why: #3071 in Alexa global
'http://www.mouthshut.com/',
# Why: #3072 in Alexa global
'http://www.banquepopulaire.fr/',
# Why: #3073 in Alexa global
'http://www.jal.co.jp/',
# Why: #3074 in Alexa global
'http://www.dasoertliche.de/',
# Why: #3075 in Alexa global
'http://www.1stwebdesigner.com/',
# Why: #3076 in Alexa global
'http://www.tam.com.br/',
# Why: #3077 in Alexa global
'http://www.nature.com/',
# Why: #3078 in Alexa global
'http://www.camfrog.com/',
# Why: #3079 in Alexa global
'http://www.philly.com/',
# Why: #3080 in Alexa global
'http://www.zemtv.com/',
# Why: #3081 in Alexa global
'http://www.oprah.com/',
# Why: #3082 in Alexa global
'http://www.wmaraci.com/',
# Why: #3083 in Alexa global
'http://www.ruvr.ru/',
# Why: #3084 in Alexa global
'http://www.gsn.com/',
# Why: #3085 in Alexa global
'http://www.acrobat.com/',
# Why: #3086 in Alexa global
'http://www.depositfiles.org/',
# Why: #3087 in Alexa global
'http://www.smartresponder.ru/',
# Why: #3088 in Alexa global
'http://www.huxiu.com/',
# Why: #3089 in Alexa global
'http://www.porn-wanted.com/',
# Why: #3090 in Alexa global
'http://www.tripadvisor.fr/',
# Why: #3091 in Alexa global
'http://3366.com/',
# Why: #3092 in Alexa global
'http://www.ranker.com/',
# Why: #3093 in Alexa global
'http://www.cibc.com/',
# Why: #3094 in Alexa global
'http://www.trend.az/',
# Why: #3095 in Alexa global
'http://www.whatsapp.com/',
# Why: #3096 in Alexa global
'http://07073.com/',
# Why: #3097 in Alexa global
'http://www.netload.in/',
# Why: #3098 in Alexa global
'http://www.channel4.com/',
# Why: #3099 in Alexa global
'http://www.yatra.com/',
# Why: #3100 in Alexa global
'http://www.elconfidencial.com/',
# Why: #3101 in Alexa global
'http://www.labnol.org/',
# Why: #3102 in Alexa global
'http://www.google.co.ke/',
# Why: #3103 in Alexa global
'http://www.disneylatino.com/',
# Why: #3104 in Alexa global
'http://www.pconverter.com/',
# Why: #3105 in Alexa global
'http://www.cqnews.net/',
# Why: #3106 in Alexa global
'http://www.blog.co.uk/',
# Why: #3107 in Alexa global
'http://www.immowelt.de/',
# Why: #3108 in Alexa global
'http://www.crunchyroll.com/',
# Why: #3109 in Alexa global
'http://www.gamesgames.com/',
# Why: #3110 in Alexa global
'http://www.protothema.gr/',
# Why: #3111 in Alexa global
'http://www.vmoptions.com/',
# Why: #3112 in Alexa global
'http://www.go2jump.org/',
# Why: #3113 in Alexa global
'http://www.psu.edu/',
# Why: #3114 in Alexa global
'http://www.sanjesh.org/',
# Why: #3115 in Alexa global
'http://www.sportingnews.com/',
# Why: #3116 in Alexa global
'http://www.televisionfanatic.com/',
# Why: #3117 in Alexa global
'http://www.fansshare.com/',
# Why: #3118 in Alexa global
'http://www.xcams4u.com/',
# Why: #3119 in Alexa global
'http://www.dict.cn/',
# Why: #3120 in Alexa global
'http://www.madthumbs.com/',
# Why: #3121 in Alexa global
'http://www.ebates.com/',
# Why: #3122 in Alexa global
'http://www.eromon.net/',
# Why: #3123 in Alexa global
'http://www.copyblogger.com/',
# Why: #3124 in Alexa global
'http://www.flirt4free.com/',
# Why: #3125 in Alexa global
'http://www.gaytube.com/',
# Why: #3126 in Alexa global
'http://www.notdoppler.com/',
# Why: #3127 in Alexa global
'http://www.allmyvideos.net/',
# Why: #3128 in Alexa global
'http://www.cam4.de.com/',
# Why: #3129 in Alexa global
'http://www.chosun.com/',
# Why: #3130 in Alexa global
'http://www.adme.ru/',
# Why: #3131 in Alexa global
'http://www.codeplex.com/',
# Why: #3132 in Alexa global
'http://www.jumia.com.ng/',
# Why: #3133 in Alexa global
'http://www.digitaltrends.com/',
# Why: #3134 in Alexa global
'http://www.b92.net/',
# Why: #3135 in Alexa global
'http://www.miniinthebox.com/',
# Why: #3136 in Alexa global
'http://www.radaronline.com/',
# Why: #3137 in Alexa global
'http://www.hujiang.com/',
# Why: #3138 in Alexa global
'http://www.gardenweb.com/',
# Why: #3139 in Alexa global
'http://www.pizap.com/',
# Why: #3140 in Alexa global
'http://www.iptorrents.com/',
# Why: #3141 in Alexa global
'http://www.yuku.com/',
# Why: #3142 in Alexa global
'http://www.mega-giochi.it/',
# Why: #3143 in Alexa global
'http://www.nrk.no/',
# Why: #3144 in Alexa global
'http://www.99designs.com/',
# Why: #3145 in Alexa global
'http://www.uscis.gov/',
# Why: #3146 in Alexa global
'http://www.lostfilm.tv/',
# Why: #3147 in Alexa global
'http://www.mileroticos.com/',
# Why: #3148 in Alexa global
'http://www.republika.co.id/',
# Why: #3149 in Alexa global
'http://www.sharethis.com/',
# Why: #3150 in Alexa global
'http://www.samplicio.us/',
# Why: #3151 in Alexa global
'http://www.1saleaday.com/',
# Why: #3152 in Alexa global
'http://www.vonelo.com/',
# Why: #3153 in Alexa global
'http://www.oyunmoyun.com/',
# Why: #3154 in Alexa global
'http://www.flightradar24.com/',
# Why: #3155 in Alexa global
'http://www.geo.tv/',
# Why: #3156 in Alexa global
'http://www.nexusmods.com/',
# Why: #3157 in Alexa global
'http://www.mizuhobank.co.jp/',
# Why: #3158 in Alexa global
'http://www.blogspot.fi/',
# Why: #3159 in Alexa global
'http://www.directtrack.com/',
# Why: #3160 in Alexa global
'http://www.media.net/',
# Why: #3161 in Alexa global
'http://www.bigresource.com/',
# Why: #3162 in Alexa global
'http://www.free-lance.ru/',
# Why: #3163 in Alexa global
'http://www.loveplanet.ru/',
# Why: #3164 in Alexa global
'http://www.ilfattoquotidiano.it/',
# Why: #3165 in Alexa global
'http://www.coolmovs.com/',
# Why: #3166 in Alexa global
'http://www.mango.com/',
# Why: #3167 in Alexa global
'http://www.nj.com/',
# Why: #3168 in Alexa global
'http://www.magazineluiza.com.br/',
# Why: #3169 in Alexa global
'http://www.datehookup.com/',
# Why: #3170 in Alexa global
'http://www.registro.br/',
# Why: #3171 in Alexa global
'http://www.debenhams.com/',
# Why: #3172 in Alexa global
'http://www.jqueryui.com/',
# Why: #3173 in Alexa global
'http://www.palcomp3.com/',
# Why: #3174 in Alexa global
'http://www.opensubtitles.org/',
# Why: #3175 in Alexa global
'http://www.socialmediatoday.com/',
# Why: #3176 in Alexa global
'http://3158.cn/',
# Why: #3178 in Alexa global
'http://www.allgameshome.com/',
# Why: #3179 in Alexa global
'http://www.pricegrabber.com/',
# Why: #3180 in Alexa global
'http://www.lufthansa.com/',
# Why: #3181 in Alexa global
'http://www.ip-adress.com/',
# Why: #3182 in Alexa global
'http://www.business-standard.com/',
# Why: #3183 in Alexa global
'http://www.games.com/',
# Why: #3184 in Alexa global
'http://www.zaman.com.tr/',
# Why: #3185 in Alexa global
'http://www.jagranjosh.com/',
# Why: #3186 in Alexa global
'http://www.mint.com/',
# Why: #3187 in Alexa global
'http://www.gorillavid.in/',
# Why: #3188 in Alexa global
'http://www.google.com.om/',
# Why: #3189 in Alexa global
'http://www.blogbigtime.com/',
# Why: #3190 in Alexa global
'http://www.books.com.tw/',
# Why: #3191 in Alexa global
'http://www.korrespondent.net/',
# Why: #3192 in Alexa global
'http://www.nymag.com/',
# Why: #3193 in Alexa global
'http://www.proporn.com/',
# Why: #3194 in Alexa global
'http://ycasmd.info/',
# Why: #3195 in Alexa global
'http://www.persiantools.com/',
# Why: #3196 in Alexa global
'http://www.torrenthound.com/',
# Why: #3197 in Alexa global
'http://www.bestsexo.com/',
# Why: #3198 in Alexa global
'http://www.alwatanvoice.com/',
# Why: #3199 in Alexa global
'http://www.jahannews.com/',
# Why: #3200 in Alexa global
'http://www.bluewin.ch/',
# Why: #3201 in Alexa global
'http://www.sap.com/',
# Why: #3203 in Alexa global
'http://www.rzb.ir/',
# Why: #3204 in Alexa global
'http://www.myorderbox.com/',
# Why: #3205 in Alexa global
'http://www.dealsandsavings.net/',
# Why: #3206 in Alexa global
'http://www.goldenline.pl/',
# Why: #3207 in Alexa global
'http://www.stuff.co.nz/',
# Why: #3208 in Alexa global
'http://www.opentable.com/',
# Why: #3209 in Alexa global
'http://www.4738.com/',
# Why: #3210 in Alexa global
'http://www.freshersworld.com/',
# Why: #3211 in Alexa global
'http://www.state.pa.us/',
# Why: #3212 in Alexa global
'http://www.lavanguardia.com/',
# Why: #3213 in Alexa global
'http://www.sudu.cn/',
# Why: #3214 in Alexa global
'http://www.mob.org/',
# Why: #3215 in Alexa global
'http://www.vodafone.in/',
# Why: #3216 in Alexa global
'http://www.blogdetik.com/',
# Why: #3217 in Alexa global
'http://www.888.it/',
# Why: #3218 in Alexa global
'http://www.passportindia.gov.in/',
# Why: #3219 in Alexa global
'http://www.ssa.gov/',
# Why: #3220 in Alexa global
'http://www.desitvforum.net/',
# Why: #3221 in Alexa global
'http://www.8684.cn/',
# Why: #3222 in Alexa global
'http://www.rajasthan.gov.in/',
# Why: #3223 in Alexa global
'http://www.youtube.com/user/PewDiePie/',
# Why: #3224 in Alexa global
'http://www.zonealarm.com/',
# Why: #3225 in Alexa global
'http://www.locaweb.com.br/',
# Why: #3226 in Alexa global
'http://logme.in/',
# Why: #3227 in Alexa global
'http://www.fetlife.com/',
# Why: #3228 in Alexa global
'http://www.lyricsfreak.com/',
# Why: #3229 in Alexa global
'http://www.te3p.com/',
# Why: #3230 in Alexa global
'http://www.hmrc.gov.uk/',
# Why: #3231 in Alexa global
'http://www.bravoerotica.com/',
# Why: #3232 in Alexa global
'http://www.kolesa.kz/',
# Why: #3233 in Alexa global
'http://www.vinescope.com/',
# Why: #3234 in Alexa global
'http://www.shoplocal.com/',
# Why: #3236 in Alexa global
'http://b2b.cn/',
# Why: #3237 in Alexa global
'http://www.mydrivers.com/',
# Why: #3238 in Alexa global
'http://www.xhamster.com/user/video/',
# Why: #3239 in Alexa global
'http://www.bigideamastermind.com/',
# Why: #3240 in Alexa global
'http://www.uncoverthenet.com/',
# Why: #3241 in Alexa global
'http://www.ragecomic.com/',
# Why: #3242 in Alexa global
'http://www.yodobashi.com/',
# Why: #3243 in Alexa global
'http://titan24.com/',
# Why: #3244 in Alexa global
'http://www.nocoty.pl/',
# Why: #3245 in Alexa global
'http://www.turkishairlines.com/',
# Why: #3246 in Alexa global
'http://www.liputan6.com/',
# Why: #3247 in Alexa global
'http://www.3suisses.fr/',
# Why: #3248 in Alexa global
'http://www.cancan.ro/',
# Why: #3249 in Alexa global
'http://www.apetube.com/',
# Why: #3250 in Alexa global
'http://www.kurir-info.rs/',
# Why: #3251 in Alexa global
'http://www.wow.com/',
# Why: #3252 in Alexa global
'http://www.myblogguest.com/',
# Why: #3253 in Alexa global
'http://www.wp.com/',
# Why: #3254 in Alexa global
'http://www.tre.it/',
# Why: #3255 in Alexa global
'http://www.livrariasaraiva.com.br/',
# Why: #3256 in Alexa global
'http://www.ubuntuforums.org/',
# Why: #3257 in Alexa global
'http://www.fujitv.co.jp/',
# Why: #3258 in Alexa global
'http://www.serverfault.com/',
# Why: #3259 in Alexa global
'http://www.princeton.edu/',
# Why: #3260 in Alexa global
'http://www.experienceproject.com/',
# Why: #3261 in Alexa global
'http://www.ero-video.net/',
# Why: #3262 in Alexa global
'http://www.west263.com/',
# Why: #3263 in Alexa global
'http://www.nguoiduatin.vn/',
# Why: #3264 in Alexa global
'http://www.findthebest.com/',
# Why: #3265 in Alexa global
'http://www.iol.pt/',
# Why: #3266 in Alexa global
'http://www.hotukdeals.com/',
# Why: #3267 in Alexa global
'http://www.filmifullizle.com/',
# Why: #3268 in Alexa global
'http://www.blog.hu/',
# Why: #3269 in Alexa global
'http://www.dailyfinance.com/',
# Why: #3270 in Alexa global
'http://www.bigxvideos.com/',
# Why: #3271 in Alexa global
'http://www.adreactor.com/',
# Why: #3272 in Alexa global
'http://www.fmworld.net/',
# Why: #3273 in Alexa global
'http://www.fumu.com/',
# Why: #3274 in Alexa global
'http://www.ntv.ru/',
# Why: #3275 in Alexa global
'http://www.poringa.net/',
# Why: #3276 in Alexa global
'http://www.syosetu.com/',
# Why: #3277 in Alexa global
'http://www.giantsextube.com/',
# Why: #3278 in Alexa global
'http://www.uuu9.com/',
# Why: #3279 in Alexa global
'http://www.babosas.com/',
# Why: #3280 in Alexa global
'http://www.square-enix.com/',
# Why: #3281 in Alexa global
'http://www.bankia.es/',
# Why: #3282 in Alexa global
'http://www.freedownloadmanager.org/',
# Why: #3283 in Alexa global
'http://www.add-anime.net/',
# Why: #3284 in Alexa global
'http://www.tuttomercatoweb.com/',
# Why: #3285 in Alexa global
'http://www.192.com/',
# Why: #3286 in Alexa global
'http://www.freekaamaal.com/',
# Why: #3287 in Alexa global
'http://www.youngpornvideos.com/',
# Why: #3288 in Alexa global
'http://www.nbc.com/',
# Why: #3289 in Alexa global
'http://www.jne.co.id/',
# Why: #3290 in Alexa global
'http://www.fobshanghai.com/',
# Why: #3291 in Alexa global
'http://www.johnlewis.com/',
# Why: #3292 in Alexa global
'http://www.mvideo.ru/',
# Why: #3293 in Alexa global
'http://www.bhinneka.com/',
# Why: #3294 in Alexa global
'http://www.gooddrama.net/',
# Why: #3295 in Alexa global
'http://www.lobstertube.com/',
# Why: #3296 in Alexa global
'http://www.ovguide.com/',
# Why: #3297 in Alexa global
'http://www.joemonster.org/',
# Why: #3298 in Alexa global
'http://editor.wix.com/',
# Why: #3299 in Alexa global
'http://www.wechat.com/',
# Why: #3300 in Alexa global
'http://www.locanto.in/',
# Why: #3301 in Alexa global
'http://www.video2mp3.net/',
# Why: #3303 in Alexa global
'http://www.couchsurfing.org/',
# Why: #3304 in Alexa global
'http://www.tchibo.de/',
# Why: #3305 in Alexa global
'http://rol.ro/',
# Why: #3306 in Alexa global
'http://www.toroporno.com/',
# Why: #3307 in Alexa global
'http://www.backlinkwatch.com/',
# Why: #3308 in Alexa global
'http://www.greatergood.com/',
# Why: #3309 in Alexa global
'http://www.smartaddressbar.com/',
# Why: #3310 in Alexa global
'http://www.getgoodlinks.ru/',
# Why: #3311 in Alexa global
'http://www.fitbit.com/',
# Why: #3312 in Alexa global
'http://www.elcorteingles.es/',
# Why: #3313 in Alexa global
'http://www.up2c.com/',
# Why: #3314 in Alexa global
'http://www.rg.ru/',
# Why: #3315 in Alexa global
'http://www.ftalk.com/',
# Why: #3316 in Alexa global
'http://www.apartmenttherapy.com/',
# Why: #3317 in Alexa global
'http://www.blogspot.hu/',
# Why: #3318 in Alexa global
'http://www.e-rewards.com/',
# Why: #3319 in Alexa global
'http://weloveshopping.com/',
# Why: #3320 in Alexa global
'http://www.swtor.com/',
# Why: #3321 in Alexa global
'http://www.abs-cbnnews.com/',
# Why: #3322 in Alexa global
'http://www.webpagetest.org/',
# Why: #3323 in Alexa global
'http://www.ricardo.ch/',
# Why: #3324 in Alexa global
'http://www.ghatreh.com/',
# Why: #3325 in Alexa global
'http://www.ibps.in/',
# Why: #3326 in Alexa global
'http://www.moneymakergroup.com/',
# Why: #3327 in Alexa global
'http://www.exist.ru/',
# Why: #3328 in Alexa global
'http://www.kakprosto.ru/',
# Why: #3329 in Alexa global
'http://www.gradeuptube.com/',
# Why: #3330 in Alexa global
'http://lastampa.it/',
# Why: #3331 in Alexa global
'http://www.medicinenet.com/',
# Why: #3332 in Alexa global
'http://www.theknot.com/',
# Why: #3333 in Alexa global
'http://www.yale.edu/',
# Why: #3334 in Alexa global
'http://www.mail.uol.com.br/',
# Why: #3335 in Alexa global
'http://www.okazii.ro/',
# Why: #3336 in Alexa global
'http://www.wa.gov/',
# Why: #3337 in Alexa global
'http://www.gmhuowan.com/',
# Why: #3338 in Alexa global
'http://www.cnhubei.com/',
# Why: #3339 in Alexa global
'http://www.dickssportinggoods.com/',
# Why: #3340 in Alexa global
'http://instaforex.com/',
# Why: #3341 in Alexa global
'http://www.zdf.de/',
# Why: #3342 in Alexa global
'http://www.getpocket.com/',
# Why: #3343 in Alexa global
'http://www.takungpao.com/',
# Why: #3344 in Alexa global
'http://www.junkmail.co.za/',
# Why: #3345 in Alexa global
'http://www.tripwiremagazine.com/',
# Why: #3346 in Alexa global
'http://www.popcap.com/',
# Why: #3347 in Alexa global
'http://www.kotobank.jp/',
# Why: #3348 in Alexa global
'http://www.bangbros.com/',
# Why: #3349 in Alexa global
'http://www.shtyle.fm/',
# Why: #3350 in Alexa global
'http://www.jungle.gr/',
# Why: #3351 in Alexa global
'http://www.apserver.net/',
# Why: #3352 in Alexa global
'http://www.mzamin.com/',
# Why: #3353 in Alexa global
'http://www.google.lu/',
# Why: #3354 in Alexa global
'http://www.squarebux.com/',
# Why: #3355 in Alexa global
'http://www.bollywoodhungama.com/',
# Why: #3356 in Alexa global
'http://www.milfmovs.com/',
# Why: #3357 in Alexa global
'http://www.softonic.it/',
# Why: #3358 in Alexa global
'http://www.hsw.cn/',
# Why: #3359 in Alexa global
'http://www.cyberciti.biz/',
# Why: #3360 in Alexa global
'http://www.scout.com/',
# Why: #3361 in Alexa global
'http://www.teensnow.com/',
# Why: #3362 in Alexa global
'http://www.pornper.com/',
# Why: #3363 in Alexa global
'http://www.torrentreactor.net/',
# Why: #3364 in Alexa global
'http://www.smotri.com/',
# Why: #3365 in Alexa global
'http://www.startpage.com/',
# Why: #3366 in Alexa global
'http://www.climatempo.com.br/',
# Why: #3367 in Alexa global
'http://www.bigrock.in/',
# Why: #3368 in Alexa global
'http://www.kajabi.com/',
# Why: #3369 in Alexa global
'http://www.imgchili.com/',
# Why: #3370 in Alexa global
'http://www.dogpile.com/',
# Why: #3371 in Alexa global
'http://www.thestreet.com/',
# Why: #3372 in Alexa global
'http://www.sport24.gr/',
# Why: #3373 in Alexa global
'http://www.tophotels.ru/',
# Why: #3374 in Alexa global
'http://www.shopping.uol.com.br/',
# Why: #3375 in Alexa global
'http://www.bbva.es/',
# Why: #3376 in Alexa global
'http://www.perfectmoney.com/',
# Why: #3377 in Alexa global
'http://www.cashmachines2.com/',
# Why: #3378 in Alexa global
'http://www.skroutz.gr/',
# Why: #3379 in Alexa global
'http://www.logitech.com/',
# Why: #3380 in Alexa global
'http://www.seriescoco.com/',
# Why: #3381 in Alexa global
'http://www.fastclick.com/',
# Why: #3382 in Alexa global
'http://www.cambridge.org/',
# Why: #3383 in Alexa global
'http://www.fark.com/',
# Why: #3384 in Alexa global
'http://www.krypt.com/',
# Why: #3385 in Alexa global
'http://www.indiangilma.com/',
# Why: #3386 in Alexa global
'http://www.safe-swaps.com/',
# Why: #3387 in Alexa global
'http://www.trenitalia.com/',
# Why: #3388 in Alexa global
'http://www.flycell.com.mx/',
# Why: #3389 in Alexa global
'http://www.livefreefun.com/',
# Why: #3390 in Alexa global
'http://www.ourtoolbar.com/',
# Why: #3391 in Alexa global
'http://www.anandtech.com/',
# Why: #3392 in Alexa global
'http://www.neimanmarcus.com/',
# Why: #3393 in Alexa global
'http://www.lelong.com.my/',
# Why: #3394 in Alexa global
'http://www.pulscen.ru/',
# Why: #3395 in Alexa global
'http://www.paginegialle.it/',
# Why: #3396 in Alexa global
'http://www.intelius.com/',
# Why: #3397 in Alexa global
'http://www.orange.pl/',
# Why: #3398 in Alexa global
'http://www.aktuality.sk/',
# Why: #3399 in Alexa global
'http://www.webgame.in.th/',
# Why: #3400 in Alexa global
'http://www.runescape.com/',
# Why: #3401 in Alexa global
'http://www.rocketnews24.com/',
# Why: #3402 in Alexa global
'http://www.lineadirecta.com/',
# Why: #3403 in Alexa global
'http://www.origin.com/',
# Why: #3404 in Alexa global
'http://www.newsbeast.gr/',
# Why: #3405 in Alexa global
'http://www.justhookup.com/',
# Why: #3406 in Alexa global
'http://www.rakuten-bank.co.jp/',
# Why: #3407 in Alexa global
'http://www.lifenews.ru/',
# Why: #3408 in Alexa global
'http://www.sitemeter.com/',
# Why: #3410 in Alexa global
'http://www.isbank.com.tr/',
# Why: #3411 in Alexa global
'http://www.commerzbanking.de/',
# Why: #3412 in Alexa global
'http://www.marthastewart.com/',
# Why: #3413 in Alexa global
'http://www.ntvmsnbc.com/',
# Why: #3414 in Alexa global
'http://www.seloger.com/',
# Why: #3415 in Alexa global
'http://www.vend-o.com/',
# Why: #3416 in Alexa global
'http://www.almanar.com.lb/',
# Why: #3417 in Alexa global
'http://www.sifyitest.com/',
# Why: #3418 in Alexa global
'http://taojindi.com/',
# Why: #3419 in Alexa global
'http://www.mylife.com/',
# Why: #3420 in Alexa global
'http://www.talkfusion.com/',
# Why: #3421 in Alexa global
'http://www.hichina.com/',
# Why: #3422 in Alexa global
'http://www.paruvendu.fr/',
# Why: #3423 in Alexa global
'http://www.admcsport.com/',
# Why: #3424 in Alexa global
'http://www.tudogostoso.uol.com.br/',
# Why: #3425 in Alexa global
'http://www.faz.net/',
# Why: #3426 in Alexa global
'http://www.narutoget.com/',
# Why: #3427 in Alexa global
'http://www.wufoo.com/',
# Why: #3428 in Alexa global
'http://www.feedads-srv.com/',
# Why: #3429 in Alexa global
'http://www.gophoto.it/',
# Why: #3430 in Alexa global
'http://www.tgju.org/',
# Why: #3431 in Alexa global
'http://www.dynamicdrive.com/',
# Why: #3432 in Alexa global
'http://www.centurylink.net/',
# Why: #3433 in Alexa global
'http://www.ngs.ru/',
# Why: #3434 in Alexa global
'http://anyap.info/',
# Why: #3435 in Alexa global
'http://www.dailykos.com/',
# Why: #3437 in Alexa global
'http://www.95559.com.cn/',
# Why: #3438 in Alexa global
'http://www.malaysiakini.com/',
# Why: #3439 in Alexa global
'http://www.uefa.com/',
# Why: #3440 in Alexa global
'http://www.socialmediaexaminer.com/',
# Why: #3441 in Alexa global
'http://www.empowernetwork.com/qokpPCiefhWcRT/',
# Why: #3442 in Alexa global
'http://www.peperonity.de/',
# Why: #3443 in Alexa global
'http://www.support.wordpress.com/',
# Why: #3444 in Alexa global
'http://www.hola.com/',
# Why: #3445 in Alexa global
'http://www.readmanga.eu/',
# Why: #3446 in Alexa global
'http://www.jstv.com/',
# Why: #3447 in Alexa global
'http://www.irib.ir/',
# Why: #3448 in Alexa global
'http://www.bookingbuddy.com/',
# Why: #3449 in Alexa global
'http://www.computerhope.com/',
# Why: #3450 in Alexa global
'http://www.ilovemobi.com/',
# Why: #3451 in Alexa global
'http://www.pinkrod.com/',
# Why: #3452 in Alexa global
'http://www.videobash.com/',
# Why: #3453 in Alexa global
'http://www.alfemminile.com/',
# Why: #3454 in Alexa global
'http://www.tu.tv/',
# Why: #3455 in Alexa global
'http://www.utro.ru/',
# Why: #3456 in Alexa global
'http://www.urbanoutfitters.com/',
# Why: #3457 in Alexa global
'http://www.autozone.com/',
# Why: #3458 in Alexa global
'http://www.gilt.com/',
# Why: #3459 in Alexa global
'http://www.atpworldtour.com/',
# Why: #3460 in Alexa global
'http://www.goibibo.com/',
# Why: #3461 in Alexa global
'http://www.propellerpops.com/',
# Why: #3462 in Alexa global
'http://www.cornell.edu/',
# Why: #3463 in Alexa global
'http://www.flashscore.com/',
# Why: #3464 in Alexa global
'http://www.babyblog.ru/',
# Why: #3465 in Alexa global
'http://www.sport-fm.gr/',
# Why: #3466 in Alexa global
'http://www.viamichelin.fr/',
# Why: #3467 in Alexa global
'http://www.newyorker.com/',
# Why: #3468 in Alexa global
'http://www.tagesschau.de/',
# Why: #3469 in Alexa global
'http://www.guiamais.com.br/',
# Why: #3470 in Alexa global
'http://www.jeux.fr/',
# Why: #3471 in Alexa global
'http://www.pontofrio.com.br/',
# Why: #3472 in Alexa global
'http://www.dm5.com/',
# Why: #3474 in Alexa global
'http://www.ss.lv/',
# Why: #3475 in Alexa global
'http://www.mirtesen.ru/',
# Why: #3476 in Alexa global
'http://www.money.pl/',
# Why: #3477 in Alexa global
'http://www.tlbsearch.com/',
# Why: #3478 in Alexa global
'http://www.usembassy.gov/',
# Why: #3479 in Alexa global
'http://www.cineblog01.net/',
# Why: #3480 in Alexa global
'http://www.nur.kz/',
# Why: #3481 in Alexa global
'http://www.hotnewhiphop.com/',
# Why: #3482 in Alexa global
'http://www.mp3sheriff.com/',
# Why: #3483 in Alexa global
'http://www.games.co.id/',
# Why: #3485 in Alexa global
'http://www.deviantclip.com/',
# Why: #3486 in Alexa global
'http://www.list.ru/',
# Why: #3487 in Alexa global
'http://www.xitek.com/',
# Why: #3488 in Alexa global
'http://www.netvibes.com/',
# Why: #3489 in Alexa global
'http://www.24sata.hr/',
# Why: #3490 in Alexa global
'http://www.usda.gov/',
# Why: #3491 in Alexa global
'http://www.zerofreeporn.com/',
# Why: #3492 in Alexa global
'http://www.tvb.com/',
# Why: #3493 in Alexa global
'http://www.decolar.com/',
# Why: #3494 in Alexa global
'http://www.worldfree4u.com/',
# Why: #3495 in Alexa global
'http://www.dzone.com/',
# Why: #3496 in Alexa global
'http://www.wikiquote.org/',
# Why: #3497 in Alexa global
'http://www.techtunes.com.bd/',
# Why: #3498 in Alexa global
'http://www.pornup.me/',
# Why: #3499 in Alexa global
'http://www.blogutils.net/',
# Why: #3500 in Alexa global
'http://www.yupoo.com/',
# Why: #3501 in Alexa global
'http://www.peoplesmart.com/',
# Why: #3502 in Alexa global
'http://www.kijiji.it/',
# Why: #3503 in Alexa global
'http://usairways.com/',
# Why: #3504 in Alexa global
'http://www.betfred.com/',
# Why: #3505 in Alexa global
'http://www.ow.ly/',
# Why: #3506 in Alexa global
'http://www.nsw.gov.au/',
# Why: #3507 in Alexa global
'http://www.mci.ir/',
# Why: #3508 in Alexa global
'http://www.iranecar.com/',
# Why: #3509 in Alexa global
'http://www.wisegeek.com/',
# Why: #3510 in Alexa global
'http://www.gocomics.com/',
# Why: #3511 in Alexa global
'http://www.bramjnet.com/',
# Why: #3512 in Alexa global
'http://www.bit.ly/',
# Why: #3514 in Alexa global
'http://www.timesofindia.com/',
# Why: #3515 in Alexa global
'http://www.xingcloud.com/',
# Why: #3516 in Alexa global
'http://www.geocities.co.jp/',
# Why: #3517 in Alexa global
'http://www.tfl.gov.uk/',
# Why: #3518 in Alexa global
'http://www.derstandard.at/',
# Why: #3519 in Alexa global
'http://www.icq.com/',
# Why: #3520 in Alexa global
'http://www.orange.co.uk/',
# Why: #3521 in Alexa global
'http://www.pornokopilka.info/',
# Why: #3522 in Alexa global
'http://www.88db.com/',
# Why: #3524 in Alexa global
'http://www.house365.com/',
# Why: #3525 in Alexa global
'http://www.collegehumor.com/',
# Why: #3526 in Alexa global
'http://www.gfxtra.com/',
# Why: #3527 in Alexa global
'http://www.borsapernegati.com/',
# Why: #3528 in Alexa global
'http://pensador.uol.com.br/',
# Why: #3529 in Alexa global
'http://www.surveygifters.com/',
# Why: #3531 in Alexa global
'http://bmail.uol.com.br/',
# Why: #3532 in Alexa global
'http://www.ec21.com/',
# Why: #3533 in Alexa global
'http://www.seoprofiler.com/',
# Why: #3534 in Alexa global
'http://www.goldporntube.com/',
# Why: #3535 in Alexa global
'http://www.tvtropes.org/',
# Why: #3536 in Alexa global
'http://www.techtarget.com/',
# Why: #3537 in Alexa global
'http://www.juno.com/',
# Why: #3538 in Alexa global
'http://www.visual.ly/',
# Why: #3539 in Alexa global
'http://www.dardarkom.com/',
# Why: #3540 in Alexa global
'http://www.showup.tv/',
# Why: #3541 in Alexa global
'http://www.three.co.uk/',
# Why: #3543 in Alexa global
'http://www.shopstyle.com/',
# Why: #3544 in Alexa global
'http://www.penguinvids.com/',
# Why: #3545 in Alexa global
'http://www.trainenquiry.com/',
# Why: #3546 in Alexa global
'http://www.soha.vn/',
# Why: #3547 in Alexa global
'http://www.fengniao.com/',
# Why: #3548 in Alexa global
'http://carschina.com/',
# Why: #3549 in Alexa global
'http://www.500wan.com/',
# Why: #3550 in Alexa global
'http://www.perfectinter.net/',
# Why: #3551 in Alexa global
'http://www.elog-ch.com/',
# Why: #3552 in Alexa global
'http://www.thetoptens.com/',
# Why: #3553 in Alexa global
'http://www.ecnavi.jp/',
# Why: #3554 in Alexa global
'http://www.1616.net/',
# Why: #3555 in Alexa global
'http://www.nationwide.co.uk/',
# Why: #3556 in Alexa global
'http://www.myhabit.com/',
# Why: #3557 in Alexa global
'http://www.kinomaniak.tv/',
# Why: #3558 in Alexa global
'http://www.googlecode.com/',
# Why: #3559 in Alexa global
'http://www.kddi.com/',
# Why: #3560 in Alexa global
'http://www.wyborcza.biz/',
# Why: #3561 in Alexa global
'http://www.gtbank.com/',
# Why: #3562 in Alexa global
'http://zigwheels.com/',
# Why: #3563 in Alexa global
'http://www.lepoint.fr/',
# Why: #3564 in Alexa global
'http://www.formula1.com/',
# Why: #3565 in Alexa global
'http://www.nissen.co.jp/',
# Why: #3566 in Alexa global
'http://www.baomoi.com/',
# Why: #3567 in Alexa global
'http://www.apa.az/',
# Why: #3568 in Alexa global
'http://www.movie2k.to/',
# Why: #3569 in Alexa global
'http://www.irpopup.ir/',
# Why: #3570 in Alexa global
'http://www.nps.gov/',
# Why: #3571 in Alexa global
'http://www.lachainemeteo.com/',
# Why: #3572 in Alexa global
'http://www.x-art.com/',
# Why: #3573 in Alexa global
'http://www.bakecaincontrii.com/',
# Why: #3574 in Alexa global
'http://www.longtailvideo.com/',
# Why: #3575 in Alexa global
'http://www.yengo.com/',
# Why: #3576 in Alexa global
'http://www.listentoyoutube.com/',
# Why: #3577 in Alexa global
'http://www.dreamhost.com/',
# Why: #3578 in Alexa global
'http://www.cari.com.my/',
# Why: #3579 in Alexa global
'http://www.sergeymavrodi.com/',
# Why: #3580 in Alexa global
'http://www.boursorama.com/',
# Why: #3581 in Alexa global
'http://www.extra.com.br/',
# Why: #3582 in Alexa global
'http://www.msnbc.com/',
# Why: #3583 in Alexa global
'http://www.xiaomi.cn/',
# Why: #3585 in Alexa global
'http://www.uwants.com/',
# Why: #3586 in Alexa global
'http://www.utexas.edu/',
# Why: #3587 in Alexa global
'http://www.alc.co.jp/',
# Why: #3588 in Alexa global
'http://www.minijuegos.com/',
# Why: #3589 in Alexa global
'http://www.mumayi.com/',
# Why: #3590 in Alexa global
'http://www.sogi.com.tw/',
# Why: #3591 in Alexa global
'http://www.skorer.tv/',
# Why: #3592 in Alexa global
'http://ddmap.com/',
# Why: #3593 in Alexa global
'http://www.ebog.com/',
# Why: #3594 in Alexa global
'http://www.artlebedev.ru/',
# Why: #3595 in Alexa global
'http://www.venere.com/',
# Why: #3596 in Alexa global
'http://www.academic.ru/',
# Why: #3597 in Alexa global
'http://www.mako.co.il/',
# Why: #3598 in Alexa global
'http://www.nabble.com/',
# Why: #3599 in Alexa global
'http://www.autodesk.com/',
# Why: #3600 in Alexa global
'http://www.vertitechnologygroup.com/',
# Why: #3601 in Alexa global
'http://www.leaseweb.com/',
# Why: #3602 in Alexa global
'http://www.yoox.com/',
# Why: #3603 in Alexa global
'http://www.papajohns.com/',
# Why: #3604 in Alexa global
'http://www.unmillondeutilidades.com/',
# Why: #3605 in Alexa global
'http://www.webmasters.ru/',
# Why: #3606 in Alexa global
'http://www.seoclerks.com/',
# Why: #3607 in Alexa global
'http://www.yootheme.com/',
# Why: #3608 in Alexa global
'http://www.google.com.py/',
# Why: #3609 in Alexa global
'http://www.beemp3.com/',
# Why: #3610 in Alexa global
'http://www.yepme.com/',
# Why: #3611 in Alexa global
'http://www.alef.ir/',
# Why: #3613 in Alexa global
'http://www.gotowebinar.com/',
# Why: #3614 in Alexa global
'http://www.onec.dz/',
# Why: #3615 in Alexa global
'http://www.bonprix.de/',
# Why: #3616 in Alexa global
'http://www.landsend.com/',
# Why: #3617 in Alexa global
'http://www.libertatea.ro/',
# Why: #3618 in Alexa global
'http://www.timeout.com/',
# Why: #3619 in Alexa global
'http://www.appnexus.com/',
# Why: #3620 in Alexa global
'http://www.uproxx.com/',
# Why: #3622 in Alexa global
'http://www.alohatube.com/',
# Why: #3623 in Alexa global
'http://www.citilink.ru/',
# Why: #3624 in Alexa global
'http://www.askubuntu.com/',
# Why: #3625 in Alexa global
'http://www.freemake.com/',
# Why: #3626 in Alexa global
'http://www.rockettheme.com/',
# Why: #3627 in Alexa global
'http://www.tupaki.com/',
# Why: #3628 in Alexa global
'http://www.53.com/',
# Why: #3629 in Alexa global
'http://www.tune.pk/',
# Why: #3630 in Alexa global
'http://www.standardchartered.com/',
# Why: #3631 in Alexa global
'http://www.video-i365.com/',
# Why: #3632 in Alexa global
'http://www.knowyourmeme.com/',
# Why: #3633 in Alexa global
'http://www.gofeminin.de/',
# Why: #3634 in Alexa global
'http://www.vmware.com/',
# Why: #3635 in Alexa global
'http://www.vbox7.com/',
# Why: #3636 in Alexa global
'http://www.webfail.com/',
# Why: #3637 in Alexa global
'http://www.onewebsearch.com/',
# Why: #3638 in Alexa global
'http://www.xnxxmovies.com/',
# Why: #3639 in Alexa global
'http://www.blogspot.hk/',
# Why: #3640 in Alexa global
'http://www.hgtv.com/',
# Why: #3641 in Alexa global
'http://www.findagrave.com/',
# Why: #3642 in Alexa global
'http://www.yoast.com/',
# Why: #3643 in Alexa global
'http://www.audiopoisk.com/',
# Why: #3644 in Alexa global
'http://www.sexytube.me/',
# Why: #3645 in Alexa global
'http://www.centerblog.net/',
# Why: #3646 in Alexa global
'http://www.webpronews.com/',
# Why: #3647 in Alexa global
'http://www.prnewswire.com/',
# Why: #3648 in Alexa global
'http://www.vietnamnet.vn/',
# Why: #3649 in Alexa global
'http://www.groupon.co.in/',
# Why: #3650 in Alexa global
'http://www.bom.gov.au/',
# Why: #3651 in Alexa global
'http://www.loxblog.com/',
# Why: #3652 in Alexa global
'http://www.llnw.com/',
# Why: #3653 in Alexa global
'http://www.jcrew.com/',
# Why: #3654 in Alexa global
'http://www.carsensor.net/',
# Why: #3655 in Alexa global
'http://www.aukro.cz/',
# Why: #3656 in Alexa global
'http://www.zoomby.ru/',
# Why: #3657 in Alexa global
'http://www.wallstcheatsheet.com/',
# Why: #3658 in Alexa global
'http://www.17k.com/',
# Why: #3659 in Alexa global
'http://www.secondlife.com/',
# Why: #3660 in Alexa global
'http://www.marmiton.org/',
# Why: #3661 in Alexa global
'http://www.zorpia.com/',
# Why: #3662 in Alexa global
'http://www.searchya.com/',
# Why: #3663 in Alexa global
'http://www.rtl2.de/',
# Why: #3664 in Alexa global
'http://www.wiocha.pl/',
# Why: #3665 in Alexa global
'http://www.28tui.com/',
# Why: #3666 in Alexa global
'http://www.shopzilla.com/',
# Why: #3667 in Alexa global
'http://www.google.com.ni/',
# Why: #3668 in Alexa global
'http://www.lycos.com/',
# Why: #3669 in Alexa global
'http://www.gucheng.com/',
# Why: #3670 in Alexa global
'http://www.rajanews.com/',
# Why: #3671 in Alexa global
'http://www.blackhatteam.com/',
# Why: #3672 in Alexa global
'http://www.mp3.es/',
# Why: #3673 in Alexa global
'http://www.forums.wordpress.com/',
# Why: #3674 in Alexa global
'http://www.micromaxinfo.com/',
# Why: #3675 in Alexa global
'http://www.sub.jp/',
# Why: #3676 in Alexa global
'http://www.duden.de/',
# Why: #3677 in Alexa global
'http://www.nyc.gov/',
# Why: #3679 in Alexa global
'http://www.monova.org/',
# Why: #3680 in Alexa global
'http://www.al-wlid.com/',
# Why: #3681 in Alexa global
'http://www.dastelefonbuch.de/',
# Why: #3682 in Alexa global
'http://www.cam4ultimate.com/',
# Why: #3683 in Alexa global
'http://www.inps.it/',
# Why: #3684 in Alexa global
'http://www.nazwa.pl/',
# Why: #3685 in Alexa global
'http://www.beatport.com/',
# Why: #3686 in Alexa global
'http://www.wizzair.com/',
# Why: #3687 in Alexa global
'http://www.thomann.de/',
# Why: #3688 in Alexa global
'http://www.juntadeandalucia.es/',
# Why: #3689 in Alexa global
'http://www.oficialsurveyscenter.co/',
# Why: #3690 in Alexa global
'http://www.zaluu.com/',
# Why: #3691 in Alexa global
'http://www.videarn.com/',
# Why: #3692 in Alexa global
'http://www.azcentral.com/',
# Why: #3693 in Alexa global
'http://www.xvideosmovie.com/',
# Why: #3694 in Alexa global
'http://www.eforosh.com/',
# Why: #3696 in Alexa global
'http://www.movie25.com/',
# Why: #3697 in Alexa global
'http://www.creditkarma.com/',
# Why: #3698 in Alexa global
'http://upi.com/',
# Why: #3699 in Alexa global
'http://www.mozook.com/',
# Why: #3700 in Alexa global
'http://www.heavy.com/',
# Why: #3701 in Alexa global
'http://www.worldoftanks.com/',
# Why: #3702 in Alexa global
'http://www.vkrugudruzei.ru/',
# Why: #3704 in Alexa global
'http://www.hourlyrevshare.net/',
# Why: #3705 in Alexa global
'http://www.walkerplus.com/',
# Why: #3706 in Alexa global
'http://www.btyou.com/',
# Why: #3707 in Alexa global
'http://www.adzibiz.com/',
# Why: #3708 in Alexa global
'http://www.tryflirting.com/',
# Why: #3709 in Alexa global
'http://www.moi.gov.sa/',
# Why: #3710 in Alexa global
'http://www.cooltext.com/',
# Why: #3711 in Alexa global
'http://www.dawanda.com/',
# Why: #3712 in Alexa global
'http://www.travian.com.sa/',
# Why: #3713 in Alexa global
'http://www.va.gov/',
# Why: #3714 in Alexa global
'http://www.sunmaker.com/',
# Why: #3715 in Alexa global
'http://www.aaa.com/',
# Why: #3716 in Alexa global
'http://www.dinodirect.com/',
# Why: #3717 in Alexa global
'http://www.cima4u.com/',
# Why: #3718 in Alexa global
'http://www.huaban.com/',
# Why: #3719 in Alexa global
'http://www.nzherald.co.nz/',
# Why: #3720 in Alexa global
'http://www.plotek.pl/',
# Why: #3722 in Alexa global
'http://www.chow.com/',
# Why: #3723 in Alexa global
'http://www.rincondelvago.com/',
# Why: #3724 in Alexa global
'http://uzai.com/',
# Why: #3725 in Alexa global
'http://www.dbw.cn/',
# Why: #3727 in Alexa global
'http://www.stayfriends.de/',
# Why: #3728 in Alexa global
'http://www.reed.co.uk/',
# Why: #3729 in Alexa global
'http://www.rainpow.com/',
# Why: #3730 in Alexa global
'http://www.dallasnews.com/',
# Why: #3731 in Alexa global
'http://www.ntvspor.net/',
# Why: #3732 in Alexa global
'http://www.fonearena.com/',
# Why: #3733 in Alexa global
'http://www.forocoches.com/',
# Why: #3734 in Alexa global
'http://www.myfonts.com/',
# Why: #3735 in Alexa global
'http://www.fenopy.se/',
# Why: #3736 in Alexa global
'http://www.animefreak.tv/',
# Why: #3737 in Alexa global
'http://www.websitewelcome.com/',
# Why: #3738 in Alexa global
'http://www.indonetwork.co.id/',
# Why: #3739 in Alexa global
'http://www.mapsofindia.com/',
# Why: #3740 in Alexa global
'http://www.newlook.com/',
# Why: #3741 in Alexa global
'http://www.holiday-weather.com/',
# Why: #3742 in Alexa global
'http://zhe800.com/',
# Why: #3743 in Alexa global
'http://www.recipesfinder.com/',
# Why: #3744 in Alexa global
'http://www.bankrate.com.cn/',
# Why: #3745 in Alexa global
'http://www.bbom.com.br/',
# Why: #3746 in Alexa global
'http://www.dahe.cn/',
# Why: #3747 in Alexa global
'http://www.jalopnik.com/',
# Why: #3748 in Alexa global
'http://www.canon.com/',
# Why: #3750 in Alexa global
'http://www.freshbooks.com/',
# Why: #3751 in Alexa global
'http://www.clickcompare.info/',
# Why: #3752 in Alexa global
'http://www.aprod.hu/',
# Why: #3753 in Alexa global
'http://www.thisav.com/',
# Why: #3754 in Alexa global
'http://www.boerse.bz/',
# Why: #3755 in Alexa global
'http://www.orange.es/',
# Why: #3756 in Alexa global
'http://www.forobeta.com/',
# Why: #3757 in Alexa global
'http://www.surfactif.fr/',
# Why: #3758 in Alexa global
'http://www.listverse.com/',
# Why: #3759 in Alexa global
'http://www.feedjit.com/',
# Why: #3760 in Alexa global
'http://www.ntv.co.jp/',
# Why: #3761 in Alexa global
'http://www.bni.co.id/',
# Why: #3762 in Alexa global
'http://www.gamemazing.com/',
# Why: #3763 in Alexa global
'http://www.mbalib.com/',
# Why: #3764 in Alexa global
'http://www.topsy.com/',
# Why: #3765 in Alexa global
'http://www.torchbrowser.com/',
# Why: #3766 in Alexa global
'http://www.ieee.org/',
# Why: #3767 in Alexa global
'http://www.tinydeal.com/',
# Why: #3768 in Alexa global
'http://www.playdom.com/',
# Why: #3769 in Alexa global
'http://www.redorbit.com/',
# Why: #3770 in Alexa global
'http://www.inboxdollars.com/',
# Why: #3771 in Alexa global
'http://www.google.com.bh/',
# Why: #3772 in Alexa global
'http://www.pcanalysis.net/',
# Why: #3773 in Alexa global
'http://www.acer.com/',
# Why: #3774 in Alexa global
'http://www.jizzbell.com/',
# Why: #3775 in Alexa global
'http://www.google.com.kh/',
# Why: #3776 in Alexa global
'http://www.mappy.com/',
# Why: #3777 in Alexa global
'http://www.day.az/',
# Why: #3778 in Alexa global
'http://www.euronews.com/',
# Why: #3779 in Alexa global
'http://www.wikidot.com/',
# Why: #3780 in Alexa global
'http://www.creativecommons.org/',
# Why: #3781 in Alexa global
'http://www.quantcast.com/',
# Why: #3782 in Alexa global
'http://www.iconarchive.com/',
# Why: #3783 in Alexa global
'http://www.iyaya.com/',
# Why: #3784 in Alexa global
'http://www.jetstar.com/',
# Why: #3786 in Alexa global
'http://diandian.com/',
# Why: #3787 in Alexa global
'http://www.winzip.com/',
# Why: #3788 in Alexa global
'http://www.clixzor.com/',
# Why: #3789 in Alexa global
'http://www.teebik.com/',
# Why: #3790 in Alexa global
'http://meilele.com/',
# Why: #3791 in Alexa global
'http://www.gsm.ir/',
# Why: #3792 in Alexa global
'http://dek-d.com/',
# Why: #3793 in Alexa global
'http://www.giantbomb.com/',
# Why: #3794 in Alexa global
'http://www.tala.ir/',
# Why: #3795 in Alexa global
'http://www.extremetracking.com/',
# Why: #3796 in Alexa global
'http://www.homevv.com/',
# Why: #3797 in Alexa global
'http://www.truthaboutabs.com/',
# Why: #3798 in Alexa global
'http://www.psychologytoday.com/',
# Why: #3800 in Alexa global
'http://www.vod.pl/',
# Why: #3801 in Alexa global
'http://www.macromill.com/',
# Why: #3802 in Alexa global
'http://www.pagseguro.uol.com.br/',
# Why: #3804 in Alexa global
'http://www.amd.com/',
# Why: #3805 in Alexa global
'http://www.livescience.com/',
# Why: #3806 in Alexa global
'http://dedecms.com/',
# Why: #3807 in Alexa global
'http://www.jin115.com/',
# Why: #3808 in Alexa global
'http://www.ampxchange.com/',
# Why: #3809 in Alexa global
'http://www.profitcentr.com/',
# Why: #3810 in Alexa global
'http://www.webmotors.com.br/',
# Why: #3811 in Alexa global
'http://www.lan.com/',
# Why: #3812 in Alexa global
'http://www.fileice.net/',
# Why: #3813 in Alexa global
'http://www.ingdirect.es/',
# Why: #3814 in Alexa global
'http://www.amtrak.com/',
# Why: #3815 in Alexa global
'http://www.emag.ro/',
# Why: #3816 in Alexa global
'http://www.progressive.com/',
# Why: #3817 in Alexa global
'http://www.balatarin.com/',
# Why: #3818 in Alexa global
'http://www.immonet.de/',
# Why: #3819 in Alexa global
'http://www.e-travel.com/',
# Why: #3820 in Alexa global
'http://www.studymode.com/',
# Why: #3821 in Alexa global
'http://www.go2000.com/',
# Why: #3822 in Alexa global
'http://www.shopbop.com/',
# Why: #3823 in Alexa global
'http://www.filesfetcher.com/',
# Why: #3824 in Alexa global
'http://www.euroresidentes.com/',
# Why: #3825 in Alexa global
'http://www.movistar.es/',
# Why: #3826 in Alexa global
'http://lefeng.com/',
# Why: #3827 in Alexa global
'http://www.google.hn/',
# Why: #3828 in Alexa global
'http://www.homestead.com/',
# Why: #3829 in Alexa global
'http://www.filesonar.com/',
# Why: #3830 in Alexa global
'http://www.hsbccreditcard.com/',
# Why: #3831 in Alexa global
'http://www.google.com.np/',
# Why: #3832 in Alexa global
'http://www.parperfeito.com.br/',
# Why: #3833 in Alexa global
'http://www.sciencedaily.com/',
# Why: #3834 in Alexa global
'http://www.realgfporn.com/',
# Why: #3835 in Alexa global
'http://www.wonderhowto.com/',
# Why: #3836 in Alexa global
'http://www.rakuten-card.co.jp/',
# Why: #3837 in Alexa global
'http://www.coolrom.com/',
# Why: #3838 in Alexa global
'http://www.wikibooks.org/',
# Why: #3839 in Alexa global
'http://www.archdaily.com/',
# Why: #3840 in Alexa global
'http://www.gigazine.net/',
# Why: #3841 in Alexa global
'http://www.totaljerkface.com/',
# Why: #3842 in Alexa global
'http://www.bezaat.com/',
# Why: #3843 in Alexa global
'http://www.eurosport.com/',
# Why: #3844 in Alexa global
'http://www.fontspace.com/',
# Why: #3845 in Alexa global
'http://www.tirage24.com/',
# Why: #3846 in Alexa global
'http://www.bancomer.com.mx/',
# Why: #3847 in Alexa global
'http://www.nasdaq.com/',
# Why: #3848 in Alexa global
'http://www.bravoteens.com/',
# Why: #3849 in Alexa global
'http://www.bdjobs.com/',
# Why: #3850 in Alexa global
'http://www.zimbra.free.fr/',
# Why: #3851 in Alexa global
'http://www.arsenal.com/',
# Why: #3852 in Alexa global
'http://www.rabota.ru/',
# Why: #3853 in Alexa global
'http://www.lovefilm.com/',
# Why: #3854 in Alexa global
'http://www.artemisweb.jp/',
# Why: #3855 in Alexa global
'http://www.tsetmc.com/',
# Why: #3856 in Alexa global
'http://www.movshare.net/',
# Why: #3857 in Alexa global
'http://www.debonairblog.com/',
# Why: #3858 in Alexa global
'http://www.zmovie.co/',
# Why: #3859 in Alexa global
'http://www.peoplefinders.com/',
# Why: #3860 in Alexa global
'http://www.mercadolibre.com/',
# Why: #3861 in Alexa global
'http://www.connectlondoner.com/',
# Why: #3862 in Alexa global
'http://www.forbes.ru/',
# Why: #3863 in Alexa global
'http://www.gagnezauxoptions.com/',
# Why: #3864 in Alexa global
'http://www.taikang.com/',
# Why: #3865 in Alexa global
'http://www.mywapblog.com/',
# Why: #3866 in Alexa global
'http://www.citysearch.com/',
# Why: #3867 in Alexa global
'http://www.novafinanza.com/',
# Why: #3868 in Alexa global
'http://www.gruposantander.es/',
# Why: #3869 in Alexa global
'http://www.relianceada.com/',
# Why: #3870 in Alexa global
'http://www.rankingsandreviews.com/',
# Why: #3871 in Alexa global
'http://www.p-world.co.jp/',
# Why: #3872 in Alexa global
'http://hjenglish.com/',
# Why: #3873 in Alexa global
'http://www.state.nj.us/',
# Why: #3874 in Alexa global
'http://www.comdirect.de/',
# Why: #3875 in Alexa global
'http://www.claro.com.br/',
# Why: #3876 in Alexa global
'http://www.alluc.to/',
# Why: #3877 in Alexa global
'http://www.godlikeproductions.com/',
# Why: #3878 in Alexa global
'http://www.lowyat.net/',
# Why: #3879 in Alexa global
'http://www.dawn.com/',
# Why: #3880 in Alexa global
'http://www.18xgirls.com/',
# Why: #3881 in Alexa global
'http://www.origo.hu/',
# Why: #3882 in Alexa global
'http://www.loopnet.com/',
# Why: #3883 in Alexa global
'http://www.payu.in/',
# Why: #3884 in Alexa global
'http://www.digitalmedia-comunicacion.com/',
# Why: #3885 in Alexa global
'http://www.newsvine.com/',
# Why: #3886 in Alexa global
'http://www.petfinder.com/',
# Why: #3887 in Alexa global
'http://www.kuaibo.com/',
# Why: #3888 in Alexa global
'http://www.soft32.com/',
# Why: #3889 in Alexa global
'http://www.yellowpages.ca/',
# Why: #3890 in Alexa global
'http://www.1fichier.com/',
# Why: #3891 in Alexa global
'http://www.egyup.com/',
# Why: #3892 in Alexa global
'http://www.iskullgames.com/',
# Why: #3893 in Alexa global
'http://www.androidforums.com/',
# Why: #3894 in Alexa global
'http://www.blogspot.cz/',
# Why: #3895 in Alexa global
'http://www.umich.edu/',
# Why: #3896 in Alexa global
'http://www.madsextube.com/',
# Why: #3897 in Alexa global
'http://www.bigcinema.tv/',
# Why: #3898 in Alexa global
'http://www.donedeal.ie/',
# Why: #3899 in Alexa global
'http://www.winporn.com/',
# Why: #3900 in Alexa global
'http://www.cosmopolitan.com/',
# Why: #3901 in Alexa global
'http://www.reg.ru/',
# Why: #3902 in Alexa global
'http://www.localmoxie.com/',
# Why: #3903 in Alexa global
'http://www.kootation.com/',
# Why: #3904 in Alexa global
'http://www.gidonline.ru/',
# Why: #3905 in Alexa global
'http://www.clipconverter.cc/',
# Why: #3906 in Alexa global
'http://www.gioco.it/',
# Why: #3907 in Alexa global
'http://www.ravelry.com/',
# Why: #3908 in Alexa global
'http://www.gettyimages.com/',
# Why: #3909 in Alexa global
'http://www.nanapi.jp/',
# Why: #3910 in Alexa global
'http://www.medicalnewsreporter.com/',
# Why: #3911 in Alexa global
'http://www.shop411.com/',
# Why: #3912 in Alexa global
'http://www.aif.ru/',
# Why: #3913 in Alexa global
'http://www.journaldesfemmes.com/',
# Why: #3914 in Alexa global
'http://www.blogcu.com/',
# Why: #3915 in Alexa global
'http://www.vanguard.com/',
# Why: #3916 in Alexa global
'http://www.freemp3go.com/',
# Why: #3917 in Alexa global
'http://www.google.ci/',
# Why: #3918 in Alexa global
'http://www.findicons.com/',
# Why: #3919 in Alexa global
'http://www.tineye.com/',
# Why: #3920 in Alexa global
'http://www.webdesignerdepot.com/',
# Why: #3921 in Alexa global
'http://www.nomorerack.com/',
# Why: #3922 in Alexa global
'http://www.iqoo.me/',
# Why: #3923 in Alexa global
'http://www.amarujala.com/',
# Why: #3924 in Alexa global
'http://pengfu.com/',
# Why: #3925 in Alexa global
'http://www.leadpages.net/',
# Why: #3926 in Alexa global
'http://www.zalukaj.tv/',
# Why: #3927 in Alexa global
'http://www.avon.com/',
# Why: #3928 in Alexa global
'http://www.casasbahia.com.br/',
# Why: #3929 in Alexa global
'http://www.juegosdechicas.com/',
# Why: #3930 in Alexa global
'http://www.tvrain.ru/',
# Why: #3931 in Alexa global
'http://www.askmefast.com/',
# Why: #3932 in Alexa global
'http://www.stockcharts.com/',
# Why: #3934 in Alexa global
'http://www.footlocker.com/',
# Why: #3935 in Alexa global
'http://www.allanalpass.com/',
# Why: #3936 in Alexa global
'http://www.theoatmeal.com/',
# Why: #3937 in Alexa global
'http://www.storify.com/',
# Why: #3938 in Alexa global
'http://www.santander.com.br/',
# Why: #3939 in Alexa global
'http://www.laughnfiddle.com/',
# Why: #3940 in Alexa global
'http://www.lomadee.com/',
# Why: #3941 in Alexa global
'http://aftenposten.no/',
# Why: #3942 in Alexa global
'http://www.lamoda.ru/',
# Why: #3943 in Alexa global
'http://www.tasteofhome.com/',
# Why: #3944 in Alexa global
'http://www.news247.gr/',
# Why: #3946 in Alexa global
'http://www.sherdog.com/',
# Why: #3947 in Alexa global
'http://www.milb.com/',
# Why: #3948 in Alexa global
'http://www.3djuegos.com/',
# Why: #3949 in Alexa global
'http://www.dreammovies.com/',
# Why: #3950 in Alexa global
'http://www.commonfloor.com/',
# Why: #3951 in Alexa global
'http://www.tharunee.lk/',
# Why: #3952 in Alexa global
'http://www.chatrandom.com/',
# Why: #3953 in Alexa global
'http://xs8.cn/',
# Why: #3955 in Alexa global
'http://www.rechargeitnow.com/',
# Why: #3956 in Alexa global
'http://am15.net/',
# Why: #3957 in Alexa global
'http://www.sexad.net/',
# Why: #3958 in Alexa global
'http://www.herokuapp.com/',
# Why: #3959 in Alexa global
'http://www.apontador.com.br/',
# Why: #3960 in Alexa global
'http://www.rfi.fr/',
# Why: #3961 in Alexa global
'http://www.woozworld.com/',
# Why: #3962 in Alexa global
'http://www.hitta.se/',
# Why: #3963 in Alexa global
'http://www.comedycentral.com/',
# Why: #3964 in Alexa global
'http://www.fbsbx.com/',
# Why: #3965 in Alexa global
'http://www.aftabnews.ir/',
# Why: #3966 in Alexa global
'http://www.stepstone.de/',
# Why: #3967 in Alexa global
'http://www.filmon.com/',
# Why: #3969 in Alexa global
'http://www.smbc.co.jp/',
# Why: #3970 in Alexa global
'http://www.ameritrade.com/',
# Why: #3971 in Alexa global
'http://www.ecitic.com/',
# Why: #3972 in Alexa global
'http://www.bola.net/',
# Why: #3973 in Alexa global
'http://www.nexon.co.jp/',
# Why: #3974 in Alexa global
'http://www.hellowork.go.jp/',
# Why: #3975 in Alexa global
'http://www.hq-sex-tube.com/',
# Why: #3976 in Alexa global
'http://www.gsp.ro/',
# Why: #3977 in Alexa global
'http://www.groupon.co.uk/',
# Why: #3978 in Alexa global
'http://www.20min.ch/',
# Why: #3979 in Alexa global
'http://www.barclaycardus.com/',
# Why: #3980 in Alexa global
'http://www.dice.com/',
# Why: #3981 in Alexa global
'http://himasoku.com/',
# Why: #3982 in Alexa global
'http://www.nwsource.com/',
# Why: #3983 in Alexa global
'http://www.gougou.com/',
# Why: #3984 in Alexa global
'http://www.iol.co.za/',
# Why: #3985 in Alexa global
'http://www.thinkgeek.com/',
# Why: #3986 in Alexa global
'http://www.governmentjobs.com/',
# Why: #3987 in Alexa global
'http://www.500.com/',
# Why: #3988 in Alexa global
'http://www.caixin.com/',
# Why: #3989 in Alexa global
'http://www.elsevier.com/',
# Why: #3990 in Alexa global
'http://www.navitime.co.jp/',
# Why: #3991 in Alexa global
'http://www.rafflecopter.com/',
# Why: #3992 in Alexa global
'http://www.auctiva.com/',
# Why: #3994 in Alexa global
'http://www.pracuj.pl/',
# Why: #3995 in Alexa global
'http://www.strato.de/',
# Why: #3996 in Alexa global
'http://www.ricardoeletro.com.br/',
# Why: #3997 in Alexa global
'http://www.vodafone.de/',
# Why: #3998 in Alexa global
'http://www.jike.com/',
# Why: #3999 in Alexa global
'http://www.smosh.com/',
# Why: #4000 in Alexa global
'http://www.downlite.net/',
# Why: #4001 in Alexa global
'http://to8to.com/',
# Why: #4003 in Alexa global
'http://www.tikona.in/',
# Why: #4004 in Alexa global
'http://www.royalmail.com/',
# Why: #4005 in Alexa global
'http://www.tripadvisor.de/',
# Why: #4006 in Alexa global
'http://www.realclearpolitics.com/',
# Why: #4007 in Alexa global
'http://www.pubdirecte.com/',
# Why: #4008 in Alexa global
'http://www.rassd.com/',
# Why: #4009 in Alexa global
'http://www.ptt.cc/',
# Why: #4010 in Alexa global
'http://www.townhall.com/',
# Why: #4011 in Alexa global
'http://www.theoldreader.com/',
# Why: #4012 in Alexa global
'http://www.viki.com/',
# Why: #4013 in Alexa global
'http://www.one.com/',
# Why: #4014 in Alexa global
'http://www.peopleperhour.com/',
# Why: #4015 in Alexa global
'http://www.desidime.com/',
# Why: #4016 in Alexa global
'http://www.17track.net/',
# Why: #4017 in Alexa global
'http://www.duote.com/',
# Why: #4018 in Alexa global
'http://www.emuch.net/',
# Why: #4019 in Alexa global
'http://www.mlgame.co.uk/',
# Why: #4020 in Alexa global
'http://www.rockstargames.com/',
# Why: #4021 in Alexa global
'http://www.slaati.com/',
# Why: #4022 in Alexa global
'http://www.ibibo.com/',
# Why: #4023 in Alexa global
'http://www.journaldunet.com/',
# Why: #4024 in Alexa global
'http://www.ria.ua/',
# Why: #4025 in Alexa global
'http://www.odatv.com/',
# Why: #4026 in Alexa global
'http://www.comodo.com/',
# Why: #4027 in Alexa global
'http://www.clickfair.com/',
# Why: #4028 in Alexa global
'http://www.system500.com/',
# Why: #4029 in Alexa global
'http://www.wordstream.com/',
# Why: #4030 in Alexa global
'http://www.alexaboostup.com/',
# Why: #4031 in Alexa global
'http://www.yjbys.com/',
# Why: #4032 in Alexa global
'http://www.hsbc.com/',
# Why: #4033 in Alexa global
'http://www.online-convert.com/',
# Why: #4034 in Alexa global
'http://www.miui.com/',
# Why: #4035 in Alexa global
'http://www.totaljobs.com/',
# Why: #4036 in Alexa global
'http://www.travian.fr/',
# Why: #4037 in Alexa global
'http://www.funda.nl/',
# Why: #4038 in Alexa global
'http://www.bazos.sk/',
# Why: #4039 in Alexa global
'http://www.efukt.com/',
# Why: #4040 in Alexa global
'http://www.startlap.com/',
# Why: #4041 in Alexa global
'http://www.hir24.hu/',
# Why: #4042 in Alexa global
'http://www.mrskin.com/',
# Why: #4043 in Alexa global
'http://dbs.com/',
# Why: #4044 in Alexa global
'http://www.sevenforums.com/',
# Why: #4045 in Alexa global
'http://www.admitad.com/',
# Why: #4046 in Alexa global
'http://www.graaam.com/',
# Why: #4047 in Alexa global
'http://www.exactme.com/',
# Why: #4048 in Alexa global
'http://www.roadrunner.com/',
# Why: #4049 in Alexa global
'http://www.liberation.fr/',
# Why: #4050 in Alexa global
'http://www.cas.sk/',
# Why: #4051 in Alexa global
'http://www.redbubble.com/',
# Why: #4052 in Alexa global
'http://www.ezilon.com/',
# Why: #4053 in Alexa global
'http://www.hihi2.com/',
# Why: #4054 in Alexa global
'http://www.net.hr/',
# Why: #4055 in Alexa global
'http://www.mediaite.com/',
# Why: #4056 in Alexa global
'http://www.clip2net.com/',
# Why: #4057 in Alexa global
'http://www.wapka.mobi/',
# Why: #4058 in Alexa global
'http://www.dailybasis.com/',
# Why: #4059 in Alexa global
'http://www.o2online.de/',
# Why: #4060 in Alexa global
'http://www.tweetdeck.com/',
# Why: #4061 in Alexa global
'http://www.tripadvisor.jp/',
# Why: #4062 in Alexa global
'http://www.fakt.pl/',
# Why: #4063 in Alexa global
'http://www.service-public.fr/',
# Why: #4064 in Alexa global
'http://www.shueisha.co.jp/',
# Why: #4065 in Alexa global
'http://www.searchina.ne.jp/',
# Why: #4066 in Alexa global
'http://www.bodisparking.com/',
# Why: #4067 in Alexa global
'http://www.corporationwiki.com/',
# Why: #4068 in Alexa global
'http://www.jandan.net/',
# Why: #4069 in Alexa global
'http://www.chsi.com.cn/',
# Why: #4070 in Alexa global
'http://www.alisoft.com/',
# Why: #4071 in Alexa global
'http://www.gosuslugi.ru/',
# Why: #4072 in Alexa global
'http://www.grxf.com/',
# Why: #4073 in Alexa global
'http://www.daserste.de/',
# Why: #4074 in Alexa global
'http://www.freedigitalphotos.net/',
# Why: #4075 in Alexa global
'http://www.flirchi.ru/',
# Why: #4076 in Alexa global
'http://www.seesaa.jp/',
# Why: #4077 in Alexa global
'http://www.htmlbook.ru/',
# Why: #4078 in Alexa global
'http://www.independent.ie/',
# Why: #4079 in Alexa global
'http://www.bufferapp.com/',
# Why: #4080 in Alexa global
'http://www.panzar.com/',
# Why: #4081 in Alexa global
'http://www.sport.cz/',
# Why: #4082 in Alexa global
'http://matomeantena.com/',
# Why: #4083 in Alexa global
'http://www.thenewporn.com/',
# Why: #4084 in Alexa global
'http://www.iran-tejarat.com/',
# Why: #4085 in Alexa global
'http://www.rotoworld.com/',
# Why: #4086 in Alexa global
'http://maalaimalar.com/',
# Why: #4087 in Alexa global
'http://www.poppen.de/',
# Why: #4088 in Alexa global
'http://www.tenki.jp/',
# Why: #4089 in Alexa global
'http://www.homes.co.jp/',
# Why: #4090 in Alexa global
'http://www.csfd.cz/',
# Why: #4091 in Alexa global
'http://www.2ip.ru/',
# Why: #4092 in Alexa global
'http://www.hawamer.com/',
# Why: #4093 in Alexa global
'http://www.telkomsel.com/',
# Why: #4094 in Alexa global
'http://www.un.org/',
# Why: #4095 in Alexa global
'http://www.autobinaryea.com/',
# Why: #4096 in Alexa global
'http://www.emgoldex.com/',
# Why: #4097 in Alexa global
'http://www.saksfifthavenue.com/',
# Why: #4098 in Alexa global
'http://www.realtor.ca/',
# Why: #4099 in Alexa global
'http://www.hdwallpapers.in/',
# Why: #4100 in Alexa global
'http://www.chinahr.com/',
# Why: #4101 in Alexa global
'http://www.niazerooz.com/',
# Why: #4102 in Alexa global
'http://www.sina.com/',
# Why: #4103 in Alexa global
'http://www.kinopod.ru/',
# Why: #4104 in Alexa global
'http://www.funweek.it/',
# Why: #4105 in Alexa global
'http://www.pornsake.com/',
# Why: #4106 in Alexa global
'http://www.vitacost.com/',
# Why: #4107 in Alexa global
'http://www.band.uol.com.br/',
# Why: #4108 in Alexa global
'http://www.110.com/',
# Why: #4109 in Alexa global
'http://www.jobomas.com/',
# Why: #4110 in Alexa global
'http://www.joyreactor.cc/',
# Why: #4111 in Alexa global
'http://www.3dnews.ru/',
# Why: #4112 in Alexa global
'http://www.vedomosti.ru/',
# Why: #4113 in Alexa global
'http://www.stansberryresearch.com/',
# Why: #4114 in Alexa global
'http://www.performersoft.com/',
# Why: #4115 in Alexa global
'http://www.codecademy.com/',
# Why: #4116 in Alexa global
'http://www.petsmart.com/',
# Why: #4118 in Alexa global
'http://www.kissmetrics.com/',
# Why: #4119 in Alexa global
'http://www.infojobs.it/',
# Why: #4120 in Alexa global
'http://www.wealink.com/',
# Why: #4121 in Alexa global
'http://www.rapidtrk.com/',
# Why: #4122 in Alexa global
'http://www.enterprise.com/',
# Why: #4123 in Alexa global
'http://www.iran-forum.ir/',
# Why: #4124 in Alexa global
'http://www.express-files.com/',
# Why: #4126 in Alexa global
'http://www.cyberpresse.ca/',
# Why: #4127 in Alexa global
'http://www.dobreprogramy.pl/',
# Why: #4128 in Alexa global
'http://www.uploading.com/',
# Why: #4129 in Alexa global
'http://www.profitclicking.com/',
# Why: #4130 in Alexa global
'http://www.playwartune.com/',
# Why: #4131 in Alexa global
'http://www.toluna.com/',
# Why: #4132 in Alexa global
'http://www.shoptime.com.br/',
# Why: #4134 in Alexa global
'http://www.totaladperformance.com/',
# Why: #4135 in Alexa global
'http://www.handelsblatt.com/',
# Why: #4136 in Alexa global
'http://www.hamshahrionline.ir/',
# Why: #4137 in Alexa global
'http://www.15min.lt/',
# Why: #4138 in Alexa global
'http://www.wyborcza.pl/',
# Why: #4139 in Alexa global
'http://www.flvto.com/',
# Why: #4140 in Alexa global
'http://www.microsofttranslator.com/',
# Why: #4141 in Alexa global
'http://www.trovaprezzi.it/',
# Why: #4142 in Alexa global
'http://www.eversave.com/',
# Why: #4143 in Alexa global
'http://www.wmzona.com/',
# Why: #4144 in Alexa global
'http://www.hardwarezone.com.sg/',
# Why: #4145 in Alexa global
'http://thestar.com.my/',
# Why: #4146 in Alexa global
'http://www.siliconindia.com/',
# Why: #4147 in Alexa global
'http://www.jfranews.com/',
# Why: #4148 in Alexa global
'http://www.emol.com/',
# Why: #4149 in Alexa global
'http://www.nordea.fi/',
# Why: #4150 in Alexa global
'http://hers.com.cn/',
# Why: #4151 in Alexa global
'http://www.heroturko.me/',
# Why: #4152 in Alexa global
'http://www.xat.com/',
# Why: #4153 in Alexa global
'http://www.3asq.com/',
# Why: #4154 in Alexa global
'http://www.hlntv.com/',
# Why: #4155 in Alexa global
'http://incruit.com/',
# Why: #4156 in Alexa global
'http://www.list-manage2.com/',
# Why: #4157 in Alexa global
'http://www.bulbagarden.net/',
# Why: #4158 in Alexa global
'http://www.blogdohotelurbano.com/',
# Why: #4159 in Alexa global
'http://www.suomi24.fi/',
# Why: #4160 in Alexa global
'http://www.nicozon.net/',
# Why: #4161 in Alexa global
'http://www.tuporno.tv/',
# Why: #4162 in Alexa global
'http://www.perfectworld.com/',
# Why: #4163 in Alexa global
'http://www.ayosdito.ph/',
# Why: #4164 in Alexa global
'http://www.gmx.at/',
# Why: #4165 in Alexa global
'http://www.123greetings.com/',
# Why: #4166 in Alexa global
'http://www.metafilter.com/',
# Why: #4167 in Alexa global
'http://www.g9g.com/',
# Why: #4168 in Alexa global
'http://www.searchnfind.org/',
# Why: #4169 in Alexa global
'http://www.pcgamer.com/',
# Why: #4170 in Alexa global
'http://economia.uol.com.br/',
# Why: #4171 in Alexa global
'http://www.on.cc/',
# Why: #4172 in Alexa global
'http://www.rentalcars.com/',
# Why: #4173 in Alexa global
'http://www.mail2web.com/',
# Why: #4174 in Alexa global
'http://www.zalando.it/',
# Why: #4175 in Alexa global
'http://www.freevideo.cz/',
# Why: #4176 in Alexa global
'http://www.source-wave.com/',
# Why: #4177 in Alexa global
'http://www.iranjib.ir/',
# Why: #4178 in Alexa global
'http://www.societe.com/',
# Why: #4179 in Alexa global
'http://www.160by2.com/',
# Why: #4180 in Alexa global
'http://www.berooztarinha.com/',
# Why: #4181 in Alexa global
'http://www.popmog.com/',
# Why: #4182 in Alexa global
'http://www.fantasy8.com/',
# Why: #4183 in Alexa global
'http://www.motortrend.com/',
# Why: #4184 in Alexa global
'http://www.huffingtonpost.ca/',
# Why: #4185 in Alexa global
'http://51test.net/',
# Why: #4186 in Alexa global
'http://www.ringtonematcher.com/',
# Why: #4187 in Alexa global
'http://www.ourtime.com/',
# Why: #4188 in Alexa global
'http://www.standardchartered.co.in/',
# Why: #4189 in Alexa global
'http://www.rdio.com/',
# Why: #4190 in Alexa global
'http://www.parsiblog.com/',
# Why: #4191 in Alexa global
'http://www.btvguide.com/',
# Why: #4192 in Alexa global
'http://www.sport.ro/',
# Why: #4193 in Alexa global
'http://www.freep.com/',
# Why: #4194 in Alexa global
'http://www.gismeteo.ua/',
# Why: #4195 in Alexa global
'http://www.rojadirecta.me/',
# Why: #4196 in Alexa global
'http://www.babol.pl/',
# Why: #4198 in Alexa global
'http://www.lun.com/',
# Why: #4199 in Alexa global
'http://www.epicurious.com/',
# Why: #4200 in Alexa global
'http://www.fetishok.com/',
# Why: #4201 in Alexa global
'http://www.mystart.com/',
# Why: #4202 in Alexa global
'http://www.wn.com/',
# Why: #4203 in Alexa global
'http://www.nationalrail.co.uk/',
# Why: #4204 in Alexa global
'http://www.feedsportal.com/',
# Why: #4205 in Alexa global
'http://www.rai.it/',
# Why: #4206 in Alexa global
'http://www.sportlemon.tv/',
# Why: #4207 in Alexa global
'http://www.groupon.com.br/',
# Why: #4208 in Alexa global
'http://www.ebay.at/',
# Why: #4209 in Alexa global
'http://www.yourdictionary.com/',
# Why: #4210 in Alexa global
'http://www.360safe.com/',
# Why: #4211 in Alexa global
'http://www.statefarm.com/',
# Why: #4212 in Alexa global
'http://www.desjardins.com/',
# Why: #4213 in Alexa global
'http://www.biblehub.com/',
# Why: #4214 in Alexa global
'http://www.mercadolibre.cl/',
# Why: #4215 in Alexa global
'http://www.eluniversal.com/',
# Why: #4216 in Alexa global
'http://www.lrytas.lt/',
# Why: #4217 in Alexa global
'http://www.youboy.com/',
# Why: #4218 in Alexa global
'http://www.gratka.pl/',
# Why: #4219 in Alexa global
'http://etype.com/',
# Why: #4220 in Alexa global
'http://www.reallifecam.com/',
# Why: #4221 in Alexa global
'http://www.imp.free.fr/',
# Why: #4222 in Alexa global
'http://www.jobstreet.co.id/',
# Why: #4223 in Alexa global
'http://www.geenstijl.nl/',
# Why: #4224 in Alexa global
'http://www.aebn.net/',
# Why: #4225 in Alexa global
'http://www.openoffice.org/',
# Why: #4226 in Alexa global
'http://www.diythemes.com/',
# Why: #4227 in Alexa global
'http://www.2gis.ru/',
# Why: #4228 in Alexa global
'http://www.wpmu.org/',
# Why: #4229 in Alexa global
'http://www.scrubtheweb.com/',
# Why: #4230 in Alexa global
'http://www.domain.com.au/',
# Why: #4231 in Alexa global
'http://www.buyma.com/',
# Why: #4232 in Alexa global
'http://www.ccbill.com/',
# Why: #4233 in Alexa global
'http://www.tui18.com/',
# Why: #4234 in Alexa global
'http://www.duga.jp/',
# Why: #4235 in Alexa global
'http://www.goforfiles.com/',
# Why: #4236 in Alexa global
'http://www.billionuploads.com/',
# Why: #4237 in Alexa global
'http://www.blogtalkradio.com/',
# Why: #4238 in Alexa global
'http://www.pipl.com/',
# Why: #4239 in Alexa global
'http://www.wallpaperswide.com/',
# Why: #4240 in Alexa global
'http://www.tuttosport.com/',
# Why: #4241 in Alexa global
'http://www.astucecherry.com/',
# Why: #4242 in Alexa global
'http://www.tradingfornewbies.com/',
# Why: #4243 in Alexa global
'http://www.umn.edu/',
# Why: #4244 in Alexa global
'http://www.rj.gov.br/',
# Why: #4245 in Alexa global
'http://www.mlive.com/',
# Why: #4246 in Alexa global
'http://www.justfab.com/',
# Why: #4247 in Alexa global
'http://www.ijreview.com/',
# Why: #4248 in Alexa global
'http://www.daniweb.com/',
# Why: #4249 in Alexa global
'http://www.quickmeme.com/',
# Why: #4250 in Alexa global
'http://www.safeway.com/',
# Why: #4251 in Alexa global
'http://www.virtualedge.com/',
# Why: #4252 in Alexa global
'http://www.saudiairlines.com/',
# Why: #4253 in Alexa global
'http://www.elbotola.com/',
# Why: #4254 in Alexa global
'http://www.holtgames.com/',
# Why: #4255 in Alexa global
'http://www.boots.com/',
# Why: #4256 in Alexa global
'http://www.potterybarn.com/',
# Why: #4257 in Alexa global
'http://www.mediamarkt.de/',
# Why: #4258 in Alexa global
'http://www.mangastream.com/',
# Why: #4259 in Alexa global
'http://www.mypoints.com/',
# Why: #4260 in Alexa global
'http://www.torrentdownloads.me/',
# Why: #4261 in Alexa global
'http://www.subtitleseeker.com/',
# Why: #4262 in Alexa global
'http://www.idlebrain.com/',
# Why: #4263 in Alexa global
'http://www.ekantipur.com/',
# Why: #4264 in Alexa global
'http://www.nowgamez.com/',
# Why: #4265 in Alexa global
'http://www.neoseeker.com/',
# Why: #4266 in Alexa global
'http://www.christianpost.com/',
# Why: #4267 in Alexa global
'http://www.joystiq.com/',
# Why: #4268 in Alexa global
'http://acesso.uol.com.br/',
# Why: #4269 in Alexa global
'http://www.bakufu.jp/',
# Why: #4270 in Alexa global
'http://www.iphone-winners.info/',
# Why: #4271 in Alexa global
'http://www.quizlet.com/',
# Why: #4272 in Alexa global
'http://www.prosport.ro/',
# Why: #4273 in Alexa global
'http://www.quanjing.com/',
# Why: #4274 in Alexa global
'http://autov.com.cn/',
# Why: #4275 in Alexa global
'http://www.gamechit.com/',
# Why: #4276 in Alexa global
'http://www.teleshow.pl/',
# Why: #4277 in Alexa global
'http://www.corrieredellosport.it/',
# Why: #4278 in Alexa global
'http://www.yoo7.com/',
# Why: #4279 in Alexa global
'http://fotocasa.es/',
# Why: #4280 in Alexa global
'http://www.attracta.com/',
# Why: #4281 in Alexa global
'http://www.hyatt.com/',
# Why: #4282 in Alexa global
'http://www.confirmit.com/',
# Why: #4283 in Alexa global
'http://www.xyu.tv/',
# Why: #4284 in Alexa global
'http://www.yoolplay.com/',
# Why: #4285 in Alexa global
'http://www.active.com/',
# Why: #4286 in Alexa global
'http://www.gizmag.com/',
# Why: #4287 in Alexa global
'http://www.hostelworld.com/',
# Why: #4288 in Alexa global
'http://www.pc6.com/',
# Why: #4289 in Alexa global
'http://www.lacentrale.fr/',
# Why: #4290 in Alexa global
'http://www.megasesso.com/',
# Why: #4291 in Alexa global
'http://www.thairath.co.th/',
# Why: #4292 in Alexa global
'http://www.thinkprogress.org/',
# Why: #4293 in Alexa global
'http://www.400gb.com/',
# Why: #4294 in Alexa global
'http://www.manageflitter.com/',
# Why: #4295 in Alexa global
'http://www.pronto.com/',
# Why: #4296 in Alexa global
'http://www.erotube.org/',
# Why: #4297 in Alexa global
'http://luxtarget.com/',
# Why: #4298 in Alexa global
'http://www.vui.vn/',
# Why: #4299 in Alexa global
'http://www.screenrant.com/',
# Why: #4300 in Alexa global
'http://www.nationalreview.com/',
# Why: #4301 in Alexa global
'http://www.ikman.lk/',
# Why: #4302 in Alexa global
'http://www.aboutus.org/',
# Why: #4303 in Alexa global
'http://www.booloo.com/',
# Why: #4304 in Alexa global
'http://www.klm.com/',
# Why: #4305 in Alexa global
'http://www.aukro.ua/',
# Why: #4307 in Alexa global
'http://www.skladchik.com/',
# Why: #4308 in Alexa global
'http://alfalfalfa.com/',
# Why: #4309 in Alexa global
'http://www.ghanaweb.com/',
# Why: #4310 in Alexa global
'http://www.cheetahmail.com/',
# Why: #4311 in Alexa global
'http://www.celebritynetworth.com/',
# Why: #4312 in Alexa global
'http://www.honda.com/',
# Why: #4313 in Alexa global
'http://www.regnum.ru/',
# Why: #4314 in Alexa global
'http://www.mediabistro.com/',
# Why: #4315 in Alexa global
'http://www.template-help.com/',
# Why: #4316 in Alexa global
'http://www.elektroda.pl/',
# Why: #4317 in Alexa global
'http://www.howlifeworks.com/',
# Why: #4318 in Alexa global
'http://avjavjav.com/',
# Why: #4319 in Alexa global
'http://www.justunfollow.com/',
# Why: #4320 in Alexa global
'http://www.kindgirls.com/',
# Why: #4321 in Alexa global
'http://www.xrea.com/',
# Why: #4322 in Alexa global
'http://www.songspk.cc/',
# Why: #4323 in Alexa global
'http://www.softbank.jp/',
# Why: #4324 in Alexa global
'http://www.pcstore.com.tw/',
# Why: #4325 in Alexa global
'http://www.impiego24.it/',
# Why: #4326 in Alexa global
'http://www.health.com/',
# Why: #4327 in Alexa global
'http://www.whitehouse.gov/',
# Why: #4328 in Alexa global
'http://www.ulozto.cz/',
# Why: #4329 in Alexa global
'http://www.clickindia.com/',
# Why: #4330 in Alexa global
'http://www.zoosnet.net/',
# Why: #4331 in Alexa global
'http://huihui.cn/',
# Why: #4332 in Alexa global
'http://yingjiesheng.com/',
# Why: #4333 in Alexa global
'http://www.copacet.com/',
# Why: #4334 in Alexa global
'http://www.fluege.de/',
# Why: #4335 in Alexa global
'http://www.uiuc.edu/',
# Why: #4336 in Alexa global
'http://www.funnymama.com/',
# Why: #4337 in Alexa global
'http://www.main.jp/',
# Why: #4338 in Alexa global
'http://www.popsugar.com/',
# Why: #4339 in Alexa global
'http://www.siyahgazete.com/',
# Why: #4340 in Alexa global
'http://www.ligatus.com/',
# Why: #4342 in Alexa global
'http://www.seomastering.com/',
# Why: #4343 in Alexa global
'http://www.nintendo.com/',
# Why: #4344 in Alexa global
'http://www.kuaidi100.com/',
# Why: #4345 in Alexa global
'http://www.motor-talk.de/',
# Why: #4346 in Alexa global
'http://www.p.ht/',
# Why: #4347 in Alexa global
'http://www.care.com/',
# Why: #4348 in Alexa global
'http://www.ttnet.com.tr/',
# Why: #4349 in Alexa global
'http://www.cifraclub.com.br/',
# Why: #4350 in Alexa global
'http://www.yunfile.com/',
# Why: #4351 in Alexa global
'http://www.telechargement-de-ouf.fr/',
# Why: #4352 in Alexa global
'http://www.hotpornshow.com/',
# Why: #4353 in Alexa global
'http://www.jra.go.jp/',
# Why: #4354 in Alexa global
'http://www.upenn.edu/',
# Why: #4355 in Alexa global
'http://www.brg8.com/',
# Why: #4356 in Alexa global
'http://www.techspot.com/',
# Why: #4357 in Alexa global
'http://www.citytalk.tw/',
# Why: #4358 in Alexa global
'http://www.milli.az/',
# Why: #4359 in Alexa global
'http://www.segundamano.mx/',
# Why: #4360 in Alexa global
'http://www.n4g.com/',
# Why: #4361 in Alexa global
'http://www.blogspot.no/',
# Why: #4362 in Alexa global
'http://www.frys.com/',
# Why: #4363 in Alexa global
'http://www.pixhost.org/',
# Why: #4364 in Alexa global
'http://www.washington.edu/',
# Why: #4365 in Alexa global
'http://www.rte.ie/',
# Why: #4366 in Alexa global
'http://www.lockerdome.com/',
# Why: #4367 in Alexa global
'http://www.sblo.jp/',
# Why: #4368 in Alexa global
'http://www.qassimy.com/',
# Why: #4369 in Alexa global
'http://www.signup.wordpress.com/',
# Why: #4370 in Alexa global
'http://www.sochiset.com/',
# Why: #4371 in Alexa global
'http://www.mycokerewards.com/',
# Why: #4372 in Alexa global
'http://www.collegeboard.org/',
# Why: #4373 in Alexa global
'http://www.fengyunzhibo.com/',
# Why: #4374 in Alexa global
'http://www.twickerz.com/',
# Why: #4375 in Alexa global
'http://www.bikroy.com/',
# Why: #4376 in Alexa global
'http://www.apkmania.co/',
# Why: #4378 in Alexa global
'http://www.webrankstats.com/',
# Why: #4379 in Alexa global
'http://www.dl-protect.com/',
# Why: #4380 in Alexa global
'http://www.dr.dk/',
# Why: #4381 in Alexa global
'http://www.emoneyspace.com/',
# Why: #4382 in Alexa global
'http://www.zakzak.co.jp/',
# Why: #4383 in Alexa global
'http://www.rae.es/',
# Why: #4384 in Alexa global
'http://www.theexgirlfriends.com/',
# Why: #4385 in Alexa global
'http://www.gigaom.com/',
# Why: #4386 in Alexa global
'http://www.burmeseclassic.com/',
# Why: #4387 in Alexa global
'http://www.wisc.edu/',
# Why: #4388 in Alexa global
'http://www.ocnk.net/',
# Why: #4389 in Alexa global
'http://www.arcot.com/',
# Why: #4390 in Alexa global
'http://www.paginasamarillas.es/',
# Why: #4391 in Alexa global
'http://www.tunisia-sat.com/',
# Why: #4392 in Alexa global
'http://www.medscape.com/',
# Why: #4393 in Alexa global
'http://www.gameninja.com/',
# Why: #4394 in Alexa global
'http://www.imperiaonline.org/',
# Why: #4395 in Alexa global
'http://www.2ememain.be/',
# Why: #4396 in Alexa global
'http://www.myshopping.com.au/',
# Why: #4397 in Alexa global
'http://www.nvidia.com/',
# Why: #4398 in Alexa global
'http://fanhuan.com/',
# Why: #4399 in Alexa global
'http://www.vista.ir/',
# Why: #4400 in Alexa global
'http://www.dish.com/',
# Why: #4401 in Alexa global
'http://www.cartrade.com/',
# Why: #4402 in Alexa global
'http://www.egopay.com/',
# Why: #4403 in Alexa global
'http://www.sonyentertainmentnetwork.com/',
# Why: #4404 in Alexa global
'http://www.myway.com/',
# Why: #4405 in Alexa global
'http://www.kariyer.net/',
# Why: #4406 in Alexa global
'http://www.thanhnien.com.vn/',
# Why: #4407 in Alexa global
'http://www.gulfnews.com/',
# Why: #4409 in Alexa global
'http://www.flagcounter.com/',
# Why: #4410 in Alexa global
'http://www.yfrog.com/',
# Why: #4411 in Alexa global
'http://www.bigstockphoto.com/',
# Why: #4412 in Alexa global
'http://www.occ.com.mx/',
# Why: #4413 in Alexa global
'http://www.3911.net/',
# Why: #4414 in Alexa global
'http://naszemiasto.pl/',
# Why: #4415 in Alexa global
'http://www.pgatour.com/',
# Why: #4416 in Alexa global
'http://zgjrw.com/',
# Why: #4417 in Alexa global
'http://www.fdj.fr/',
# Why: #4418 in Alexa global
'http://www.motogp.com/',
# Why: #4419 in Alexa global
'http://www.organogold.com/',
# Why: #4420 in Alexa global
'http://www.tamindir.com/',
# Why: #4421 in Alexa global
'http://www.ykb.com/',
# Why: #4422 in Alexa global
'http://www.biglion.ru/',
# Why: #4423 in Alexa global
'http://www.yourfiledownloader.com/',
# Why: #4424 in Alexa global
'http://www.publika.az/',
# Why: #4425 in Alexa global
'http://www.dealnews.com/',
# Why: #4426 in Alexa global
'http://www.warnerbros.com/',
# Why: #4427 in Alexa global
'http://www.ne10.uol.com.br/',
# Why: #4428 in Alexa global
'http://www.wpmudev.org/',
# Why: #4429 in Alexa global
'http://autotimes.com.cn/',
# Why: #4430 in Alexa global
'http://www.pu-results.info/',
# Why: #4431 in Alexa global
'http://www.usajobs.gov/',
# Why: #4432 in Alexa global
'http://www.adsprofitwiz.es/',
# Why: #4433 in Alexa global
'http://www.parallels.com/',
# Why: #4434 in Alexa global
'http://www.thqafawe3lom.com/',
# Why: #4435 in Alexa global
'http://www.xiazaiba.com/',
# Why: #4436 in Alexa global
'http://www.enikos.gr/',
# Why: #4437 in Alexa global
'http://www.m5zn.com/',
# Why: #4438 in Alexa global
'http://www.dir.bg/',
# Why: #4439 in Alexa global
'http://www.ripoffreport.com/',
# Why: #4440 in Alexa global
'http://www.jusbrasil.com.br/',
# Why: #4441 in Alexa global
'http://www.maxifoot.fr/',
# Why: #4442 in Alexa global
'http://www.eva.vn/',
# Why: #4443 in Alexa global
'http://www.dfnhk8.net/',
# Why: #4444 in Alexa global
'http://www.api.ning.com/',
# Why: #4445 in Alexa global
'http://www.ligtv.com.tr/',
# Why: #4446 in Alexa global
'http://www.openrice.com/',
# Why: #4448 in Alexa global
'http://www.999120.net/',
# Why: #4449 in Alexa global
'http://www.pho.to/',
# Why: #4450 in Alexa global
'http://www.indiblogger.in/',
# Why: #4451 in Alexa global
'http://1hai.cn/',
# Why: #4452 in Alexa global
'http://www.jtb.co.jp/',
# Why: #4453 in Alexa global
'http://tfile.me/',
# Why: #4454 in Alexa global
'http://kotak.com/',
# Why: #4455 in Alexa global
'http://www.katproxy.com/',
# Why: #4456 in Alexa global
'http://www.calottery.com/',
# Why: #4457 in Alexa global
'http://www.klmty.net/',
# Why: #4458 in Alexa global
'http://www.endomondo.com/',
# Why: #4459 in Alexa global
'http://www.uploadboy.com/',
# Why: #4460 in Alexa global
'http://www.8tracks.com/',
# Why: #4461 in Alexa global
'http://www.toranoana.jp/',
# Why: #4462 in Alexa global
'http://www.blox.pl/',
# Why: #4463 in Alexa global
'http://www.conrad.de/',
# Why: #4464 in Alexa global
'http://www.sonico.com/',
# Why: #4465 in Alexa global
'http://www.windguru.cz/',
# Why: #4467 in Alexa global
'http://tinhte.vn/',
# Why: #4468 in Alexa global
'http://www.jorudan.co.jp/',
# Why: #4469 in Alexa global
'http://www.grantland.com/',
# Why: #4470 in Alexa global
'http://www.seratnews.ir/',
# Why: #4471 in Alexa global
'http://www.solomono.ru/',
# Why: #4472 in Alexa global
'http://www.foreca.com/',
# Why: #4473 in Alexa global
'http://www.ziprecruiter.com/',
# Why: #4474 in Alexa global
'http://www.chime.in/',
# Why: #4475 in Alexa global
'http://www.intesasanpaolo.com/',
# Why: #4476 in Alexa global
'http://www.softonic.de/',
# Why: #4477 in Alexa global
'http://www.adtech.info/',
# Why: #4478 in Alexa global
'http://www.appgame.com/',
# Why: #4479 in Alexa global
'http://www.opendns.com/',
# Why: #4480 in Alexa global
'http://www.tubekitty.com/',
# Why: #4481 in Alexa global
'http://www.linguee.de/',
# Why: #4482 in Alexa global
'http://www.pepperfry.com/',
# Why: #4483 in Alexa global
'http://www.egou.com/',
# Why: #4484 in Alexa global
'http://www.tweakers.net/',
# Why: #4485 in Alexa global
'http://alfavita.gr/',
# Why: #4486 in Alexa global
'http://www.plusnetwork.com/',
# Why: #4487 in Alexa global
'http://www.timeweb.ru/',
# Why: #4488 in Alexa global
'http://www.maybeporn.com/',
# Why: #4489 in Alexa global
'http://www.gharreh.com/',
# Why: #4490 in Alexa global
'http://www.canoe.ca/',
# Why: #4491 in Alexa global
'http://parsine.com/',
# Why: #4492 in Alexa global
'http://www.yto.net.cn/',
# Why: #4493 in Alexa global
'http://www.ucla.edu/',
# Why: #4494 in Alexa global
'http://www.freeridegames.com/',
# Why: #4495 in Alexa global
'http://www.doctoroz.com/',
# Why: #4496 in Alexa global
'http://www.tradeindia.com/',
# Why: #4497 in Alexa global
'http://www.socialmediabar.com/',
# Why: #4498 in Alexa global
'http://www.yaske.net/',
# Why: #4499 in Alexa global
'http://www.miniih.com/',
# Why: #4500 in Alexa global
'http://www.blog.me/',
# Why: #4501 in Alexa global
'http://www.dn.se/',
# Why: #4502 in Alexa global
'http://www.almos3a.com/',
# Why: #4503 in Alexa global
'http://www.bbvanet.com.mx/',
# Why: #4504 in Alexa global
'http://www.fcbarcelona.com/',
# Why: #4505 in Alexa global
'http://www.web.com/',
# Why: #4506 in Alexa global
'http://www.raaga.com/',
# Why: #4507 in Alexa global
'http://www.yad2.co.il/',
# Why: #4508 in Alexa global
'http://2cto.com/',
# Why: #4509 in Alexa global
'http://www.nx8.com/',
# Why: #4510 in Alexa global
'http://www.modcloth.com/',
# Why: #4511 in Alexa global
'http://www.carsales.com.au/',
# Why: #4512 in Alexa global
'http://www.cooks.com/',
# Why: #4513 in Alexa global
'http://www.fileswap.com/',
# Why: #4514 in Alexa global
'http://www.egyptiansnews.com/',
# Why: #4515 in Alexa global
'http://www.azyya.com/',
# Why: #4516 in Alexa global
'http://www.masreat.com/',
# Why: #4517 in Alexa global
'http://airliners.net/',
# Why: #4518 in Alexa global
'http://www.com-1b.info/',
# Why: #4519 in Alexa global
'http://www.virginmobileusa.com/',
# Why: #4520 in Alexa global
'http://www.pleasantharborrv.com/',
# Why: #4521 in Alexa global
'http://www.gsmhosting.com/',
# Why: #4522 in Alexa global
'http://www.foxbusiness.com/',
# Why: #4523 in Alexa global
'http://www.delfi.lv/',
# Why: #4524 in Alexa global
'http://www.flightaware.com/',
# Why: #4525 in Alexa global
'http://www.ameli.fr/',
# Why: #4526 in Alexa global
'http://fbxtk.com/',
# Why: #4527 in Alexa global
'http://www.purdue.edu/',
# Why: #4528 in Alexa global
'http://www.sbi.co.in/',
# Why: #4529 in Alexa global
'http://www.fotka.pl/',
# Why: #4530 in Alexa global
'http://www.quicksprout.com/',
# Why: #4531 in Alexa global
'http://www.arjwana.com/',
# Why: #4533 in Alexa global
'http://www.affili.net/',
# Why: #4535 in Alexa global
'http://www.5sing.com/',
# Why: #4536 in Alexa global
'http://www.mozilla.com/',
# Why: #4537 in Alexa global
'http://www.apk.tw/',
# Why: #4538 in Alexa global
'http://www.taaza.com/',
# Why: #4539 in Alexa global
'http://www.onetad.com/',
# Why: #4540 in Alexa global
'http://www.vivastreet.it/',
# Why: #4541 in Alexa global
'http://www.leguide.com/',
# Why: #4542 in Alexa global
'http://www.casualclub.com/',
# Why: #4543 in Alexa global
'http://www.wanelo.com/',
# Why: #4544 in Alexa global
'http://www.ipsosinteractive.com/',
# Why: #4545 in Alexa global
'http://www.videohive.net/',
# Why: #4546 in Alexa global
'http://www.fenzhi.com/',
# Why: #4547 in Alexa global
'http://www.lefrecce.it/',
# Why: #4548 in Alexa global
'http://www.bugun.com.tr/',
# Why: #4549 in Alexa global
'http://www.p30world.com/',
# Why: #4550 in Alexa global
'http://www.cuevana.tv/',
# Why: #4551 in Alexa global
'http://www.joins.com/',
# Why: #4552 in Alexa global
'http://www.tvnet.lv/',
# Why: #4553 in Alexa global
'http://aliimg.com/',
# Why: #4554 in Alexa global
'http://www.bellanaija.com/',
# Why: #4555 in Alexa global
'http://www.startpagina.nl/',
# Why: #4556 in Alexa global
'http://www.incometaxindiaefiling.gov.in/',
# Why: #4557 in Alexa global
'http://www.bellemaison.jp/',
# Why: #4558 in Alexa global
'http://www.michigan.gov/',
# Why: #4559 in Alexa global
'http://www.harborfreight.com/',
# Why: #4560 in Alexa global
'http://www.fineartamerica.com/',
# Why: #4561 in Alexa global
'http://www.mysurvey.com/',
# Why: #4562 in Alexa global
'http://www.kapaza.be/',
# Why: #4563 in Alexa global
'http://www.adxpansion.com/',
# Why: #4564 in Alexa global
'http://www.thefind.com/',
# Why: #4565 in Alexa global
'http://www.priyo.com/',
# Why: #4567 in Alexa global
'http://www.burrp.com/',
# Why: #4568 in Alexa global
'http://www.sky.it/',
# Why: #4569 in Alexa global
'http://www.ipad-winners.info/',
# Why: #4570 in Alexa global
'http://www.usgs.gov/',
# Why: #4571 in Alexa global
'http://www.gavick.com/',
# Why: #4572 in Alexa global
'http://www.ellislab.com/',
# Why: #4573 in Alexa global
'http://www.voegol.com.br/',
# Why: #4574 in Alexa global
'http://www.paginebianche.it/',
# Why: #4575 in Alexa global
'http://www.getwebcake.com/',
# Why: #4576 in Alexa global
'http://www.zeroredirect1.com/',
# Why: #4577 in Alexa global
'http://www.gaiaonline.com/',
# Why: #4578 in Alexa global
'http://iqilu.com/',
# Why: #4579 in Alexa global
'http://www.bright.com/',
# Why: #4580 in Alexa global
'http://www.comunidades.net/',
# Why: #4581 in Alexa global
'http://www.webgains.com/',
# Why: #4582 in Alexa global
'http://www.overdrive.com/',
# Why: #4583 in Alexa global
'http://www.bigcommerce.com/',
# Why: #4584 in Alexa global
'http://www.paperpkads.com/',
# Why: #4585 in Alexa global
'http://www.imageporter.com/',
# Why: #4586 in Alexa global
'http://www.lenovo.com.cn/',
# Why: #4587 in Alexa global
'http://www.listal.com/',
# Why: #4588 in Alexa global
'http://www.virgula.uol.com.br/',
# Why: #4589 in Alexa global
'http://www.rbcdaily.ru/',
# Why: #4590 in Alexa global
'http://www.redbus.in/',
# Why: #4591 in Alexa global
'http://www.3bmeteo.com/',
# Why: #4592 in Alexa global
'http://www.earn-on.com/',
# Why: #4593 in Alexa global
'http://www.ae.com/',
# Why: #4594 in Alexa global
'http://www.shoutmeloud.com/',
# Why: #4595 in Alexa global
'http://www.oeeee.com/',
# Why: #4596 in Alexa global
'http://www.usenet.nl/',
# Why: #4597 in Alexa global
'http://www.mediotiempo.com/',
# Why: #4599 in Alexa global
'http://www.prostoporno.net/',
# Why: #4600 in Alexa global
'http://www.bangyoulater.com/',
# Why: #4601 in Alexa global
'http://www.comunio.de/',
# Why: #4602 in Alexa global
'http://www.pureleads.com/',
# Why: #4603 in Alexa global
'http://www.bakeca.it/',
# Why: #4604 in Alexa global
'http://www.trovit.it/',
# Why: #4605 in Alexa global
'http://www.fakku.net/',
# Why: #4606 in Alexa global
'http://www.indeed.fr/',
# Why: #4607 in Alexa global
'http://www.inquisitr.com/',
# Why: #4608 in Alexa global
'http://www.wizards.com/',
# Why: #4609 in Alexa global
'http://www.straightdope.com/',
# Why: #4610 in Alexa global
'http://www.pornpros.com/',
# Why: #4611 in Alexa global
'http://www.s-oman.net/',
# Why: #4612 in Alexa global
'http://www.facilisimo.com/',
# Why: #4613 in Alexa global
'http://www.dostor.org/',
# Why: #4614 in Alexa global
'http://tabloidpulsa.co.id/',
# Why: #4615 in Alexa global
'http://www.shafaf.ir/',
# Why: #4616 in Alexa global
'http://www.bt.dk/',
# Why: #4617 in Alexa global
'http://www.lent.az/',
# Why: #4618 in Alexa global
'http://www.filmaffinity.com/',
# Why: #4619 in Alexa global
'http://www.wjunction.com/',
# Why: #4620 in Alexa global
'http://www.gamefront.com/',
# Why: #4621 in Alexa global
'http://www.photoshelter.com/',
# Why: #4622 in Alexa global
'http://www.cheaptickets.com/',
# Why: #4623 in Alexa global
'http://www.meetic.it/',
# Why: #4624 in Alexa global
'http://www.seochat.com/',
# Why: #4625 in Alexa global
'http://www.livemixtapes.com/',
# Why: #4626 in Alexa global
'http://www.deadline.com/',
# Why: #4627 in Alexa global
'http://www.boingboing.net/',
# Why: #4628 in Alexa global
'http://www.lecai.com/',
# Why: #4629 in Alexa global
'http://www.onetravel.com/',
# Why: #4631 in Alexa global
'http://www.erotictube.me/',
# Why: #4632 in Alexa global
'http://www.svd.se/',
# Why: #4633 in Alexa global
'http://www.pcadvisor.co.uk/',
# Why: #4634 in Alexa global
'http://www.pravda.com.ua/',
# Why: #4636 in Alexa global
'http://www.afisha.ru/',
# Why: #4637 in Alexa global
'http://www.dressupgamesite.com/',
# Why: #4638 in Alexa global
'http://www.mercadopago.com/',
# Why: #4640 in Alexa global
'http://www.bangkokpost.com/',
# Why: #4641 in Alexa global
'http://www.dumpert.nl/',
# Why: #4642 in Alexa global
'http://www.monotaro.com/',
# Why: #4643 in Alexa global
'http://www.bloomingdales.com/',
# Why: #4644 in Alexa global
'http://www.ebayclassifieds.com/',
# Why: #4645 in Alexa global
'http://www.t-online.hu/',
# Why: #4646 in Alexa global
'http://www.2dbook.com/',
# Why: #4647 in Alexa global
'http://www.golfdigest.co.jp/',
# Why: #4648 in Alexa global
'http://www.thekitchn.com/',
# Why: #4649 in Alexa global
'http://www.halifax.co.uk/',
# Why: #4650 in Alexa global
'http://www.tanx.com/',
# Why: #4651 in Alexa global
'http://www.jutarnji.hr/',
# Why: #4652 in Alexa global
'http://www.petardashd.com/',
# Why: #4653 in Alexa global
'http://www.rookee.ru/',
# Why: #4654 in Alexa global
'http://www.showroomprive.com/',
# Why: #4655 in Alexa global
'http://www.sharepoint.com/',
# Why: #4656 in Alexa global
'http://liebiao.com/',
# Why: #4657 in Alexa global
'http://www.miibeian.gov.cn/',
# Why: #4658 in Alexa global
'http://www.pumbaporn.com/',
# Why: #4659 in Alexa global
'http://www.dwnews.com/',
# Why: #4660 in Alexa global
'http://www.sanguosha.com/',
# Why: #4661 in Alexa global
'http://www.pp.cc/',
# Why: #4662 in Alexa global
'http://www.myfc.ir/',
# Why: #4663 in Alexa global
'http://www.alicdn.com/',
# Why: #4664 in Alexa global
'http://www.carmax.com/',
# Why: #4665 in Alexa global
'http://www.defencenet.gr/',
# Why: #4666 in Alexa global
'http://www.cuantarazon.com/',
# Why: #4667 in Alexa global
'http://www.westernunion.com/',
# Why: #4668 in Alexa global
'http://www.links.cn/',
# Why: #4669 in Alexa global
'http://www.natunbarta.com/',
# Why: #4670 in Alexa global
'http://www.sekindo.com/',
# Why: #4671 in Alexa global
'http://78.cn/',
# Why: #4672 in Alexa global
'http://www.edublogs.org/',
# Why: #4673 in Alexa global
'http://www.hotmail.com/',
# Why: #4674 in Alexa global
'http://www.problogger.net/',
# Why: #4675 in Alexa global
'http://www.amardeshonline.com/',
# Why: #4676 in Alexa global
'http://www.gemius.com/',
# Why: #4677 in Alexa global
'http://www.egynews.net/',
# Why: #4678 in Alexa global
'http://www.indiabix.com/',
# Why: #4679 in Alexa global
'http://www.provincial.com/',
# Why: #4680 in Alexa global
'http://www.play.com/',
# Why: #4681 in Alexa global
'http://www.beslist.nl/',
# Why: #4682 in Alexa global
'http://www.nttdocomo.co.jp/',
# Why: #4683 in Alexa global
'http://www.shape.com/',
# Why: #4684 in Alexa global
'http://www.alhilal.com/',
# Why: #4685 in Alexa global
'http://www.irecommend.ru/',
# Why: #4686 in Alexa global
'http://www.cmmnts.com/',
# Why: #4687 in Alexa global
'http://www.1news.az/',
# Why: #4688 in Alexa global
'http://www.kinobanda.net/',
# Why: #4689 in Alexa global
'http://www.banamex.com.mx/',
# Why: #4690 in Alexa global
'http://www.cleanfiles.net/',
# Why: #4691 in Alexa global
'http://www.algeriaforum.net/',
# Why: #4692 in Alexa global
'http://www.zumi.pl/',
# Why: #4693 in Alexa global
'http://www.giallozafferano.it/',
# Why: #4694 in Alexa global
'http://www.news-postseven.com/',
# Why: #4695 in Alexa global
'http://www.firstcry.com/',
# Why: #4696 in Alexa global
'http://www.mhlw.go.jp/',
# Why: #4697 in Alexa global
'http://www.lookforporn.com/',
# Why: #4698 in Alexa global
'http://www.xxsy.net/',
# Why: #4699 in Alexa global
'http://www.scriptmafia.org/',
# Why: #4700 in Alexa global
'http://www.intodns.com/',
# Why: #4701 in Alexa global
'http://www.famitsu.com/',
# Why: #4702 in Alexa global
'http://www.eclipse.org/',
# Why: #4704 in Alexa global
'http://www.net-a-porter.com/',
# Why: #4705 in Alexa global
'http://www.btemplates.com/',
# Why: #4706 in Alexa global
'http://www.topshop.com/',
# Why: #4707 in Alexa global
'http://www.myvidster.com/',
# Why: #4708 in Alexa global
'http://www.calciomercato.com/',
# Why: #4709 in Alexa global
'http://www.arabyonline.com/',
# Why: #4710 in Alexa global
'http://www.lesechos.fr/',
# Why: #4711 in Alexa global
'http://www.empireavenue.com/',
# Why: #4712 in Alexa global
'http://www.damnlol.com/',
# Why: #4713 in Alexa global
'http://www.nukistream.com/',
# Why: #4714 in Alexa global
'http://www.wayport.net/',
# Why: #4715 in Alexa global
'http://www.buienradar.nl/',
# Why: #4716 in Alexa global
'http://www.vivastreet.co.in/',
# Why: #4717 in Alexa global
'http://www.kroger.com/',
# Why: #4718 in Alexa global
'http://www.geocaching.com/',
# Why: #4719 in Alexa global
'http://www.hunantv.com/',
# Why: #4720 in Alexa global
'http://www.fotolog.net/',
# Why: #4721 in Alexa global
'http://www.gunbroker.com/',
# Why: #4722 in Alexa global
'http://www.flalottery.com/',
# Why: #4723 in Alexa global
'http://www.priples.com/',
# Why: #4724 in Alexa global
'http://www.nlayer.net/',
# Why: #4725 in Alexa global
'http://www.trafficshop.com/',
# Why: #4726 in Alexa global
'http://www.standardmedia.co.ke/',
# Why: #4727 in Alexa global
'http://www.finanzen.net/',
# Why: #4728 in Alexa global
'http://www.meta.ua/',
# Why: #4729 in Alexa global
'http://www.gfy.com/',
# Why: #4730 in Alexa global
'http://www.playground.ru/',
# Why: #4731 in Alexa global
'http://www.rp5.ru/',
# Why: #4732 in Alexa global
'http://otnnetwork.net/',
# Why: #4733 in Alexa global
'http://tvmao.com/',
# Why: #4734 in Alexa global
'http://www.hir.ma/',
# Why: #4735 in Alexa global
'http://www.twilightsex.com/',
# Why: #4736 in Alexa global
'http://www.haodou.com/',
# Why: #4737 in Alexa global
'http://www.virgin-atlantic.com/',
# Why: #4738 in Alexa global
'http://www.ankieta-online.pl/',
# Why: #4739 in Alexa global
'http://www.kinkytube.me/',
# Why: #4740 in Alexa global
'http://www.123mplayer.com/',
# Why: #4741 in Alexa global
'http://www.elifting.com/',
# Why: #4742 in Alexa global
'http://www.akiba-online.com/',
# Why: #4743 in Alexa global
'http://www.tcsbank.ru/',
# Why: #4744 in Alexa global
'http://www.gametrailers.com/',
# Why: #4745 in Alexa global
'http://www.dihitt.com/',
# Why: #4746 in Alexa global
'http://www.momoshop.com.tw/',
# Why: #4747 in Alexa global
'http://www.fancy.com/',
# Why: #4748 in Alexa global
'http://admaimai.com/',
# Why: #4749 in Alexa global
'http://www.61.com/',
# Why: #4750 in Alexa global
'http://www.hotchatdirect.com/',
# Why: #4751 in Alexa global
'http://www.penesalud.com/',
# Why: #4752 in Alexa global
'http://www.adsupplyads.com/',
# Why: #4753 in Alexa global
'http://www.robokassa.ru/',
# Why: #4754 in Alexa global
'http://www.brooonzyah.net/',
# Why: #4755 in Alexa global
'http://www.moviesmobile.net/',
# Why: #4756 in Alexa global
'http://www.fuck-mates.com/',
# Why: #4757 in Alexa global
'http://www.ch-news.com/',
# Why: #4758 in Alexa global
'http://www.cwan.com/',
# Why: #4759 in Alexa global
'http://enorth.com.cn/',
# Why: #4760 in Alexa global
'http://www.mec.gov.br/',
# Why: #4761 in Alexa global
'http://www.libertytimes.com.tw/',
# Why: #4762 in Alexa global
'http://www.musiciansfriend.com/',
# Why: #4763 in Alexa global
'http://www.angrybirds.com/',
# Why: #4764 in Alexa global
'http://www.ebrun.com/',
# Why: #4765 in Alexa global
'http://www.kienthuc.net.vn/',
# Why: #4766 in Alexa global
'http://www.morningstar.com/',
# Why: #4767 in Alexa global
'http://www.rasekhoon.net/',
# Why: #4768 in Alexa global
'http://www.techsmith.com/',
# Why: #4769 in Alexa global
'http://www.diy.com/',
# Why: #4770 in Alexa global
'http://www.awwwards.com/',
# Why: #4771 in Alexa global
'http://www.ajc.com/',
# Why: #4772 in Alexa global
'http://www.akismet.com/',
# Why: #4773 in Alexa global
'http://www.itar-tass.com/',
# Why: #4774 in Alexa global
'http://www.60secprofit.com/',
# Why: #4775 in Alexa global
'http://www.videoweed.es/',
# Why: #4776 in Alexa global
'http://www.life.com.tw/',
# Why: #4777 in Alexa global
'http://www.guitarcenter.com/',
# Why: #4778 in Alexa global
'http://www.tv2.dk/',
# Why: #4779 in Alexa global
'http://www.narutom.com/',
# Why: #4780 in Alexa global
'http://www.bittorrent.com/',
# Why: #4781 in Alexa global
'http://www.unionpaysecure.com/',
# Why: #4782 in Alexa global
'http://www.91jm.com/',
# Why: #4783 in Alexa global
'http://www.licindia.in/',
# Why: #4784 in Alexa global
'http://www.bama.ir/',
# Why: #4785 in Alexa global
'http://www.hertz.com/',
# Why: #4786 in Alexa global
'http://www.propertyguru.com.sg/',
# Why: #4787 in Alexa global
'http://city8.com/',
# Why: #4788 in Alexa global
'http://www.blu-ray.com/',
# Why: #4789 in Alexa global
'http://www.abebooks.com/',
# Why: #4790 in Alexa global
'http://www.adidas.com/',
# Why: #4791 in Alexa global
'http://www.weathernews.jp/',
# Why: #4792 in Alexa global
'http://www.sing365.com/',
# Why: #4793 in Alexa global
'http://www.qq163.com/',
# Why: #4794 in Alexa global
'http://www.fashionandyou.com/',
# Why: #4795 in Alexa global
'http://www.lietou.com/',
# Why: #4796 in Alexa global
'http://pia.jp/',
# Why: #4797 in Alexa global
'http://www.eniro.se/',
# Why: #4798 in Alexa global
'http://pengpeng.com/',
# Why: #4799 in Alexa global
'http://haibao.com/',
# Why: #4800 in Alexa global
'http://www.jxedt.com/',
# Why: #4801 in Alexa global
'http://www.crsky.com/',
# Why: #4802 in Alexa global
'http://www.nyu.edu/',
# Why: #4803 in Alexa global
'http://www.minecraftskins.com/',
# Why: #4804 in Alexa global
'http://yangtse.com/',
# Why: #4805 in Alexa global
'http://www.almstba.co/',
# Why: #4806 in Alexa global
'http://parsnews.com/',
# Why: #4807 in Alexa global
'http://www.twiends.com/',
# Why: #4808 in Alexa global
'http://www.dkb.de/',
# Why: #4809 in Alexa global
'http://www.friendscout24.de/',
# Why: #4810 in Alexa global
'http://www.aviny.com/',
# Why: #4811 in Alexa global
'http://www.dig.do/',
# Why: #4812 in Alexa global
'http://www.gamestorrents.com/',
# Why: #4813 in Alexa global
'http://www.guru.com/',
# Why: #4814 in Alexa global
'http://www.bostonglobe.com/',
# Why: #4815 in Alexa global
'http://www.brandalley.fr/',
# Why: #4816 in Alexa global
'http://www.tn.com.ar/',
# Why: #4817 in Alexa global
'http://www.yourwebsite.com/',
# Why: #4818 in Alexa global
'http://www.istgah.com/',
# Why: #4819 in Alexa global
'http://www.cib.com.cn/',
# Why: #4820 in Alexa global
'http://www.e-familynet.com/',
# Why: #4821 in Alexa global
'http://www.hotshame.com/',
# Why: #4822 in Alexa global
'http://www.volkskrant.nl/',
# Why: #4823 in Alexa global
'http://www.karnaval.com/',
# Why: #4824 in Alexa global
'http://www.team-bhp.com/',
# Why: #4825 in Alexa global
'http://www.sinemalar.com/',
# Why: #4826 in Alexa global
'http://www.ipko.pl/',
# Why: #4827 in Alexa global
'http://www.fastcompany.com/',
# Why: #4828 in Alexa global
'http://www.embedupload.com/',
# Why: #4829 in Alexa global
'http://www.gzmama.com/',
# Why: #4830 in Alexa global
'http://www.icicidirect.com/',
# Why: #4831 in Alexa global
'http://www.whatismyip.com/',
# Why: #4832 in Alexa global
'http://www.siasat.pk/',
# Why: #4833 in Alexa global
'http://www.rbi.org.in/',
# Why: #4834 in Alexa global
'http://www.amarillasinternet.com/',
# Why: #4835 in Alexa global
'http://www.netvasco.com.br/',
# Why: #4836 in Alexa global
'http://www.ctvnews.ca/',
# Why: #4837 in Alexa global
'http://www.gad.de/',
# Why: #4838 in Alexa global
'http://www.dailyfx.com/',
# Why: #4839 in Alexa global
'http://www.smartklicks.com/',
# Why: #4840 in Alexa global
'http://www.qoo10.sg/',
# Why: #4841 in Alexa global
'http://www.mlit.go.jp/',
# Why: #4842 in Alexa global
'http://www.cmbc.com.cn/',
# Why: #4843 in Alexa global
'http://www.loc.gov/',
# Why: #4845 in Alexa global
'http://www.playerflv.com/',
# Why: #4846 in Alexa global
'http://www.uta-net.com/',
# Why: #4847 in Alexa global
'http://www.afl.com.au/',
# Why: #4848 in Alexa global
'http://www.mainlink.ru/',
# Why: #4849 in Alexa global
'http://www.pricedekho.com/',
# Why: #4850 in Alexa global
'http://www.wickedfire.com/',
# Why: #4851 in Alexa global
'http://www.rlslog.net/',
# Why: #4852 in Alexa global
'http://www.raiffeisen.at/',
# Why: #4853 in Alexa global
'http://www.easports.com/',
# Why: #4854 in Alexa global
'http://www.groupon.fr/',
# Why: #4855 in Alexa global
'http://www.o2.co.uk/',
# Why: #4856 in Alexa global
'http://www.irangrand.ir/',
# Why: #4857 in Alexa global
'http://www.vuku.tv/',
# Why: #4858 in Alexa global
'http://www.play.pl/',
# Why: #4859 in Alexa global
'http://www.mxtoolbox.com/',
# Why: #4860 in Alexa global
'http://www.promiflash.de/',
# Why: #4861 in Alexa global
'http://www.linode.com/',
# Why: #4862 in Alexa global
'http://www.familysearch.org/',
# Why: #4863 in Alexa global
'http://www.publico.pt/',
# Why: #4864 in Alexa global
'http://www.freepornvideo.me/',
# Why: #4865 in Alexa global
'http://www.uploadbaz.com/',
# Why: #4866 in Alexa global
'http://www.tocmai.ro/',
# Why: #4867 in Alexa global
'http://www.cimbclicks.com.my/',
# Why: #4868 in Alexa global
'http://www.bestporntube.me/',
# Why: #4869 in Alexa global
'http://www.lainformacion.com/',
# Why: #4870 in Alexa global
'http://herschina.com/',
# Why: #4871 in Alexa global
'http://www.fontsquirrel.com/',
# Why: #4872 in Alexa global
'http://www.blip.tv/',
# Why: #4873 in Alexa global
'http://www.caranddriver.com/',
# Why: #4874 in Alexa global
'http://www.qld.gov.au/',
# Why: #4876 in Alexa global
'http://www.pons.eu/',
# Why: #4877 in Alexa global
'http://nascar.com/',
# Why: #4878 in Alexa global
'http://www.hrsmart.com/',
# Why: #4879 in Alexa global
'http://www.tripadvisor.com.au/',
# Why: #4880 in Alexa global
'http://www.hs.fi/',
# Why: #4881 in Alexa global
'http://www.auspost.com.au/',
# Why: #4882 in Alexa global
'http://www.sponsoredreviews.com/',
# Why: #4883 in Alexa global
'http://www.webopedia.com/',
# Why: #4884 in Alexa global
'http://www.sovsport.ru/',
# Why: #4885 in Alexa global
'http://www.firestorage.jp/',
# Why: #4886 in Alexa global
'http://www.bancsabadell.com/',
# Why: #4887 in Alexa global
'http://www.prettyporntube.com/',
# Why: #4888 in Alexa global
'http://www.sodahead.com/',
# Why: #4889 in Alexa global
'http://www.ovi.com/',
# Why: #4890 in Alexa global
'http://www.aleseriale.pl/',
# Why: #4891 in Alexa global
'http://www.mnwan.com/',
# Why: #4892 in Alexa global
'http://www.callofduty.com/',
# Why: #4893 in Alexa global
'http://www.sportskeeda.com/',
# Why: #4894 in Alexa global
'http://cp.cx/',
# Why: #4895 in Alexa global
'http://www.researchgate.net/',
# Why: #4896 in Alexa global
'http://www.michaels.com/',
# Why: #4897 in Alexa global
'http://www.createspace.com/',
# Why: #4898 in Alexa global
'http://www.sprintrade.com/',
# Why: #4899 in Alexa global
'http://www.anonymouse.org/',
# Why: #4900 in Alexa global
'http://www.hautelook.com/',
# Why: #4902 in Alexa global
'http://4gamer.net/',
# Why: #4903 in Alexa global
'http://www.accorhotels.com/',
# Why: #4904 in Alexa global
'http://www.roomkey.com/',
# Why: #4905 in Alexa global
'http://www.guildwars2.com/',
# Why: #4906 in Alexa global
'http://www.poco.cn/',
# Why: #4908 in Alexa global
'http://www.diamond.jp/',
# Why: #4909 in Alexa global
'http://www.cargurus.com/',
# Why: #4910 in Alexa global
'http://www.wpengine.com/',
# Why: #4911 in Alexa global
'http://www.iis.net/',
# Why: #4912 in Alexa global
'http://www.vendaria.com/',
# Why: #4913 in Alexa global
'http://www.argentinawarez.com/',
# Why: #4914 in Alexa global
'http://www.webdesigntunes.com/',
# Why: #4916 in Alexa global
'http://www.allvoices.com/',
# Why: #4917 in Alexa global
'http://www.eprize.com/',
# Why: #4918 in Alexa global
'http://www.pmu.fr/',
# Why: #4919 in Alexa global
'http://www.carrefour.fr/',
# Why: #4922 in Alexa global
'http://www.tax.gov.ir/',
# Why: #4924 in Alexa global
'http://www.ruelala.com/',
# Why: #4925 in Alexa global
'http://www.mainspy.ru/',
# Why: #4926 in Alexa global
'http://www.phpwind.net/',
# Why: #4927 in Alexa global
'http://www.loteriasyapuestas.es/',
# Why: #4928 in Alexa global
'http://www.musavat.com/',
# Why: #4929 in Alexa global
'http://www.lenskart.com/',
# Why: #4930 in Alexa global
'http://www.tv-asahi.co.jp/',
# Why: #4931 in Alexa global
'http://www.refinery29.com/',
# Why: #4932 in Alexa global
'http://www.888poker.es/',
# Why: #4933 in Alexa global
'http://www.denverpost.com/',
# Why: #4934 in Alexa global
'http://www.who.int/',
# Why: #4935 in Alexa global
'http://www.thesims3.com/',
# Why: #4936 in Alexa global
'http://www.jerkhour.com/',
# Why: #4937 in Alexa global
'http://www.lyricsmode.com/',
# Why: #4938 in Alexa global
'http://www.ivillage.com/',
# Why: #4939 in Alexa global
'http://qyer.com/',
# Why: #4940 in Alexa global
'http://www.hktdc.com/',
# Why: #4941 in Alexa global
'http://www.pornoload.com/',
# Why: #4942 in Alexa global
'http://www.bluedart.com/',
# Why: #4943 in Alexa global
'http://www.here.com/',
# Why: #4944 in Alexa global
'http://www.philips.com/',
# Why: #4945 in Alexa global
'http://www.dsebd.org/',
# Why: #4946 in Alexa global
'http://www.tubidy.mobi/',
# Why: #4947 in Alexa global
'http://www.stream.cz/',
# Why: #4948 in Alexa global
'http://www.infojobs.com.br/',
# Why: #4949 in Alexa global
'http://www.soft98.ir/',
# Why: #4950 in Alexa global
'http://www.bolsaparanovatos.com/',
# Why: #4951 in Alexa global
'http://www.mercador.ro/',
# Why: #4952 in Alexa global
'http://www.neogaf.com/',
# Why: #4953 in Alexa global
'http://www.yardbarker.com/',
# Why: #4954 in Alexa global
'http://www.rapidlibrary.com/',
# Why: #4955 in Alexa global
'http://www.xxeronetxx.info/',
# Why: #4956 in Alexa global
'http://www.kaiserpermanente.org/',
# Why: #4957 in Alexa global
'http://www.telstra.com.au/',
# Why: #4958 in Alexa global
'http://www.contra.gr/',
# Why: #4959 in Alexa global
'http://www.laredoute.it/',
# Why: #4960 in Alexa global
'http://www.lipsum.com/',
# Why: #4961 in Alexa global
'http://www.twitlonger.com/',
# Why: #4962 in Alexa global
'http://www.hln.be/',
# Why: #4963 in Alexa global
'http://www.53kf.com/',
# Why: #4964 in Alexa global
'http://www.gofundme.com/',
# Why: #4965 in Alexa global
'http://www.carigold.com/',
# Why: #4966 in Alexa global
'http://www.clips4sale.com/',
# Why: #4967 in Alexa global
'http://www.focalprice.com/',
# Why: #4968 in Alexa global
'http://www.1111.com.tw/',
# Why: #4969 in Alexa global
'http://www.gameaholic.com/',
# Why: #4970 in Alexa global
'http://www.presstv.ir/',
# Why: #4971 in Alexa global
'http://www.puu.sh/',
# Why: #4973 in Alexa global
'http://www.filmlinks4u.net/',
# Why: #4974 in Alexa global
'http://www.traffic-delivery.com/',
# Why: #4975 in Alexa global
'http://www.bebo.com/',
# Why: #4976 in Alexa global
'http://enter.ru/',
# Why: #4977 in Alexa global
'http://www.shufoo.net/',
# Why: #4978 in Alexa global
'http://www.vivo.com.br/',
# Why: #4979 in Alexa global
'http://www.jizzhut.com/',
# Why: #4980 in Alexa global
'http://www.1jux.net/',
# Why: #4981 in Alexa global
'http://www.serebii.net/',
# Why: #4982 in Alexa global
'http://www.translate.ru/',
# Why: #4983 in Alexa global
'http://www.mtv3.fi/',
# Why: #4984 in Alexa global
'http://www.njuskalo.hr/',
# Why: #4985 in Alexa global
'http://www.bell.ca/',
# Why: #4986 in Alexa global
'http://www.myheritage.com/',
# Why: #4987 in Alexa global
'http://www.cic.fr/',
# Why: #4988 in Alexa global
'http://www.mercurynews.com/',
# Why: #4989 in Alexa global
'http://www.alaan.tv/',
# Why: #4990 in Alexa global
'http://www.econsultancy.com/',
# Why: #4991 in Alexa global
'http://www.pornhost.com/',
# Why: #4992 in Alexa global
'http://www.a8.net/',
# Why: #4994 in Alexa global
'http://www.netzero.net/',
# Why: #4995 in Alexa global
'http://www.tracklab101.com/',
# Why: #4996 in Alexa global
'http://www.spanishdict.com/',
# Why: #4997 in Alexa global
'http://www.amctv.com/',
# Why: #4998 in Alexa global
'http://www.erepublik.com/',
# Why: #4999 in Alexa global
'http://www.mk.ru/',
# Why: #5000 in Alexa global
'http://www.publico.es/',
# Why: #5001 in Alexa global
'http://www.newegg.com.cn/',
# Why: #5002 in Alexa global
'http://www.fux.com/',
# Why: #5003 in Alexa global
'http://www.webcamtoy.com/',
# Why: #5004 in Alexa global
'http://www.rahnama.com/',
# Why: #5005 in Alexa global
'http://www.wanyh.com/',
# Why: #5006 in Alexa global
'http://www.ecplaza.net/',
# Why: #5007 in Alexa global
'http://www.mol.gov.sa/',
# Why: #5008 in Alexa global
'http://www.torrentday.com/',
# Why: #5009 in Alexa global
'http://www.hsbc.com.br/',
# Why: #5010 in Alexa global
'http://www.interoperabilitybridges.com/',
# Why: #5011 in Alexa global
'http://www.billmelater.com/',
# Why: #5012 in Alexa global
'http://www.speedanalysis.com/',
# Why: #5013 in Alexa global
'http://www.volusion.com/',
# Why: #5014 in Alexa global
'http://www.mixcloud.com/',
# Why: #5015 in Alexa global
'http://www.weeronline.nl/',
# Why: #5016 in Alexa global
'http://www.tiancity.com/',
# Why: #5017 in Alexa global
'http://www.thehun.com/',
# Why: #5018 in Alexa global
'http://www.comparisons.org/',
# Why: #5019 in Alexa global
'http://www.eurosport.ru/',
# Why: #5020 in Alexa global
'http://www.trendyol.com/',
# Why: #5021 in Alexa global
'http://www.7120.com/',
# Why: #5022 in Alexa global
'http://www.eldiariodeamerica.com/',
# Why: #5023 in Alexa global
'http://www.fap8.com/',
# Why: #5024 in Alexa global
'http://www.joyme.com/',
# Why: #5025 in Alexa global
'http://www.ufl.edu/',
# Why: #5026 in Alexa global
'http://www.cuantocabron.com/',
# Why: #5027 in Alexa global
'http://www.hotmart.com.br/',
# Why: #5028 in Alexa global
'http://www.wolframalpha.com/',
# Why: #5029 in Alexa global
'http://www.cpasbien.com/',
# Why: #5030 in Alexa global
'http://www.sanalpazar.com/',
# Why: #5031 in Alexa global
'http://www.publipt.com/',
# Why: #5032 in Alexa global
'http://www.9ku.com/',
# Why: #5033 in Alexa global
'http://www.officemax.com/',
# Why: #5034 in Alexa global
'http://www.cuny.edu/',
# Why: #5035 in Alexa global
'http://www.gem.pl/',
# Why: #5036 in Alexa global
'http://www.waelelebrashy.com/',
# Why: #5037 in Alexa global
'http://www.coinmill.com/',
# Why: #5038 in Alexa global
'http://www.bet.com/',
# Why: #5039 in Alexa global
'http://www.moskva.fm/',
# Why: #5040 in Alexa global
'http://www.groupalia.com/',
# Why: #5041 in Alexa global
'http://131.com/',
# Why: #5042 in Alexa global
'http://www.pichak.net/',
# Why: #5043 in Alexa global
'http://www.theatlanticwire.com/',
# Why: #5044 in Alexa global
'http://tokyo-sports.co.jp/',
# Why: #5045 in Alexa global
'http://www.laptopmag.com/',
# Why: #5046 in Alexa global
'http://www.worldpay.com/',
# Why: #5047 in Alexa global
'http://www.groupon.pl/',
# Why: #5048 in Alexa global
'http://www.imeimama.com/',
# Why: #5049 in Alexa global
'http://www.torrents.net/',
# Why: #5051 in Alexa global
'http://www.britishcouncil.org/',
# Why: #5052 in Alexa global
'http://www.letsbonus.com/',
# Why: #5053 in Alexa global
'http://www.e-monsite.com/',
# Why: #5054 in Alexa global
'http://www.url.org/',
# Why: #5055 in Alexa global
'http://www.discuz.com/',
# Why: #5056 in Alexa global
'http://www.freepornsite.me/',
# Why: #5057 in Alexa global
'http://www.cheatcc.com/',
# Why: #5058 in Alexa global
'http://www.magicmovies.com/',
# Why: #5059 in Alexa global
'http://www.laterooms.com/',
# Why: #5060 in Alexa global
'http://www.du.ac.in/',
# Why: #5062 in Alexa global
'http://www.uservoice.com/',
# Why: #5063 in Alexa global
'http://www.discas.net/',
# Why: #5064 in Alexa global
'http://www.d1g.com/',
# Why: #5065 in Alexa global
'http://www.explicittube.com/',
# Why: #5066 in Alexa global
'http://www.e-autopay.com/',
# Why: #5067 in Alexa global
'http://3lian.com/',
# Why: #5068 in Alexa global
'http://www.oopsmovs.com/',
# Why: #5069 in Alexa global
'http://www.agenziaentrate.gov.it/',
# Why: #5070 in Alexa global
'http://www.ufc.com/',
# Why: #5071 in Alexa global
'http://www.mooshare.biz/',
# Why: #5072 in Alexa global
'http://www.ankang06.org/',
# Why: #5073 in Alexa global
'http://www.betradar.com/',
# Why: #5074 in Alexa global
'http://www.explosm.net/',
# Why: #5075 in Alexa global
'http://www.silkroad.com/',
# Why: #5076 in Alexa global
'http://www.crackberry.com/',
# Why: #5078 in Alexa global
'http://www.toyota.com/',
# Why: #5079 in Alexa global
'http://www.bongda.com.vn/',
# Why: #5080 in Alexa global
'http://www.europapress.es/',
# Why: #5081 in Alexa global
'http://www.mlxchange.com/',
# Why: #5082 in Alexa global
'http://www.plius.lt/',
# Why: #5083 in Alexa global
'http://www.pitchfork.com/',
# Why: #5084 in Alexa global
'http://www.groupon.de/',
# Why: #5085 in Alexa global
'http://www.hollisterco.com/',
# Why: #5086 in Alexa global
'http://www.hasoffers.com/',
# Why: #5087 in Alexa global
'http://www.miami.com/',
# Why: #5089 in Alexa global
'http://www.dslreports.com/',
# Why: #5090 in Alexa global
'http://www.blinkweb.com/',
# Why: #5091 in Alexa global
'http://www.alamaula.com/',
# Why: #5092 in Alexa global
'http://www.leonardo.it/',
# Why: #5093 in Alexa global
'http://www.very.co.uk/',
# Why: #5094 in Alexa global
'http://www.globalsources.com/',
# Why: #5095 in Alexa global
'http://www.viator.com/',
# Why: #5096 in Alexa global
'http://www.greenwichmeantime.com/',
# Why: #5097 in Alexa global
'http://www.appannie.com/',
# Why: #5099 in Alexa global
'http://www.eldorado.ru/',
# Why: #5100 in Alexa global
'http://www.canadiantire.ca/',
# Why: #5101 in Alexa global
'http://www.enjin.com/',
# Why: #5102 in Alexa global
'http://szhome.com/',
# Why: #5103 in Alexa global
'http://www.news-us.jp/',
# Why: #5104 in Alexa global
'http://www.phim3s.net/',
# Why: #5105 in Alexa global
'http://www.bash.im/',
# Why: #5106 in Alexa global
'http://www.immi.gov.au/',
# Why: #5107 in Alexa global
'http://www.cwb.gov.tw/',
# Why: #5108 in Alexa global
'http://www.enjoydressup.com/',
# Why: #5109 in Alexa global
'http://www.thesuperficial.com/',
# Why: #5110 in Alexa global
'http://www.bunshun.jp/',
# Why: #5111 in Alexa global
'http://www.91mobiles.com/',
# Why: #5112 in Alexa global
'http://www.libertaddigital.com/',
# Why: #5113 in Alexa global
'http://www.po-kaki-to.com/',
# Why: #5114 in Alexa global
'http://www.truelocal.com.au/',
# Why: #5115 in Alexa global
'http://www.centrum24.pl/',
# Why: #5116 in Alexa global
'http://www.zylom.com/',
# Why: #5117 in Alexa global
'http://www.mypornmotion.com/',
# Why: #5118 in Alexa global
'http://www.skybet.com/',
# Why: #5119 in Alexa global
'http://www.soccermanager.com/',
# Why: #5120 in Alexa global
'http://www.styleauto.com.cn/',
# Why: #5121 in Alexa global
'http://www.poriborton.com/',
# Why: #5122 in Alexa global
'http://www.mozzi.com/',
# Why: #5123 in Alexa global
'http://www.eset.com/',
# Why: #5124 in Alexa global
'http://www.chelseafc.com/',
# Why: #5125 in Alexa global
'http://www.amulyam.in/',
# Why: #5126 in Alexa global
'http://www.argaam.com/',
# Why: #5127 in Alexa global
'http://www.mnn.com/',
# Why: #5128 in Alexa global
'http://www.papystreaming.com/',
# Why: #5129 in Alexa global
'http://www.hostelbookers.com/',
# Why: #5130 in Alexa global
'http://www.vatera.hu/',
# Why: #5131 in Alexa global
'http://www.pciconcursos.com.br/',
# Why: #5132 in Alexa global
'http://www.milenio.com/',
# Why: #5133 in Alexa global
'http://www.yellowbook.com/',
# Why: #5134 in Alexa global
'http://www.mobilepriceindia.co.in/',
# Why: #5135 in Alexa global
'http://www.naked.com/',
# Why: #5136 in Alexa global
'http://www.lazada.vn/',
# Why: #5137 in Alexa global
'http://www.70e.com/',
# Why: #5138 in Alexa global
'http://www.mapy.cz/',
# Why: #5139 in Alexa global
'http://www.vodafone.es/',
# Why: #5140 in Alexa global
'http://www.zbiornik.com/',
# Why: #5142 in Alexa global
'http://www.fc2web.com/',
# Why: #5143 in Alexa global
'http://www.rghost.ru/',
# Why: #5144 in Alexa global
'http://www.avvo.com/',
# Why: #5145 in Alexa global
'http://www.fardanews.com/',
# Why: #5146 in Alexa global
'http://www.pcbeta.com/',
# Why: #5147 in Alexa global
'http://www.hibapress.com/',
# Why: #5148 in Alexa global
'http://www.gamehouse.com/',
# Why: #5149 in Alexa global
'http://www.macworld.com/',
# Why: #5150 in Alexa global
'http://www.qantas.com.au/',
# Why: #5151 in Alexa global
'http://www.dba.dk/',
# Why: #5152 in Alexa global
'http://www.inttrax.com/',
# Why: #5153 in Alexa global
'http://www.conejox.com/',
# Why: #5154 in Alexa global
'http://www.immobiliare.it/',
# Why: #5155 in Alexa global
'http://www.sparkasse.at/',
# Why: #5156 in Alexa global
'http://www.udemy.com/',
# Why: #5157 in Alexa global
'http://www.accenture.com/',
# Why: #5158 in Alexa global
'http://www.pokerstrategy.com/',
# Why: #5159 in Alexa global
'http://www.leroymerlin.fr/',
# Why: #5160 in Alexa global
'http://www.sweetkiss.me/',
# Why: #5161 in Alexa global
'http://www.siriusxm.com/',
# Why: #5162 in Alexa global
'http://www.nieuwsblad.be/',
# Why: #5163 in Alexa global
'http://www.blogun.ru/',
# Why: #5164 in Alexa global
'http://www.ojogos.com.br/',
# Why: #5165 in Alexa global
'http://www.lexilogos.com/',
# Why: #5166 in Alexa global
'http://www.c-and-a.com/',
# Why: #5167 in Alexa global
'http://www.authorstream.com/',
# Why: #5168 in Alexa global
'http://www.newser.com/',
# Why: #5169 in Alexa global
'http://www.minube.com/',
# Why: #5170 in Alexa global
'http://www.lawtime.cn/',
# Why: #5171 in Alexa global
'http://www.yellowpages.com.au/',
# Why: #5172 in Alexa global
'http://www.torrentfreak.com/',
# Why: #5173 in Alexa global
'http://www.expatriates.com/',
# Why: #5174 in Alexa global
'http://51credit.com/',
# Why: #5175 in Alexa global
'http://www.rawstory.com/',
# Why: #5176 in Alexa global
'http://www.crictime.com/',
# Why: #5177 in Alexa global
'http://www.ladolcevitae.com/',
# Why: #5178 in Alexa global
'http://www.astro.com/',
# Why: #5179 in Alexa global
'http://www.riverisland.com/',
# Why: #5180 in Alexa global
'http://www.myzamana.com/',
# Why: #5181 in Alexa global
'http://www.xpg.com.br/',
# Why: #5182 in Alexa global
'http://www.svt.se/',
# Why: #5183 in Alexa global
'http://www.ymlp.com/',
# Why: #5184 in Alexa global
'http://www.coupondunia.in/',
# Why: #5185 in Alexa global
'http://www.mymovies.it/',
# Why: #5186 in Alexa global
'http://www.portaleducacao.com.br/',
# Why: #5187 in Alexa global
'http://watchabc.go.com/',
# Why: #5188 in Alexa global
'http://www.scrabblefinder.com/',
# Why: #5189 in Alexa global
'http://www.2hua.com/',
# Why: #5190 in Alexa global
'http://www.guiaconsumidor.com/',
# Why: #5191 in Alexa global
'http://jzpt.com/',
# Why: #5192 in Alexa global
'http://www.jino.ru/',
# Why: #5193 in Alexa global
'http://www.google.tt/',
# Why: #5194 in Alexa global
'http://www.addwallet.com/',
# Why: #5195 in Alexa global
'http://www.enom.com/',
# Why: #5197 in Alexa global
'http://www.searchfreemp3.com/',
# Why: #5198 in Alexa global
'http://www.spox.com/',
# Why: #5199 in Alexa global
'http://www.ename.net/',
# Why: #5200 in Alexa global
'http://www.researchnow.com/',
# Why: #5201 in Alexa global
'http://www.decathlon.fr/',
# Why: #5202 in Alexa global
'http://www.j-cast.com/',
# Why: #5203 in Alexa global
'http://www.updatetube.com/',
# Why: #5204 in Alexa global
'http://www.polo.com/',
# Why: #5205 in Alexa global
'http://www.asiaone.com/',
# Why: #5206 in Alexa global
'http://www.kkiste.to/',
# Why: #5207 in Alexa global
'http://www.frmtr.com/',
# Why: #5208 in Alexa global
'http://www.skai.gr/',
# Why: #5209 in Alexa global
'http://www.zovi.com/',
# Why: #5210 in Alexa global
'http://www.qiwi.ru/',
# Why: #5211 in Alexa global
'http://www.stfucollege.com/',
# Why: #5212 in Alexa global
'http://www.carros.com.br/',
# Why: #5213 in Alexa global
'http://www.privatejobshub.blogspot.in/',
# Why: #5214 in Alexa global
'http://www.englishtown.com/',
# Why: #5215 in Alexa global
'http://www.info.com/',
# Why: #5216 in Alexa global
'http://www.multiclickbrasil.com.br/',
# Why: #5217 in Alexa global
'http://www.gazeteoku.com/',
# Why: #5218 in Alexa global
'http://www.kinghost.com/',
# Why: #5219 in Alexa global
'http://www.izismile.com/',
# Why: #5220 in Alexa global
'http://www.gopro.com/',
# Why: #5221 in Alexa global
'http://www.uspto.gov/',
# Why: #5222 in Alexa global
'http://www.testberichte.de/',
# Why: #5223 in Alexa global
'http://www.fs.to/',
# Why: #5224 in Alexa global
'http://www.sketchtoy.com/',
# Why: #5225 in Alexa global
'http://www.sinarharian.com.my/',
# Why: #5226 in Alexa global
'http://www.stylemode.com/',
# Why: #5227 in Alexa global
'http://www.v7n.com/',
# Why: #5228 in Alexa global
'http://www.livenation.com/',
# Why: #5229 in Alexa global
'http://www.firstrow1.eu/',
# Why: #5230 in Alexa global
'http://www.joomlaforum.ru/',
# Why: #5231 in Alexa global
'http://www.sharecare.com/',
# Why: #5232 in Alexa global
'http://www.vetogate.com/',
# Why: #5233 in Alexa global
'http://www.series.ly/',
# Why: #5234 in Alexa global
'http://www.property24.com/',
# Why: #5235 in Alexa global
'http://www.payamsara.com/',
# Why: #5236 in Alexa global
'http://www.webstarts.com/',
# Why: #5237 in Alexa global
'http://www.renfe.es/',
# Why: #5238 in Alexa global
'http://www.fatcow.com/',
# Why: #5239 in Alexa global
'http://www.24ur.com/',
# Why: #5240 in Alexa global
'http://www.lide.cz/',
# Why: #5241 in Alexa global
'http://www.sabayacafe.com/',
# Why: #5242 in Alexa global
'http://www.prodavalnik.com/',
# Why: #5243 in Alexa global
'http://www.hyves.nl/',
# Why: #5244 in Alexa global
'http://www.groupon.jp/',
# Why: #5245 in Alexa global
'http://www.almaany.com/',
# Why: #5246 in Alexa global
'http://www.xero.com/',
# Why: #5247 in Alexa global
'http://www.celluway.com/',
# Why: #5248 in Alexa global
'http://www.mapbar.com/',
# Why: #5249 in Alexa global
'http://www.vecernji.hr/',
# Why: #5250 in Alexa global
'http://www.konga.com/',
# Why: #5251 in Alexa global
'http://www.fresherslive.com/',
# Why: #5252 in Alexa global
'http://www.nova.cz/',
# Why: #5253 in Alexa global
'http://www.onlinefwd.com/',
# Why: #5254 in Alexa global
'http://www.petco.com/',
# Why: #5255 in Alexa global
'http://www.benisonapparel.com/',
# Why: #5256 in Alexa global
'http://www.jango.com/',
# Why: #5257 in Alexa global
'http://mangocity.com/',
# Why: #5258 in Alexa global
'http://www.gamefly.com/',
# Why: #5259 in Alexa global
'http://www.igma.tv/',
# Why: #5260 in Alexa global
'http://www.21cineplex.com/',
# Why: #5261 in Alexa global
'http://www.fblife.com/',
# Why: #5262 in Alexa global
'http://www.moe.gov.eg/',
# Why: #5263 in Alexa global
'http://www.heydouga.com/',
# Why: #5264 in Alexa global
'http://buildhr.com/',
# Why: #5265 in Alexa global
'http://www.mmo-champion.com/',
# Why: #5266 in Alexa global
'http://www.ithome.com/',
# Why: #5267 in Alexa global
'http://www.krakow.pl/',
# Why: #5268 in Alexa global
'http://www.history.com/',
# Why: #5269 in Alexa global
'http://www.jc001.cn/',
# Why: #5270 in Alexa global
'http://www.privatehomeclips.com/',
# Why: #5271 in Alexa global
'http://www.wasu.cn/',
# Why: #5272 in Alexa global
'http://www.bazos.cz/',
# Why: #5273 in Alexa global
'http://www.appchina.com/',
# Why: #5274 in Alexa global
'http://www.helpster.de/',
# Why: #5275 in Alexa global
'http://www.51hejia.com/',
# Why: #5276 in Alexa global
'http://www.fuckbadbitches.com/',
# Why: #5277 in Alexa global
'http://www.toyota-autocenter.com/',
# Why: #5278 in Alexa global
'http://www.alnaharegypt.com/',
# Why: #5280 in Alexa global
'http://www.eastbay.com/',
# Why: #5281 in Alexa global
'http://www.softonic.com.br/',
# Why: #5282 in Alexa global
'http://www.translit.ru/',
# Why: #5283 in Alexa global
'http://www.justcloud.com/',
# Why: #5284 in Alexa global
'http://www.validclick.net/',
# Why: #5285 in Alexa global
'http://www.seneweb.com/',
# Why: #5286 in Alexa global
'http://www.fsiblog.com/',
# Why: #5287 in Alexa global
'http://www.williamhill.it/',
# Why: #5288 in Alexa global
'http://www.twitchy.com/',
# Why: #5289 in Alexa global
'http://www.y4yy.com/',
# Why: #5290 in Alexa global
'http://www.gouv.qc.ca/',
# Why: #5291 in Alexa global
'http://www.nubiles.net/',
# Why: #5292 in Alexa global
'http://www.marvel.com/',
# Why: #5293 in Alexa global
'http://www.helpmefindyour.info/',
# Why: #5294 in Alexa global
'http://www.tripadvisor.ca/',
# Why: #5295 in Alexa global
'http://www.joomlart.com/',
# Why: #5296 in Alexa global
'http://www.m18.com/',
# Why: #5297 in Alexa global
'http://www.orgasmatrix.com/',
# Why: #5298 in Alexa global
'http://www.bidoo.com/',
# Why: #5299 in Alexa global
'http://www.rogers.com/',
# Why: #5300 in Alexa global
'http://www.informationng.com/',
# Why: #5301 in Alexa global
'http://www.voyage-prive.com/',
# Why: #5302 in Alexa global
'http://www.comingsoon.net/',
# Why: #5303 in Alexa global
'http://www.searchmetrics.com/',
# Why: #5304 in Alexa global
'http://www.jetztspielen.de/',
# Why: #5305 in Alexa global
'http://www.mathxl.com/',
# Why: #5306 in Alexa global
'http://www.telmex.com/',
# Why: #5307 in Alexa global
'http://www.purpleporno.com/',
# Why: #5308 in Alexa global
'http://www.coches.net/',
# Why: #5309 in Alexa global
'http://hamusoku.com/',
# Why: #5310 in Alexa global
'http://www.link-assistant.com/',
# Why: #5311 in Alexa global
'http://www.gosur.com/',
# Why: #5312 in Alexa global
'http://www.torrentcrazy.com/',
# Why: #5313 in Alexa global
'http://www.funny-games.biz/',
# Why: #5314 in Alexa global
'http://www.bseindia.com/',
# Why: #5315 in Alexa global
'http://www.promosite.ru/',
# Why: #5316 in Alexa global
'http://www.google.mn/',
# Why: #5317 in Alexa global
'http://www.cartoonnetworkarabic.com/',
# Why: #5318 in Alexa global
'http://www.icm.edu.pl/',
# Why: #5319 in Alexa global
'http://ttt4.com/',
# Why: #5321 in Alexa global
'http://www.pepperjamnetwork.com/',
# Why: #5322 in Alexa global
'http://www.lolzbook.com/',
# Why: #5323 in Alexa global
'http://www.nationalpost.com/',
# Why: #5324 in Alexa global
'http://www.tukif.com/',
# Why: #5325 in Alexa global
'http://www.club-asteria.com/',
# Why: #5326 in Alexa global
'http://www.7search.com/',
# Why: #5327 in Alexa global
'http://www.kasikornbank.com/',
# Why: #5328 in Alexa global
'http://www.ebay.ie/',
# Why: #5329 in Alexa global
'http://www.sexlunch.com/',
# Why: #5330 in Alexa global
'http://www.qype.com/',
# Why: #5331 in Alexa global
'http://www.sankakucomplex.com/',
# Why: #5333 in Alexa global
'http://www.flashback.org/',
# Why: #5334 in Alexa global
'http://www.streamhunter.eu/',
# Why: #5335 in Alexa global
'http://www.rsb.ru/',
# Why: #5336 in Alexa global
'http://www.royalporntube.com/',
# Why: #5337 in Alexa global
'http://www.diretta.it/',
# Why: #5338 in Alexa global
'http://www.yummly.com/',
# Why: #5339 in Alexa global
'http://www.dom2.ru/',
# Why: #5340 in Alexa global
'http://www.2144.cn/',
# Why: #5341 in Alexa global
'http://www.metoffice.gov.uk/',
# Why: #5342 in Alexa global
'http://www.goodbaby.com/',
# Why: #5343 in Alexa global
'http://www.pornbb.org/',
# Why: #5344 in Alexa global
'http://www.formspring.me/',
# Why: #5345 in Alexa global
'http://www.google.com.cy/',
# Why: #5346 in Alexa global
'http://www.purepeople.com/',
# Why: #5347 in Alexa global
'http://www.epnet.com/',
# Why: #5348 in Alexa global
'http://www.penny-arcade.com/',
# Why: #5349 in Alexa global
'http://www.onlinekhabar.com/',
# Why: #5350 in Alexa global
'http://www.vcommission.com/',
# Why: #5351 in Alexa global
'http://www.zimabdk.com/',
# Why: #5352 in Alexa global
'http://www.car.gr/',
# Why: #5353 in Alexa global
'http://www.wat.tv/',
# Why: #5354 in Alexa global
'http://www.nnn.ru/',
# Why: #5355 in Alexa global
'http://www.arvixe.com/',
# Why: #5356 in Alexa global
'http://www.buxp.org/',
# Why: #5357 in Alexa global
'http://www.shaw.ca/',
# Why: #5358 in Alexa global
'http://cnyes.com/',
# Why: #5359 in Alexa global
'http://www.casa.it/',
# Why: #5360 in Alexa global
'http://233.com/',
# Why: #5361 in Alexa global
'http://www.text.ru/',
# Why: #5362 in Alexa global
'http://www.800notes.com/',
# Why: #5363 in Alexa global
'http://www.banki.ru/',
# Why: #5364 in Alexa global
'http://www.marinetraffic.com/',
# Why: #5365 in Alexa global
'http://www.meteo.gr/',
# Why: #5366 in Alexa global
'http://www.thetrainline.com/',
# Why: #5367 in Alexa global
'http://www.blogspot.ch/',
# Why: #5368 in Alexa global
'http://www.netaffiliation.com/',
# Why: #5370 in Alexa global
'http://www.olx.co.id/',
# Why: #5371 in Alexa global
'http://www.slando.kz/',
# Why: #5372 in Alexa global
'http://www.nordea.se/',
# Why: #5373 in Alexa global
'http://www.xbabe.com/',
# Why: #5374 in Alexa global
'http://www.bibsonomy.org/',
# Why: #5375 in Alexa global
'http://www.moneynews.com/',
# Why: #5376 in Alexa global
'http://265g.com/',
# Why: #5377 in Alexa global
'http://www.horoscope.com/',
# Why: #5378 in Alexa global
'http://www.home.ne.jp/',
# Why: #5379 in Alexa global
'http://www.cztv.com.cn/',
# Why: #5380 in Alexa global
'http://www.yammer.com/',
# Why: #5381 in Alexa global
'http://www.sextgem.com/',
# Why: #5382 in Alexa global
'http://www.tribune.com.pk/',
# Why: #5383 in Alexa global
'http://www.topeuro.biz/',
# Why: #5385 in Alexa global
'http://www.perfectgirls.xxx/',
# Why: #5386 in Alexa global
'http://ssc.nic.in/',
# Why: #5387 in Alexa global
'http://www.8264.com/',
# Why: #5388 in Alexa global
'http://www.flvrunner.com/',
# Why: #5389 in Alexa global
'http://www.gry.pl/',
# Why: #5390 in Alexa global
'http://www.sto.cn/',
# Why: #5391 in Alexa global
'http://www.pravda.ru/',
# Why: #5392 in Alexa global
'http://www.fulltiltpoker.com/',
# Why: #5393 in Alexa global
'http://www.kure.tv/',
# Why: #5394 in Alexa global
'http://www.turbo.az/',
# Why: #5395 in Alexa global
'http://www.ujian.cc/',
# Why: #5396 in Alexa global
'http://www.mustseeindia.com/',
# Why: #5397 in Alexa global
'http://www.thithtoolwin.com/',
# Why: #5398 in Alexa global
'http://www.chiphell.com/',
# Why: #5399 in Alexa global
'http://www.baidu.cn/',
# Why: #5400 in Alexa global
'http://www.spieletipps.de/',
# Why: #5401 in Alexa global
'http://www.portail.free.fr/',
# Why: #5402 in Alexa global
'http://www.hbr.org/',
# Why: #5403 in Alexa global
'http://www.sex-hq.com/',
# Why: #5404 in Alexa global
'http://www.webdeveloper.com/',
# Why: #5405 in Alexa global
'http://www.cloudzer.net/',
# Why: #5406 in Alexa global
'http://www.vagas.com.br/',
# Why: #5407 in Alexa global
'http://www.anspress.com/',
# Why: #5408 in Alexa global
'http://www.beitaichufang.com/',
# Why: #5409 in Alexa global
'http://www.songkick.com/',
# Why: #5410 in Alexa global
'http://www.tsite.jp/',
# Why: #5411 in Alexa global
'http://www.oyunlari.net/',
# Why: #5412 in Alexa global
'http://www.unfollowers.me/',
# Why: #5413 in Alexa global
'http://www.computrabajo.com.mx/',
# Why: #5414 in Alexa global
'http://www.usp.br/',
# Why: #5415 in Alexa global
'http://www.parseek.com/',
# Why: #5416 in Alexa global
'http://www.salary.com/',
# Why: #5417 in Alexa global
'http://www.navyfcu.org/',
# Why: #5418 in Alexa global
'http://www.bigpond.com/',
# Why: #5419 in Alexa global
'http://www.joann.com/',
# Why: #5420 in Alexa global
'http://www.ajansspor.com/',
# Why: #5421 in Alexa global
'http://www.burnews.com/',
# Why: #5422 in Alexa global
'http://www.myrecipes.com/',
# Why: #5423 in Alexa global
'http://www.mt5.com/',
# Why: #5424 in Alexa global
'http://www.webconfs.com/',
# Why: #5425 in Alexa global
'http://www.offcn.com/',
# Why: #5426 in Alexa global
'http://www.travian.com.tr/',
# Why: #5427 in Alexa global
'http://www.animenewsnetwork.com/',
# Why: #5428 in Alexa global
'http://www.smartshopping.com/',
# Why: #5429 in Alexa global
'http://www.twojapogoda.pl/',
# Why: #5430 in Alexa global
'http://www.tigerairways.com/',
# Why: #5431 in Alexa global
'http://www.qoo10.jp/',
# Why: #5432 in Alexa global
'http://www.archiveofourown.org/',
# Why: #5433 in Alexa global
'http://www.qq937.com/',
# Why: #5434 in Alexa global
'http://www.meneame.net/',
# Why: #5436 in Alexa global
'http://www.joyclub.de/',
# Why: #5437 in Alexa global
'http://www.yy.com/',
# Why: #5438 in Alexa global
'http://www.weddingwire.com/',
# Why: #5439 in Alexa global
'http://www.moddb.com/',
# Why: #5440 in Alexa global
'http://www.acervoamador.com/',
# Why: #5441 in Alexa global
'http://www.stgeorge.com.au/',
# Why: #5442 in Alexa global
'http://www.forumhouse.ru/',
# Why: #5443 in Alexa global
'http://www.mp3xd.com/',
# Why: #5444 in Alexa global
'http://www.nomura.co.jp/',
# Why: #5445 in Alexa global
'http://www.lionair.co.id/',
# Why: #5446 in Alexa global
'http://www.needtoporn.com/',
# Why: #5447 in Alexa global
'http://www.playcast.ru/',
# Why: #5448 in Alexa global
'http://www.paheal.net/',
# Why: #5449 in Alexa global
'http://www.finishline.com/',
# Why: #5450 in Alexa global
'http://www.sep.gob.mx/',
# Why: #5451 in Alexa global
'http://www.comenity.net/',
# Why: #5452 in Alexa global
'http://www.tqn.com/',
# Why: #5453 in Alexa global
'http://www.eroticads.com/',
# Why: #5454 in Alexa global
'http://www.svpressa.ru/',
# Why: #5455 in Alexa global
'http://www.dtvideo.com/',
# Why: #5456 in Alexa global
'http://www.mobile.free.fr/',
# Why: #5457 in Alexa global
'http://www.privat24.ua/',
# Why: #5458 in Alexa global
'http://www.mp3sk.net/',
# Why: #5459 in Alexa global
'http://www.atlas.sk/',
# Why: #5460 in Alexa global
'http://www.aib.ie/',
# Why: #5461 in Alexa global
'http://www.shockwave.com/',
# Why: #5462 in Alexa global
'http://www.qatarairways.com/',
# Why: #5463 in Alexa global
'http://www.theladders.com/',
# Why: #5464 in Alexa global
'http://www.dsnetwb.com/',
# Why: #5465 in Alexa global
'http://www.expansiondirecto.com/',
# Why: #5466 in Alexa global
'http://www.povarenok.ru/',
# Why: #5467 in Alexa global
'http://www.moneysupermarket.com/',
# Why: #5468 in Alexa global
'http://www.getchu.com/',
# Why: #5469 in Alexa global
'http://www.gay.com/',
# Why: #5470 in Alexa global
'http://www.hsbc.com.mx/',
# Why: #5471 in Alexa global
'http://www.textsale.ru/',
# Why: #5472 in Alexa global
'http://www.kadinlarkulubu.com/',
# Why: #5473 in Alexa global
'http://www.scientificamerican.com/',
# Why: #5474 in Alexa global
'http://www.hillnews.com/',
# Why: #5475 in Alexa global
'http://www.tori.fi/',
# Why: #5476 in Alexa global
'http://www.6tie.com/',
# Why: #5477 in Alexa global
'http://www.championselect.net/',
# Why: #5478 in Alexa global
'http://gtobal.com/',
# Why: #5479 in Alexa global
'http://www.bangkokbank.com/',
# Why: #5481 in Alexa global
'http://www.akakce.com/',
# Why: #5482 in Alexa global
'http://www.smarter.com/',
# Why: #5483 in Alexa global
'http://www.totalvideoplugin.com/',
# Why: #5484 in Alexa global
'http://www.dmir.ru/',
# Why: #5485 in Alexa global
'http://www.rpp.com.pe/',
# Why: #5486 in Alexa global
'http://www.uhaul.com/',
# Why: #5487 in Alexa global
'http://www.kayako.com/',
# Why: #5488 in Alexa global
'http://www.buyvip.com/',
# Why: #5489 in Alexa global
'http://www.sixrevisions.com/',
# Why: #5490 in Alexa global
'http://www.army.mil/',
# Why: #5491 in Alexa global
'http://www.rediffmail.com/',
# Why: #5492 in Alexa global
'http://www.gsis.gr/',
# Why: #5494 in Alexa global
'http://www.destinia.com/',
# Why: #5495 in Alexa global
'http://www.behindwoods.com/',
# Why: #5496 in Alexa global
'http://www.wearehairy.com/',
# Why: #5497 in Alexa global
'http://www.coqnu.com/',
# Why: #5498 in Alexa global
'http://www.soundclick.com/',
# Why: #5499 in Alexa global
'http://www.drive.ru/',
# Why: #5501 in Alexa global
'http://www.cam4.fr/',
# Why: #5502 in Alexa global
'http://www.jschina.com.cn/',
# Why: #5503 in Alexa global
'http://www.bakusai.com/',
# Why: #5504 in Alexa global
'http://www.thailandtorrent.com/',
# Why: #5505 in Alexa global
'http://www.videosz.com/',
# Why: #5506 in Alexa global
'http://www.eporner.com/',
# Why: #5507 in Alexa global
'http://www.rakuten-sec.co.jp/',
# Why: #5508 in Alexa global
'http://www.stltoday.com/',
# Why: #5509 in Alexa global
'http://www.ilmessaggero.it/',
# Why: #5510 in Alexa global
'http://www.theregister.co.uk/',
# Why: #5511 in Alexa global
'http://www.bloggang.com/',
# Why: #5512 in Alexa global
'http://www.eonet.jp/',
# Why: #5513 in Alexa global
'http://www.nastyvideotube.com/',
# Why: #5514 in Alexa global
'http://www.doityourself.com/',
# Why: #5515 in Alexa global
'http://www.rp-online.de/',
# Why: #5516 in Alexa global
'http://www.wow-impulse.ru/',
# Why: #5517 in Alexa global
'http://www.kar.nic.in/',
# Why: #5518 in Alexa global
'http://www.bershka.com/',
# Why: #5519 in Alexa global
'http://www.neteller.com/',
# Why: #5520 in Alexa global
'http://www.adevarul.ro/',
# Why: #5521 in Alexa global
'http://www.divxtotal.com/',
# Why: #5522 in Alexa global
'http://www.bolshoyvopros.ru/',
# Why: #5523 in Alexa global
'http://www.letudiant.fr/',
# Why: #5524 in Alexa global
'http://www.xinshipu.com/',
# Why: #5525 in Alexa global
'http://www.vh1.com/',
# Why: #5526 in Alexa global
'http://www.excite.com/',
# Why: #5527 in Alexa global
'http://www.somewhereinblog.net/',
# Why: #5529 in Alexa global
'http://www.mcgraw-hill.com/',
# Why: #5530 in Alexa global
'http://www.patheos.com/',
# Why: #5531 in Alexa global
'http://www.webdesignledger.com/',
# Why: #5532 in Alexa global
'http://www.plus28.com/',
# Why: #5533 in Alexa global
'http://www.adultwork.com/',
# Why: #5534 in Alexa global
'http://www.dajuegos.com/',
# Why: #5535 in Alexa global
'http://www.blogs.com/',
# Why: #5536 in Alexa global
'http://www.glopart.ru/',
# Why: #5537 in Alexa global
'http://www.donews.com/',
# Why: #5538 in Alexa global
'http://www.nation.co.ke/',
# Why: #5539 in Alexa global
'http://www.delfi.ee/',
# Why: #5540 in Alexa global
'http://www.lacuerda.net/',
# Why: #5541 in Alexa global
'http://www.jjshouse.com/',
# Why: #5542 in Alexa global
'http://www.megaindex.ru/',
# Why: #5543 in Alexa global
'http://www.darty.com/',
# Why: #5544 in Alexa global
'http://www.maturetube.com/',
# Why: #5545 in Alexa global
'http://www.jokeroo.com/',
# Why: #5546 in Alexa global
'http://www.estekhtam.com/',
# Why: #5547 in Alexa global
'http://www.fnac.es/',
# Why: #5548 in Alexa global
'http://www.ninjakiwi.com/',
# Why: #5549 in Alexa global
'http://www.tovima.gr/',
# Why: #5550 in Alexa global
'http://www.timinternet.it/',
# Why: #5551 in Alexa global
'http://www.citizensbankonline.com/',
# Why: #5552 in Alexa global
'http://www.builtwith.com/',
# Why: #5553 in Alexa global
'http://www.ko499.com/',
# Why: #5554 in Alexa global
'http://www.tastyblacks.com/',
# Why: #5555 in Alexa global
'http://www.currys.co.uk/',
# Why: #5556 in Alexa global
'http://www.jobui.com/',
# Why: #5557 in Alexa global
'http://www.notebookreview.com/',
# Why: #5558 in Alexa global
'http://www.meishij.net/',
# Why: #5559 in Alexa global
'http://www.filerio.in/',
# Why: #5560 in Alexa global
'http://gohappy.com.tw/',
# Why: #5561 in Alexa global
'http://www.cheapflights.co.uk/',
# Why: #5562 in Alexa global
'http://www.puls24.mk/',
# Why: #5563 in Alexa global
'http://www.rumbo.es/',
# Why: #5564 in Alexa global
'http://www.newsbusters.org/',
# Why: #5565 in Alexa global
'http://www.imgdino.com/',
# Why: #5566 in Alexa global
'http://www.oxforddictionaries.com/',
# Why: #5567 in Alexa global
'http://www.ftdownloads.com/',
# Why: #5568 in Alexa global
'http://ciudad.com.ar/',
# Why: #5569 in Alexa global
'http://www.latercera.cl/',
# Why: #5570 in Alexa global
'http://www.lankadeepa.lk/',
# Why: #5571 in Alexa global
'http://www.47news.jp/',
# Why: #5572 in Alexa global
'http://www.bankier.pl/',
# Why: #5573 in Alexa global
'http://www.hawahome.com/',
# Why: #5574 in Alexa global
'http://www.comicvine.com/',
# Why: #5575 in Alexa global
'http://www.cam4.it/',
# Why: #5576 in Alexa global
'http://www.fok.nl/',
# Why: #5577 in Alexa global
'http://www.iknowthatgirl.com/',
# Why: #5578 in Alexa global
'http://www.hizliresim.com/',
# Why: #5579 in Alexa global
'http://www.ebizmba.com/',
# Why: #5580 in Alexa global
'http://www.twistys.com/',
# Why: #5581 in Alexa global
'http://minkchan.com/',
# Why: #5582 in Alexa global
'http://www.dnevnik.hr/',
# Why: #5583 in Alexa global
'http://www.peliculascoco.com/',
# Why: #5584 in Alexa global
'http://www.new-xhamster.com/',
# Why: #5585 in Alexa global
'http://www.freelancer.in/',
# Why: #5586 in Alexa global
'http://www.globalgrind.com/',
# Why: #5587 in Alexa global
'http://www.rbc.cn/',
# Why: #5588 in Alexa global
'http://www.talkgold.com/',
# Why: #5589 in Alexa global
'http://www.p1.cn/',
# Why: #5590 in Alexa global
'http://www.kanui.com.br/',
# Why: #5591 in Alexa global
'http://www.woxikon.de/',
# Why: #5592 in Alexa global
'http://www.cinematoday.jp/',
# Why: #5593 in Alexa global
'http://www.jobstreet.com.my/',
# Why: #5594 in Alexa global
'http://www.job.ru/',
# Why: #5595 in Alexa global
'http://www.wowbiz.ro/',
# Why: #5596 in Alexa global
'http://www.yiyi.cc/',
# Why: #5597 in Alexa global
'http://www.sinoptik.ua/',
# Why: #5598 in Alexa global
'http://www.parents.com/',
# Why: #5599 in Alexa global
'http://www.forblabla.com/',
# Why: #5600 in Alexa global
'http://www.trojmiasto.pl/',
# Why: #5601 in Alexa global
'http://www.anyoption.com/',
# Why: #5602 in Alexa global
'http://www.wplocker.com/',
# Why: #5603 in Alexa global
'http://www.paytm.in/',
# Why: #5604 in Alexa global
'http://www.elespectador.com/',
# Why: #5605 in Alexa global
'http://www.mysitecost.ru/',
# Why: #5606 in Alexa global
'http://www.startribune.com/',
# Why: #5607 in Alexa global
'http://www.cam4.co.uk/',
# Why: #5608 in Alexa global
'http://www.bestcoolmobile.com/',
# Why: #5609 in Alexa global
'http://www.soup.io/',
# Why: #5610 in Alexa global
'http://www.starfall.com/',
# Why: #5611 in Alexa global
'http://www.ixl.com/',
# Why: #5612 in Alexa global
'http://www.oreilly.com/',
# Why: #5613 in Alexa global
'http://www.dansmovies.com/',
# Why: #5614 in Alexa global
'http://www.facemoods.com/',
# Why: #5615 in Alexa global
'http://www.google.ge/',
# Why: #5616 in Alexa global
'http://www.sat.gob.mx/',
# Why: #5617 in Alexa global
'http://www.weatherbug.com/',
# Why: #5618 in Alexa global
'http://www.majorgeeks.com/',
# Why: #5619 in Alexa global
'http://www.llbean.com/',
# Why: #5620 in Alexa global
'http://www.catho.com.br/',
# Why: #5621 in Alexa global
'http://www.gungho.jp/',
# Why: #5622 in Alexa global
'http://www.mk.co.kr/',
# Why: #5623 in Alexa global
'http://www.googlegroups.com/',
# Why: #5624 in Alexa global
'http://www.animoto.com/',
# Why: #5625 in Alexa global
'http://www.alquds.co.uk/',
# Why: #5626 in Alexa global
'http://www.newsday.com/',
# Why: #5627 in Alexa global
'http://www.games2girls.com/',
# Why: #5628 in Alexa global
'http://www.youporngay.com/',
# Why: #5629 in Alexa global
'http://www.spaces.ru/',
# Why: #5630 in Alexa global
'http://www.seriespepito.com/',
# Why: #5631 in Alexa global
'http://www.gelbeseiten.de/',
# Why: #5632 in Alexa global
'http://www.thethirdmedia.com/',
# Why: #5633 in Alexa global
'http://www.watchfomny.com/',
# Why: #5634 in Alexa global
'http://www.freecamsexposed.com/',
# Why: #5635 in Alexa global
'http://www.dinakaran.com/',
# Why: #5636 in Alexa global
'http://www.xxxhost.me/',
# Why: #5637 in Alexa global
'http://www.smartprix.com/',
# Why: #5638 in Alexa global
'http://www.thoughtcatalog.com/',
# Why: #5639 in Alexa global
'http://www.soccersuck.com/',
# Why: #5640 in Alexa global
'http://www.vivanuncios.com/',
# Why: #5641 in Alexa global
'http://www.liba.com/',
# Why: #5642 in Alexa global
'http://www.gog.com/',
# Why: #5643 in Alexa global
'http://www.philstar.com/',
# Why: #5644 in Alexa global
'http://www.cian.ru/',
# Why: #5645 in Alexa global
'http://www.avclub.com/',
# Why: #5646 in Alexa global
'http://www.slon.ru/',
# Why: #5647 in Alexa global
'http://www.stc.com.sa/',
# Why: #5648 in Alexa global
'http://www.jstor.org/',
# Why: #5649 in Alexa global
'http://www.wehkamp.nl/',
# Why: #5650 in Alexa global
'http://www.vodafone.co.uk/',
# Why: #5651 in Alexa global
'http://www.deser.pl/',
# Why: #5652 in Alexa global
'http://www.adscendmedia.com/',
# Why: #5653 in Alexa global
'http://www.getcashforsurveys.com/',
# Why: #5654 in Alexa global
'http://www.glamsham.com/',
# Why: #5655 in Alexa global
'http://www.dressupgames.com/',
# Why: #5656 in Alexa global
'http://www.lifo.gr/',
# Why: #5657 in Alexa global
'http://www.37signals.com/',
# Why: #5658 in Alexa global
'http://www.pdfonline.com/',
# Why: #5659 in Alexa global
'http://www.flipkey.com/',
# Why: #5660 in Alexa global
'http://www.epochtimes.com/',
# Why: #5662 in Alexa global
'http://www.futhead.com/',
# Why: #5663 in Alexa global
'http://www.inlinkz.com/',
# Why: #5664 in Alexa global
'http://www.fx-trend.com/',
# Why: #5665 in Alexa global
'http://www.yasdl.com/',
# Why: #5666 in Alexa global
'http://www.techbang.com/',
# Why: #5667 in Alexa global
'http://www.narenji.ir/',
# Why: #5668 in Alexa global
'http://www.szonline.net/',
# Why: #5669 in Alexa global
'http://www.perfil.com.ar/',
# Why: #5670 in Alexa global
'http://www.mywebface.com/',
# Why: #5671 in Alexa global
'http://www.taknaz.ir/',
# Why: #5672 in Alexa global
'http://www.tradera.com/',
# Why: #5673 in Alexa global
'http://www.golem.de/',
# Why: #5674 in Alexa global
'http://www.its-mo.com/',
# Why: #5675 in Alexa global
'http://www.arabnet5.com/',
# Why: #5676 in Alexa global
'http://www.freerepublic.com/',
# Why: #5677 in Alexa global
'http://www.britannica.com/',
# Why: #5678 in Alexa global
'http://www.deccanchronicle.com/',
# Why: #5679 in Alexa global
'http://www.ohio.gov/',
# Why: #5680 in Alexa global
'http://www.busuu.com/',
# Why: #5681 in Alexa global
'http://www.pricecheck.co.za/',
# Why: #5682 in Alexa global
'http://www.paltalk.com/',
# Why: #5683 in Alexa global
'http://www.sportinglife.com/',
# Why: #5684 in Alexa global
'http://www.google.sn/',
# Why: #5685 in Alexa global
'http://www.meteomedia.com/',
# Why: #5686 in Alexa global
'http://www.push2check.net/',
# Why: #5687 in Alexa global
'http://www.ing-diba.de/',
# Why: #5688 in Alexa global
'http://www.immoweb.be/',
# Why: #5689 in Alexa global
'http://www.oregonlive.com/',
# Why: #5690 in Alexa global
'http://www.ge.tt/',
# Why: #5691 in Alexa global
'http://www.bbspink.com/',
# Why: #5692 in Alexa global
'http://www.business2community.com/',
# Why: #5693 in Alexa global
'http://www.viidii.com/',
# Why: #5694 in Alexa global
'http://www.hrloo.com/',
# Why: #5695 in Alexa global
'http://www.mglradio.com/',
# Why: #5696 in Alexa global
'http://www.cosme.net/',
# Why: #5697 in Alexa global
'http://www.xilu.com/',
# Why: #5698 in Alexa global
'http://www.scbeasy.com/',
# Why: #5699 in Alexa global
'http://www.biglots.com/',
# Why: #5700 in Alexa global
'http://www.dhakatimes24.com/',
# Why: #5701 in Alexa global
'http://www.spankbang.com/',
# Why: #5702 in Alexa global
'http://www.hitleap.com/',
# Why: #5703 in Alexa global
'http://www.proz.com/',
# Why: #5704 in Alexa global
'http://www.php100.com/',
# Why: #5705 in Alexa global
'http://www.tvtoday.de/',
# Why: #5706 in Alexa global
'http://www.funnie.st/',
# Why: #5707 in Alexa global
'http://www.velvet.hu/',
# Why: #5708 in Alexa global
'http://www.dhnet.be/',
# Why: #5709 in Alexa global
'http://www.capital.gr/',
# Why: #5710 in Alexa global
'http://www.inosmi.ru/',
# Why: #5711 in Alexa global
'http://www.healthkart.com/',
# Why: #5712 in Alexa global
'http://www.amway.com/',
# Why: #5713 in Alexa global
'http://www.madmimi.com/',
# Why: #5714 in Alexa global
'http://www.dramafever.com/',
# Why: #5715 in Alexa global
'http://www.oodle.com/',
# Why: #5716 in Alexa global
'http://www.spreadshirt.com/',
# Why: #5717 in Alexa global
'http://www.google.mg/',
# Why: #5718 in Alexa global
'http://www.utarget.ru/',
# Why: #5719 in Alexa global
'http://www.matomy.com/',
# Why: #5720 in Alexa global
'http://www.medhelp.org/',
# Why: #5721 in Alexa global
'http://www.cumlouder.com/',
# Why: #5723 in Alexa global
'http://www.aliorbank.pl/',
# Why: #5724 in Alexa global
'http://www.takepart.com/',
# Why: #5725 in Alexa global
'http://www.myfreshnet.com/',
# Why: #5726 in Alexa global
'http://www.adorama.com/',
# Why: #5727 in Alexa global
'http://www.dhs.gov/',
# Why: #5728 in Alexa global
'http://www.suruga-ya.jp/',
# Why: #5729 in Alexa global
'http://www.mivo.tv/',
# Why: #5730 in Alexa global
'http://www.nchsoftware.com/',
# Why: #5731 in Alexa global
'http://www.gnc.com/',
# Why: #5732 in Alexa global
'http://www.spiceworks.com/',
# Why: #5734 in Alexa global
'http://www.jeu.fr/',
# Why: #5735 in Alexa global
'http://www.tv-tokyo.co.jp/',
# Why: #5736 in Alexa global
'http://www.terra.com/',
# Why: #5737 in Alexa global
'http://www.irishtimes.com/',
# Why: #5738 in Alexa global
'http://www.kleiderkreisel.de/',
# Why: #5739 in Alexa global
'http://www.ebay.be/',
# Why: #5740 in Alexa global
'http://www.rt.ru/',
# Why: #5741 in Alexa global
'http://www.radiofarda.com/',
# Why: #5742 in Alexa global
'http://www.atrapalo.com/',
# Why: #5743 in Alexa global
'http://www.southcn.com/',
# Why: #5744 in Alexa global
'http://www.turkcell.com.tr/',
# Why: #5745 in Alexa global
'http://www.themetapicture.com/',
# Why: #5746 in Alexa global
'http://www.aujourdhui.com/',
# Why: #5747 in Alexa global
'http://www.ato.gov.au/',
# Why: #5748 in Alexa global
'http://www.pelis24.com/',
# Why: #5749 in Alexa global
'http://www.saaid.net/',
# Why: #5750 in Alexa global
'http://www.bradsdeals.com/',
# Why: #5751 in Alexa global
'http://www.pirate101.com/',
# Why: #5752 in Alexa global
'http://www.saturn.de/',
# Why: #5753 in Alexa global
'http://www.thisissouthwales.co.uk/',
# Why: #5754 in Alexa global
'http://www.cyberlink.com/',
# Why: #5755 in Alexa global
'http://www.internationalredirects.com/',
# Why: #5756 in Alexa global
'http://www.radardedescontos.com.br/',
# Why: #5758 in Alexa global
'http://www.rapidcontentwizard.com/',
# Why: #5759 in Alexa global
'http://www.kabum.com.br/',
# Why: #5761 in Alexa global
'http://www.athome.co.jp/',
# Why: #5762 in Alexa global
'http://www.webrankinfo.com/',
# Why: #5763 in Alexa global
'http://www.kiabi.com/',
# Why: #5764 in Alexa global
'http://www.farecompare.com/',
# Why: #5765 in Alexa global
'http://www.xinjunshi.com/',
# Why: #5766 in Alexa global
'http://www.youtube.com/user/SkyDoesMinecraft/',
# Why: #5767 in Alexa global
'http://www.vidxden.com/',
# Why: #5768 in Alexa global
'http://www.pvrcinemas.com/',
# Why: #5769 in Alexa global
'http://chachaba.com/',
# Why: #5770 in Alexa global
'http://www.wanmei.com/',
# Why: #5771 in Alexa global
'http://alternet.org/',
# Why: #5772 in Alexa global
'http://www.rozklad-pkp.pl/',
# Why: #5773 in Alexa global
'http://www.omniture.com/',
# Why: #5774 in Alexa global
'http://www.childrensplace.com/',
# Why: #5775 in Alexa global
'http://www.menards.com/',
# Why: #5776 in Alexa global
'http://www.zhcw.com/',
# Why: #5777 in Alexa global
'http://www.ouest-france.fr/',
# Why: #5778 in Alexa global
'http://www.vitorrent.org/',
# Why: #5779 in Alexa global
'http://www.xanga.com/',
# Why: #5780 in Alexa global
'http://www.zbozi.cz/',
# Why: #5781 in Alexa global
'http://www.dnspod.cn/',
# Why: #5782 in Alexa global
'http://www.radioshack.com/',
# Why: #5783 in Alexa global
'http://www.startv.in/',
# Why: #5784 in Alexa global
'http://www.affiliatewindow.com/',
# Why: #5785 in Alexa global
'http://www.gov.on.ca/',
# Why: #5786 in Alexa global
'http://www.grainger.com/',
# Why: #5787 in Alexa global
'http://www.3rat.com/',
# Why: #5788 in Alexa global
'http://www.indeed.co.za/',
# Why: #5789 in Alexa global
'http://www.rtbf.be/',
# Why: #5790 in Alexa global
'http://www.strava.com/',
# Why: #5791 in Alexa global
'http://www.disneystore.com/',
# Why: #5792 in Alexa global
'http://www.travelagency.travel/',
# Why: #5793 in Alexa global
'http://www.ekitan.com/',
# Why: #5794 in Alexa global
'http://www.66law.cn/',
# Why: #5795 in Alexa global
'http://www.volagratis.com/',
# Why: #5796 in Alexa global
'http://www.yiiframework.com/',
# Why: #5797 in Alexa global
'http://www.dramacrazy.net/',
# Why: #5798 in Alexa global
'http://www.addtoany.com/',
# Why: #5799 in Alexa global
'http://www.uzmantv.com/',
# Why: #5800 in Alexa global
'http://www.uline.com/',
# Why: #5801 in Alexa global
'http://www.fitnessmagazine.com/',
# Why: #5802 in Alexa global
'http://www.khmerload.com/',
# Why: #5803 in Alexa global
'http://www.italiafilm.tv/',
# Why: #5804 in Alexa global
'http://www.baseball-reference.com/',
# Why: #5805 in Alexa global
'http://www.neopets.com/',
# Why: #5806 in Alexa global
'http://www.multiupload.nl/',
# Why: #5807 in Alexa global
'http://www.lakii.com/',
# Why: #5808 in Alexa global
'http://www.downloadmaster.ru/',
# Why: #5809 in Alexa global
'http://www.babbel.com/',
# Why: #5810 in Alexa global
'http://www.gossip-tv.gr/',
# Why: #5811 in Alexa global
'http://www.laban.vn/',
# Why: #5812 in Alexa global
'http://www.computerbase.de/',
# Why: #5813 in Alexa global
'http://www.juyouqu.com/',
# Why: #5814 in Alexa global
'http://www.markt.de/',
# Why: #5815 in Alexa global
'http://www.linuxquestions.org/',
# Why: #5816 in Alexa global
'http://www.giveawayoftheday.com/',
# Why: #5817 in Alexa global
'http://www.176.com/',
# Why: #5818 in Alexa global
'http://www.cs.com.cn/',
# Why: #5819 in Alexa global
'http://www.homemademoviez.com/',
# Why: #5820 in Alexa global
'http://www.huffingtonpost.fr/',
# Why: #5821 in Alexa global
'http://www.movieweb.com/',
# Why: #5822 in Alexa global
'http://www.pornzeus.com/',
# Why: #5823 in Alexa global
'http://www.posta.com.tr/',
# Why: #5824 in Alexa global
'http://www.biography.com/',
# Why: #5825 in Alexa global
'http://www.bukkit.org/',
# Why: #5826 in Alexa global
'http://www.spirit.com/',
# Why: #5827 in Alexa global
'http://www.vemale.com/',
# Why: #5828 in Alexa global
'http://www.elnuevodia.com/',
# Why: #5829 in Alexa global
'http://www.pof.com.br/',
# Why: #5830 in Alexa global
'http://www.iranproud.com/',
# Why: #5831 in Alexa global
'http://www.molodost.bz/',
# Why: #5832 in Alexa global
'http://www.netcarshow.com/',
# Why: #5833 in Alexa global
'http://www.ardmediathek.de/',
# Why: #5834 in Alexa global
'http://www.fabfurnish.com/',
# Why: #5835 in Alexa global
'http://www.myfreeblack.com/',
# Why: #5836 in Alexa global
'http://www.antichat.ru/',
# Why: #5837 in Alexa global
'http://www.crocko.com/',
# Why: #5838 in Alexa global
'http://www.cocacola.co.jp/',
# Why: #5839 in Alexa global
'http://b5m.com/',
# Why: #5840 in Alexa global
'http://www.entrance-exam.net/',
# Why: #5841 in Alexa global
'http://www.benaughty.com/',
# Why: #5842 in Alexa global
'http://www.sierratradingpost.com/',
# Why: #5843 in Alexa global
'http://www.apartmentguide.com/',
# Why: #5844 in Alexa global
'http://www.slimspots.com/',
# Why: #5845 in Alexa global
'http://www.sondakika.com/',
# Why: #5846 in Alexa global
'http://www.glamour.com/',
# Why: #5847 in Alexa global
'http://www.zto.cn/',
# Why: #5848 in Alexa global
'http://www.ilyke.net/',
# Why: #5849 in Alexa global
'http://www.mybroadband.co.za/',
# Why: #5850 in Alexa global
'http://www.alaskaair.com/',
# Why: #5851 in Alexa global
'http://www.virtualtourist.com/',
# Why: #5852 in Alexa global
'http://www.rexxx.com/',
# Why: #5853 in Alexa global
'http://www.fullhdfilmizle.org/',
# Why: #5854 in Alexa global
'http://www.starpulse.com/',
# Why: #5855 in Alexa global
'http://www.winkal.com/',
# Why: #5856 in Alexa global
'http://www.ad-feeds.net/',
# Why: #5857 in Alexa global
'http://www.irannaz.com/',
# Why: #5858 in Alexa global
'http://www.elahmad.com/',
# Why: #5859 in Alexa global
'http://www.dealspl.us/',
# Why: #5860 in Alexa global
'http://www.moikrug.ru/',
# Why: #5861 in Alexa global
'http://www.olx.com.mx/',
# Why: #5862 in Alexa global
'http://www.rd.com/',
# Why: #5863 in Alexa global
'http://www.newone.org/',
# Why: #5864 in Alexa global
'http://www.naijapals.com/',
# Why: #5865 in Alexa global
'http://www.forgifs.com/',
# Why: #5866 in Alexa global
'http://www.fsjgw.com/',
# Why: #5867 in Alexa global
'http://edeng.cn/',
# Why: #5868 in Alexa global
'http://www.nicoviewer.net/',
# Why: #5869 in Alexa global
'http://www.topeleven.com/',
# Why: #5870 in Alexa global
'http://www.peerfly.com/',
# Why: #5871 in Alexa global
'http://www.softportal.com/',
# Why: #5872 in Alexa global
'http://www.clker.com/',
# Why: #5873 in Alexa global
'http://www.tehran98.com/',
# Why: #5874 in Alexa global
'http://weather2umbrella.com/',
# Why: #5875 in Alexa global
'http://www.jreast.co.jp/',
# Why: #5876 in Alexa global
'http://www.kuxun.cn/',
# Why: #5877 in Alexa global
'http://www.lookbook.nu/',
# Why: #5878 in Alexa global
'http://www.futureshop.ca/',
# Why: #5879 in Alexa global
'http://www.blackpeoplemeet.com/',
# Why: #5880 in Alexa global
'http://www.adworkmedia.com/',
# Why: #5881 in Alexa global
'http://www.entire.xxx/',
# Why: #5882 in Alexa global
'http://www.bitbucket.org/',
# Why: #5884 in Alexa global
'http://www.transfermarkt.co.uk/',
# Why: #5885 in Alexa global
'http://www.moshimonsters.com/',
# Why: #5886 in Alexa global
'http://www.4travel.jp/',
# Why: #5887 in Alexa global
'http://www.baimao.com/',
# Why: #5888 in Alexa global
'http://www.khanacademy.org/',
# Why: #5889 in Alexa global
'http://www.2chan.net/',
# Why: #5890 in Alexa global
'http://www.adopteunmec.com/',
# Why: #5891 in Alexa global
'http://www.mochimedia.com/',
# Why: #5892 in Alexa global
'http://www.strawberrynet.com/',
# Why: #5893 in Alexa global
'http://www.gdeivse.com/',
# Why: #5894 in Alexa global
'http://www.speckyboy.com/',
# Why: #5895 in Alexa global
'http://www.radical-foto.ru/',
# Why: #5896 in Alexa global
'http://www.softcoin.com/',
# Why: #5897 in Alexa global
'http://www.cnews.ru/',
# Why: #5898 in Alexa global
'http://www.ubs.com/',
# Why: #5899 in Alexa global
'http://www.lankasri.com/',
# Why: #5900 in Alexa global
'http://www.cylex.de/',
# Why: #5901 in Alexa global
'http://www.imtranslator.net/',
# Why: #5902 in Alexa global
'http://www.homeoffice.gov.uk/',
# Why: #5903 in Alexa global
'http://www.answerbag.com/',
# Why: #5904 in Alexa global
'http://www.chainreactioncycles.com/',
# Why: #5905 in Alexa global
'http://www.sportal.bg/',
# Why: #5906 in Alexa global
'http://www.livemaster.ru/',
# Why: #5907 in Alexa global
'http://www.mercadolibre.com.pe/',
# Why: #5908 in Alexa global
'http://www.mentalfloss.com/',
# Why: #5909 in Alexa global
'http://www.google.am/',
# Why: #5910 in Alexa global
'http://www.mawaly.com/',
# Why: #5911 in Alexa global
'http://www.douban.fm/',
# Why: #5912 in Alexa global
'http://www.abidjan.net/',
# Why: #5913 in Alexa global
'http://www.pricegong.com/',
# Why: #5914 in Alexa global
'http://www.brother.com/',
# Why: #5915 in Alexa global
'http://www.basspro.com/',
# Why: #5916 in Alexa global
'http://popsci.com/',
# Why: #5917 in Alexa global
'http://www.olx.com.ar/',
# Why: #5918 in Alexa global
'http://www.python.org/',
# Why: #5919 in Alexa global
'http://www.voetbalzone.nl/',
# Why: #5920 in Alexa global
'http://www.518.com.tw/',
# Why: #5921 in Alexa global
'http://www.aztecaporno.com/',
# Why: #5922 in Alexa global
'http://www.d-h.st/',
# Why: #5923 in Alexa global
'http://www.voyeurweb.com/',
# Why: #5924 in Alexa global
'http://www.storenvy.com/',
# Why: #5925 in Alexa global
'http://www.aftabir.com/',
# Why: #5926 in Alexa global
'http://www.imgsrc.ru/',
# Why: #5927 in Alexa global
'http://www.peru.com/',
# Why: #5928 in Alexa global
'http://www.mindbodygreen.com/',
# Why: #5929 in Alexa global
'http://www.stereotude.com/',
# Why: #5930 in Alexa global
'http://www.ar15.com/',
# Why: #5931 in Alexa global
'http://www.gogecapital.com/',
# Why: #5932 in Alexa global
'http://xipin.me/',
# Why: #5933 in Alexa global
'http://www.gvt.com.br/',
# Why: #5934 in Alexa global
'http://www.today.it/',
# Why: #5935 in Alexa global
'http://www.mastercard.com.au/',
# Why: #5936 in Alexa global
'http://www.hobbyking.com/',
# Why: #5937 in Alexa global
'http://www.hawkhost.com/',
# Why: #5938 in Alexa global
'http://www.thebump.com/',
# Why: #5939 in Alexa global
'http://www.alpari.ru/',
# Why: #5940 in Alexa global
'http://www.gamma-ic.com/',
# Why: #5941 in Alexa global
'http://www.mundome.com/',
# Why: #5942 in Alexa global
'http://www.televisao.uol.com.br/',
# Why: #5943 in Alexa global
'http://www.quotev.com/',
# Why: #5944 in Alexa global
'http://www.animaljam.com/',
# Why: #5945 in Alexa global
'http://www.ohozaa.com/',
# Why: #5946 in Alexa global
'http://www.sayyac.com/',
# Why: #5947 in Alexa global
'http://www.kobobooks.com/',
# Why: #5948 in Alexa global
'http://www.muslima.com/',
# Why: #5949 in Alexa global
'http://www.digsitesvalue.net/',
# Why: #5950 in Alexa global
'http://www.colourlovers.com/',
# Why: #5951 in Alexa global
'http://www.uludagsozluk.com/',
# Why: #5952 in Alexa global
'http://www.mercadolibre.com.uy/',
# Why: #5953 in Alexa global
'http://www.oem.com.mx/',
# Why: #5954 in Alexa global
'http://www.self.com/',
# Why: #5955 in Alexa global
'http://www.kyohk.net/',
# Why: #5957 in Alexa global
'http://www.dillards.com/',
# Why: #5958 in Alexa global
'http://www.eduu.com/',
# Why: #5959 in Alexa global
'http://www.replays.net/',
# Why: #5960 in Alexa global
'http://www.bnpparibasfortis.be/',
# Why: #5961 in Alexa global
'http://www.express.co.uk/',
# Why: #5962 in Alexa global
'http://www.levelupgames.uol.com.br/',
# Why: #5963 in Alexa global
'http://www.guaixun.com/',
# Why: #5964 in Alexa global
'http://www.750g.com/',
# Why: #5965 in Alexa global
'http://www.craveonline.com/',
# Why: #5966 in Alexa global
'http://www.markafoni.com/',
# Why: #5968 in Alexa global
'http://www.ename.com/',
# Why: #5969 in Alexa global
'http://www.abercrombie.com/',
# Why: #5970 in Alexa global
'http://www.noticiaaldia.com/',
# Why: #5971 in Alexa global
'http://www.seniorpeoplemeet.com/',
# Why: #5972 in Alexa global
'http://www.dhingana.com/',
# Why: #5974 in Alexa global
'http://www.prokerala.com/',
# Why: #5975 in Alexa global
'http://www.iefimerida.gr/',
# Why: #5976 in Alexa global
'http://www.wprazzi.com/',
# Why: #5977 in Alexa global
'http://www.pantipmarket.com/',
# Why: #5978 in Alexa global
'http://www.vueling.com/',
# Why: #5979 in Alexa global
'http://www.newsonlineweekly.com/',
# Why: #5980 in Alexa global
'http://cr173.com/',
# Why: #5981 in Alexa global
'http://www.ecp888.com/',
# Why: #5982 in Alexa global
'http://www.diary.ru/',
# Why: #5983 in Alexa global
'http://www.pervclips.com/',
# Why: #5984 in Alexa global
'http://www.sudaneseonline.com/',
# Why: #5985 in Alexa global
'http://www.personal.com.ar/',
# Why: #5986 in Alexa global
'http://www.articlesnatch.com/',
# Why: #5987 in Alexa global
'http://www.mitbbs.com/',
# Why: #5988 in Alexa global
'http://www.techsupportalert.com/',
# Why: #5989 in Alexa global
'http://www.filepost.com/',
# Why: #5990 in Alexa global
'http://www.unblockyoutube.co.uk/',
# Why: #5991 in Alexa global
'http://www.hasznaltauto.hu/',
# Why: #5992 in Alexa global
'http://www.dmv.org/',
# Why: #5993 in Alexa global
'http://www.port.hu/',
# Why: #5995 in Alexa global
'http://www.anastasiadate.com/',
# Why: #5996 in Alexa global
'http://www.adtgs.com/',
# Why: #5997 in Alexa global
'http://www.namejet.com/',
# Why: #5998 in Alexa global
'http://www.ally.com/',
# Why: #5999 in Alexa global
'http://www.djmaza.com/',
# Why: #6001 in Alexa global
'http://www.asstr.org/',
# Why: #6002 in Alexa global
'http://www.corel.com/',
# Why: #6003 in Alexa global
'http://www.interfax.ru/',
# Why: #6004 in Alexa global
'http://www.rozee.pk/',
# Why: #6005 in Alexa global
'http://www.akinator.com/',
# Why: #6006 in Alexa global
'http://www.dominos.co.in/',
# Why: #6007 in Alexa global
'http://boardgamegeek.com/',
# Why: #6008 in Alexa global
'http://www.teamliquid.net/',
# Why: #6009 in Alexa global
'http://www.sbrf.ru/',
# Why: #6010 in Alexa global
'http://www.l99.com/',
# Why: #6011 in Alexa global
'http://www.eatingwell.com/',
# Why: #6012 in Alexa global
'http://www.mid-day.com/',
# Why: #6013 in Alexa global
'http://www.blinkogold.it/',
# Why: #6014 in Alexa global
'http://www.rosbalt.ru/',
# Why: #6015 in Alexa global
'http://copadomundo.uol.com.br/',
# Why: #6016 in Alexa global
'http://www.islammemo.cc/',
# Why: #6017 in Alexa global
'http://www.bettycrocker.com/',
# Why: #6018 in Alexa global
'http://www.womenshealthmag.com/',
# Why: #6019 in Alexa global
'http://www.asandownload.com/',
# Why: #6020 in Alexa global
'http://www.twitcasting.tv/',
# Why: #6021 in Alexa global
'http://www.10and9.com/',
# Why: #6022 in Alexa global
'http://www.youngleafs.com/',
# Why: #6023 in Alexa global
'http://www.saharareporters.com/',
# Why: #6024 in Alexa global
'http://www.overclock.net/',
# Why: #6025 in Alexa global
'http://www.mapsgalaxy.com/',
# Why: #6026 in Alexa global
'http://www.internetslang.com/',
# Why: #6027 in Alexa global
'http://www.sokmil.com/',
# Why: #6028 in Alexa global
'http://www.yousendit.com/',
# Why: #6029 in Alexa global
'http://www.forex-mmcis.com/',
# Why: #6030 in Alexa global
'http://www.vador.com/',
# Why: #6031 in Alexa global
'http://www.pagewash.com/',
# Why: #6032 in Alexa global
'http://www.pringotrack.com/',
# Why: #6033 in Alexa global
'http://www.cpmstar.com/',
# Why: #6034 in Alexa global
'http://www.yxdown.com/',
# Why: #6035 in Alexa global
'http://www.surfingbird.ru/',
# Why: #6036 in Alexa global
'http://kyodo-d.jp/',
# Why: #6037 in Alexa global
'http://www.identi.li/',
# Why: #6038 in Alexa global
'http://www.n4hr.com/',
# Why: #6039 in Alexa global
'http://www.elitetorrent.net/',
# Why: #6040 in Alexa global
'http://www.livechatinc.com/',
# Why: #6041 in Alexa global
'http://www.anzhi.com/',
# Why: #6042 in Alexa global
'http://www.2checkout.com/',
# Why: #6043 in Alexa global
'http://www.bancoestado.cl/',
# Why: #6044 in Alexa global
'http://www.epson.com/',
# Why: #6045 in Alexa global
'http://www.twodollarclick.com/',
# Why: #6046 in Alexa global
'http://www.okaz.com.sa/',
# Why: #6047 in Alexa global
'http://china-sss.com/',
# Why: #6048 in Alexa global
'http://www.sagawa-exp.co.jp/',
# Why: #6049 in Alexa global
'http://www.xforex.com/',
# Why: #6050 in Alexa global
'http://www.salliemae.com/',
# Why: #6051 in Alexa global
'http://www.acunn.com/',
# Why: #6052 in Alexa global
'http://www.navyfederal.org/',
# Why: #6053 in Alexa global
'http://www.forumactif.com/',
# Why: #6054 in Alexa global
'http://www.affaire.com/',
# Why: #6055 in Alexa global
'http://www.mediatemple.net/',
# Why: #6056 in Alexa global
'http://www.qdmm.com/',
# Why: #6057 in Alexa global
'http://www.urlm.co/',
# Why: #6058 in Alexa global
'http://www.toofab.com/',
# Why: #6059 in Alexa global
'http://www.yola.com/',
# Why: #6060 in Alexa global
'http://www.sheldonsfans.com/',
# Why: #6061 in Alexa global
'http://www.piratestreaming.com/',
# Why: #6062 in Alexa global
'http://www.frontier.com/',
# Why: #6063 in Alexa global
'http://www.jxnews.com.cn/',
# Why: #6064 in Alexa global
'http://www.businesswire.com/',
# Why: #6065 in Alexa global
'http://www.rue89.com/',
# Why: #6066 in Alexa global
'http://www.yenisafak.com.tr/',
# Why: #6067 in Alexa global
'http://www.wikimart.ru/',
# Why: #6068 in Alexa global
'http://www.22.cn/',
# Why: #6069 in Alexa global
'http://www.xpressvids.info/',
# Why: #6070 in Alexa global
'http://www.medicalnewstoday.com/',
# Why: #6071 in Alexa global
'http://www.express.de/',
# Why: #6072 in Alexa global
'http://www.grid.mk/',
# Why: #6073 in Alexa global
'http://www.mass.gov/',
# Why: #6074 in Alexa global
'http://www.onlinefinder.net/',
# Why: #6075 in Alexa global
'http://www.yllix.com/',
# Why: #6076 in Alexa global
'http://www.aksam.com.tr/',
# Why: #6077 in Alexa global
'http://www.telegraf.rs/',
# Why: #6078 in Alexa global
'http://www.templatic.com/',
# Why: #6079 in Alexa global
'http://www.kandao.com/',
# Why: #6080 in Alexa global
'http://www.policymic.com/',
# Why: #6081 in Alexa global
'http://www.farfesh.com/',
# Why: #6082 in Alexa global
'http://www.alza.cz/',
# Why: #6083 in Alexa global
'http://www.judgeporn.com/',
# Why: #6084 in Alexa global
'http://townwork.net/',
# Why: #6085 in Alexa global
'http://3dcartstores.com/',
# Why: #6086 in Alexa global
'http://www.marketingland.com/',
# Why: #6087 in Alexa global
'http://okooo.com/',
# Why: #6088 in Alexa global
'http://www.siteduzero.com/',
# Why: #6089 in Alexa global
'http://www.cellbazaar.com/',
# Why: #6090 in Alexa global
'http://www.omb100.com/',
# Why: #6091 in Alexa global
'http://www.danarimedia.com/',
# Why: #6092 in Alexa global
'http://www.nlcafe.hu/',
# Why: #6093 in Alexa global
'http://www.qz.com/',
# Why: #6094 in Alexa global
'http://www.indiapost.gov.in/',
# Why: #6095 in Alexa global
'http://www.kinogo.net/',
# Why: #6096 in Alexa global
'http://www.neverblue.com/',
# Why: #6097 in Alexa global
'http://www.spyfu.com/',
# Why: #6098 in Alexa global
'http://www.shindanmaker.com/',
# Why: #6099 in Alexa global
'http://bankpasargad.com/',
# Why: #6100 in Alexa global
'http://www.techweb.com.cn/',
# Why: #6101 in Alexa global
'http://internetautoguide.com/',
# Why: #6102 in Alexa global
'http://www.allover30.com/',
# Why: #6103 in Alexa global
'http://www.metric-conversions.org/',
# Why: #6104 in Alexa global
'http://www.carid.com/',
# Why: #6105 in Alexa global
'http://www.mofos.com/',
# Why: #6106 in Alexa global
'http://www.kanald.com.tr/',
# Why: #6107 in Alexa global
'http://www.mobikwik.com/',
# Why: #6108 in Alexa global
'http://www.checkpagerank.net/',
# Why: #6109 in Alexa global
'http://www.hotscripts.com/',
# Why: #6110 in Alexa global
'http://www.hornywife.com/',
# Why: #6111 in Alexa global
'http://www.prixmoinscher.com/',
# Why: #6112 in Alexa global
'http://www.worldbank.org/',
# Why: #6113 in Alexa global
'http://www.wsodownloads.info/',
# Why: #6114 in Alexa global
'http://www.his-j.com/',
# Why: #6115 in Alexa global
'http://www.powned.tv/',
# Why: #6116 in Alexa global
'http://www.redmondpie.com/',
# Why: #6117 in Alexa global
'http://www.molotok.ru/',
# Why: #6118 in Alexa global
'http://www.whatmobile.com.pk/',
# Why: #6119 in Alexa global
'http://www.wiziq.com/',
# Why: #6120 in Alexa global
'http://www.excelsior.com.mx/',
# Why: #6121 in Alexa global
'http://www.tradetang.com/',
# Why: #6122 in Alexa global
'http://www.terra.es/',
# Why: #6123 in Alexa global
'http://www.sdchina.com/',
# Why: #6124 in Alexa global
'http://www.rai.tv/',
# Why: #6125 in Alexa global
'http://www.indiansexstories.net/',
# Why: #6127 in Alexa global
'http://www.upbulk.com/',
# Why: #6128 in Alexa global
'http://www.surveygizmo.com/',
# Why: #6129 in Alexa global
'http://www.ulta.com/',
# Why: #6130 in Alexa global
'http://www.tera-europe.com/',
# Why: #6131 in Alexa global
'http://www.tuoitre.vn/',
# Why: #6132 in Alexa global
'http://www.onedio.com/',
# Why: #6133 in Alexa global
'http://www.jz123.cn/',
# Why: #6134 in Alexa global
'http://www.canon.jp/',
# Why: #6135 in Alexa global
'http://www.favim.com/',
# Why: #6136 in Alexa global
'http://www.seo-fast.ru/',
# Why: #6137 in Alexa global
'http://www.twitterfeed.com/',
# Why: #6138 in Alexa global
'http://www.trustedreviews.com/',
# Why: #6139 in Alexa global
'http://www.ztgame.com/',
# Why: #6140 in Alexa global
'http://www.radiojavan.com/',
# Why: #6141 in Alexa global
'http://fun698.com/',
# Why: #6142 in Alexa global
'http://www.126.net/',
# Why: #6143 in Alexa global
'http://www.indiaglitz.com/',
# Why: #6144 in Alexa global
'http://www.jdouga.com/',
# Why: #6145 in Alexa global
'http://www.lofter.com/',
# Why: #6146 in Alexa global
'http://www.mysavings.com/',
# Why: #6147 in Alexa global
'http://www.snapfish.com/',
# Why: #6148 in Alexa global
'http://www.i-sux.com/',
# Why: #6149 in Alexa global
'http://www.cebbank.com/',
# Why: #6150 in Alexa global
'http://www.ethnos.gr/',
# Why: #6151 in Alexa global
'http://www.desktop2ch.tv/',
# Why: #6152 in Alexa global
'http://www.expedia.ca/',
# Why: #6153 in Alexa global
'http://www.kinja.com/',
# Why: #6154 in Alexa global
'http://www.rusfolder.com/',
# Why: #6155 in Alexa global
'http://www.expat-blog.com/',
# Why: #6156 in Alexa global
'http://www.8teenxxx.com/',
# Why: #6157 in Alexa global
'http://www.variety.com/',
# Why: #6158 in Alexa global
'http://www.natemat.pl/',
# Why: #6159 in Alexa global
'http://www.niazpardaz.com/',
# Why: #6160 in Alexa global
'http://www.gezginler.net/',
# Why: #6161 in Alexa global
'http://www.baur.de/',
# Why: #6162 in Alexa global
'http://www.tv2.no/',
# Why: #6163 in Alexa global
'http://www.realgm.com/',
# Why: #6164 in Alexa global
'http://www.zamzar.com/',
# Why: #6165 in Alexa global
'http://www.freecharge.in/',
# Why: #6166 in Alexa global
'http://www.ahlamontada.com/',
# Why: #6167 in Alexa global
'http://www.salespider.com/',
# Why: #6168 in Alexa global
'http://www.beanfun.com/',
# Why: #6169 in Alexa global
'http://www.cleveland.com/',
# Why: #6173 in Alexa global
'http://www.truecaller.com/',
# Why: #6174 in Alexa global
'http://www.walmart.ca/',
# Why: #6175 in Alexa global
'http://www.fanbox.com/',
# Why: #6176 in Alexa global
'http://www.designmodo.com/',
# Why: #6177 in Alexa global
'http://www.frip.com/',
# Why: #6178 in Alexa global
'http://www.sammobile.com/',
# Why: #6179 in Alexa global
'http://www.minnano-av.com/',
# Why: #6180 in Alexa global
'http://www.bri.co.id/',
# Why: #6181 in Alexa global
'http://www.creativebloq.com/',
# Why: #6182 in Alexa global
'http://www.anthropologie.com/',
# Why: #6183 in Alexa global
'http://www.afpbb.com/',
# Why: #6184 in Alexa global
'http://www.kingsera.ir/',
# Why: #6185 in Alexa global
'http://www.songspk.co/',
# Why: #6186 in Alexa global
'http://www.sexsearch.com/',
# Why: #6187 in Alexa global
'http://www.dailydot.com/',
# Why: #6188 in Alexa global
'http://www.hayah.cc/',
# Why: #6189 in Alexa global
'http://www.angolotesti.it/',
# Why: #6190 in Alexa global
'http://www.si.kz/',
# Why: #6191 in Alexa global
'http://www.allthingsd.com/',
# Why: #6192 in Alexa global
'http://www.paddypower.com/',
# Why: #6193 in Alexa global
'http://www.canadapost.ca/',
# Why: #6194 in Alexa global
'http://www.qq.cc/',
# Why: #6195 in Alexa global
'http://www.amctheatres.com/',
# Why: #6196 in Alexa global
'http://www.alltop.com/',
# Why: #6197 in Alexa global
'http://www.allkpop.com/',
# Why: #6198 in Alexa global
'http://www.nalog.ru/',
# Why: #6199 in Alexa global
'http://www.dynadot.com/',
# Why: #6200 in Alexa global
'http://www.copart.com/',
# Why: #6201 in Alexa global
'http://www.mexat.com/',
# Why: #6202 in Alexa global
'http://www.skelbiu.lt/',
# Why: #6203 in Alexa global
'http://www.kerala.gov.in/',
# Why: #6204 in Alexa global
'http://www.cathaypacific.com/',
# Why: #6205 in Alexa global
'http://www.clip2ni.com/',
# Why: #6206 in Alexa global
'http://www.tribune.com/',
# Why: #6207 in Alexa global
'http://www.acidcow.com/',
# Why: #6208 in Alexa global
'http://www.amkspor.com/',
# Why: #6209 in Alexa global
'http://www.shiksha.com/',
# Why: #6211 in Alexa global
'http://www.180upload.com/',
# Why: #6212 in Alexa global
'http://www.vietgiaitri.com/',
# Why: #6213 in Alexa global
'http://www.sportsauthority.com/',
# Why: #6214 in Alexa global
'http://www.banki.ir/',
# Why: #6215 in Alexa global
'http://www.vancouversun.com/',
# Why: #6216 in Alexa global
'http://www.hackforums.net/',
# Why: #6217 in Alexa global
'http://www.t-mobile.de/',
# Why: #6218 in Alexa global
'http://www.gree.jp/',
# Why: #6219 in Alexa global
'http://www.simplyrecipes.com/',
# Why: #6220 in Alexa global
'http://www.crazyhomesex.com/',
# Why: #6221 in Alexa global
'http://www.thehindubusinessline.com/',
# Why: #6222 in Alexa global
'http://www.kriesi.at/',
# Why: #6223 in Alexa global
'http://deyi.com/',
# Why: #6224 in Alexa global
'http://www.plimus.com/',
# Why: #6225 in Alexa global
'http://www.websyndic.com/',
# Why: #6226 in Alexa global
'http://www.northnews.cn/',
# Why: #6228 in Alexa global
'http://www.express.com/',
# Why: #6229 in Alexa global
'http://www.dougasouko.com/',
# Why: #6230 in Alexa global
'http://www.mmstat.com/',
# Why: #6231 in Alexa global
'http://www.womai.com/',
# Why: #6232 in Alexa global
'http://www.alrajhibank.com.sa/',
# Why: #6233 in Alexa global
'http://www.ice-porn.com/',
# Why: #6234 in Alexa global
'http://www.benchmarkemail.com/',
# Why: #6235 in Alexa global
'http://www.ringcentral.com/',
# Why: #6236 in Alexa global
'http://www.erail.in/',
# Why: #6237 in Alexa global
'http://www.poptropica.com/',
# Why: #6238 in Alexa global
'http://www.search.ch/',
# Why: #6239 in Alexa global
'http://www.meteo.it/',
# Why: #6240 in Alexa global
'http://www.adriver.ru/',
# Why: #6241 in Alexa global
'http://www.ipeen.com.tw/',
# Why: #6242 in Alexa global
'http://www.ratp.fr/',
# Why: #6243 in Alexa global
'http://www.orgasm.com/',
# Why: #6244 in Alexa global
'http://www.pornme.com/',
# Why: #6245 in Alexa global
'http://www.gameinformer.com/',
# Why: #6246 in Alexa global
'http://www.woobox.com/',
# Why: #6247 in Alexa global
'http://www.advertising.com/',
# Why: #6248 in Alexa global
'http://www.flyflv.com/',
# Why: #6249 in Alexa global
'http://www.chinaren.com/',
# Why: #6250 in Alexa global
'http://www.tube2012.com/',
# Why: #6251 in Alexa global
'http://www.ikhwanonline.com/',
# Why: #6252 in Alexa global
'http://www.iwebtool.com/',
# Why: #6253 in Alexa global
'http://www.ucdavis.edu/',
# Why: #6254 in Alexa global
'http://www.boyfriendtv.com/',
# Why: #6255 in Alexa global
'http://www.rurubu.travel/',
# Why: #6256 in Alexa global
'http://www.kabam.com/',
# Why: #6257 in Alexa global
'http://www.talkingpointsmemo.com/',
# Why: #6258 in Alexa global
'http://www.detnews.com/',
# Why: #6259 in Alexa global
'http://www.sibnet.ru/',
# Why: #6260 in Alexa global
'http://www.camztube.net/',
# Why: #6261 in Alexa global
'http://www.madamenoire.com/',
# Why: #6262 in Alexa global
'http://www.evz.ro/',
# Why: #6263 in Alexa global
'http://www.staseraintv.com/',
# Why: #6264 in Alexa global
'http://www.che168.com/',
# Why: #6265 in Alexa global
'http://www.kidshealth.org/',
# Why: #6266 in Alexa global
'http://www.m24.ru/',
# Why: #6267 in Alexa global
'http://www.zenfolio.com/',
# Why: #6268 in Alexa global
'http://www.webtretho.com/',
# Why: #6269 in Alexa global
'http://www.postjung.com/',
# Why: #6270 in Alexa global
'http://www.supersport.com/',
# Why: #6271 in Alexa global
'http://www.cshtracker.com/',
# Why: #6272 in Alexa global
'http://www.jeuxjeuxjeux.fr/',
# Why: #6273 in Alexa global
'http://www.foxtv.es/',
# Why: #6274 in Alexa global
'http://www.postjoint.com/',
# Why: #6275 in Alexa global
'http://www.honda.co.jp/',
# Why: #6276 in Alexa global
'http://www.podnapisi.net/',
# Why: #6277 in Alexa global
'http://www.prav.tv/',
# Why: #6278 in Alexa global
'http://www.realmadrid.com/',
# Why: #6279 in Alexa global
'http://www.mbs-potsdam.de/',
# Why: #6280 in Alexa global
'http://www.tim.it/',
# Why: #6281 in Alexa global
'http://uplus.metroer.com/~content/',
# Why: #6282 in Alexa global
'http://www.esquire.com/',
# Why: #6283 in Alexa global
'http://ooopic.com/',
# Why: #6284 in Alexa global
'http://www.castorama.fr/',
# Why: #6285 in Alexa global
'http://brides.com.cn/',
# Why: #6286 in Alexa global
'http://www.afamily.vn/',
# Why: #6287 in Alexa global
'http://www.findlaw.com/',
# Why: #6288 in Alexa global
'http://www.smartpassiveincome.com/',
# Why: #6289 in Alexa global
'http://www.sa.ae/',
# Why: #6290 in Alexa global
'http://www.hemnet.se/',
# Why: #6291 in Alexa global
'http://www.diytrade.com/',
# Why: #6292 in Alexa global
'http://www.weblancer.net/',
# Why: #6293 in Alexa global
'http://www.zapmeta.de/',
# Why: #6294 in Alexa global
'http://www.bizsugar.com/',
# Why: #6295 in Alexa global
'http://www.banesco.com/',
# Why: #6296 in Alexa global
'http://www.ideeli.com/',
# Why: #6297 in Alexa global
'http://www.lnx.lu/',
# Why: #6298 in Alexa global
'http://www.divxplanet.com/',
# Why: #6299 in Alexa global
'http://www.aircanada.com/',
# Why: #6300 in Alexa global
'http://uzise.com/',
# Why: #6301 in Alexa global
'http://www.sabay.com.kh/',
# Why: #6302 in Alexa global
'http://www.football365.com/',
# Why: #6303 in Alexa global
'http://www.crazydomains.com.au/',
# Why: #6304 in Alexa global
'http://www.qxox.org/',
# Why: #6305 in Alexa global
'http://www.thesmokinggun.com/',
# Why: #6306 in Alexa global
'http://www.w8n3.info/',
# Why: #6307 in Alexa global
'http://www.po.st/',
# Why: #6308 in Alexa global
'http://www.debian.org/',
# Why: #6309 in Alexa global
'http://www.flypgs.com/',
# Why: #6310 in Alexa global
'http://www.craigslist.co.in/',
# Why: #6311 in Alexa global
'http://www.islamway.net/',
# Why: #6312 in Alexa global
'http://www.research-panel.jp/',
# Why: #6313 in Alexa global
'http://www.debate.com.mx/',
# Why: #6314 in Alexa global
'http://www.bitdefender.com/',
# Why: #6315 in Alexa global
'http://www.listindiario.com/',
# Why: #6316 in Alexa global
'http://www.123telugu.com/',
# Why: #6317 in Alexa global
'http://www.ilbe.com/',
# Why: #6318 in Alexa global
'http://www.wordlinx.com/',
# Why: #6319 in Alexa global
'http://www.ebc.com.br/',
# Why: #6320 in Alexa global
'http://www.pr.gov.br/',
# Why: #6321 in Alexa global
'http://www.videoyoum7.com/',
# Why: #6322 in Alexa global
'http://www.ets.org/',
# Why: #6323 in Alexa global
'http://www.exteen.com/',
# Why: #6324 in Alexa global
'http://www.comicbookresources.com/',
# Why: #6325 in Alexa global
'http://www.grammarly.com/',
# Why: #6326 in Alexa global
'http://www.pdapi.com/',
# Why: #6327 in Alexa global
'http://adultflash01.com/',
# Why: #6328 in Alexa global
'http://www.orlandosentinel.com/',
# Why: #6330 in Alexa global
'http://www.24option.com/',
# Why: #6331 in Alexa global
'http://www.moviepilot.de/',
# Why: #6332 in Alexa global
'http://www.rfa.org/',
# Why: #6333 in Alexa global
'http://www.crateandbarrel.com/',
# Why: #6334 in Alexa global
'http://www.srv2trking.com/',
# Why: #6335 in Alexa global
'http://www.mercusuar.info/',
# Why: #6336 in Alexa global
'http://www.dofus.com/',
# Why: #6337 in Alexa global
'http://www.myfxbook.com/',
# Why: #6338 in Alexa global
'http://www.madmovs.com/',
# Why: #6339 in Alexa global
'http://www.myffi.biz/',
# Why: #6340 in Alexa global
'http://www.peru21.pe/',
# Why: #6341 in Alexa global
'http://www.bollywoodlife.com/',
# Why: #6342 in Alexa global
'http://www.gametracker.com/',
# Why: #6343 in Alexa global
'http://www.terra.com.mx/',
# Why: #6344 in Alexa global
'http://www.antenam.info/',
# Why: #6345 in Alexa global
'http://www.ihotelier.com/',
# Why: #6346 in Alexa global
'http://www.hypebeast.com/',
# Why: #6348 in Alexa global
'http://www.dramasonline.com/',
# Why: #6349 in Alexa global
'http://www.wordtracker.com/',
# Why: #6350 in Alexa global
'http://www.11st.co.kr/',
# Why: #6351 in Alexa global
'http://www.thefrisky.com/',
# Why: #6352 in Alexa global
'http://www.meritnation.com/',
# Why: #6353 in Alexa global
'http://www.irna.ir/',
# Why: #6354 in Alexa global
'http://www.trovit.com/',
# Why: #6355 in Alexa global
'http://cngold.org/',
# Why: #6356 in Alexa global
'http://www.optymalizacja.com/',
# Why: #6357 in Alexa global
'http://www.flexmls.com/',
# Why: #6358 in Alexa global
'http://www.softarchive.net/',
# Why: #6359 in Alexa global
'http://www.divxonline.info/',
# Why: #6360 in Alexa global
'http://www.malaysian-inc.com/',
# Why: #6361 in Alexa global
'http://www.dsw.com/',
# Why: #6362 in Alexa global
'http://www.fantastigames.com/',
# Why: #6363 in Alexa global
'http://www.mattcutts.com/',
# Why: #6364 in Alexa global
'http://www.ziprealty.com/',
# Why: #6365 in Alexa global
'http://www.saavn.com/',
# Why: #6366 in Alexa global
'http://www.ruporn.tv/',
# Why: #6367 in Alexa global
'http://www.e-estekhdam.com/',
# Why: #6368 in Alexa global
'http://www.novafile.com/',
# Why: #6369 in Alexa global
'http://tomsguide.fr/',
# Why: #6370 in Alexa global
'http://www.softonic.jp/',
# Why: #6371 in Alexa global
'http://www.tomshardware.co.uk/',
# Why: #6372 in Alexa global
'http://www.crosswalk.com/',
# Why: #6373 in Alexa global
'http://www.businessdictionary.com/',
# Why: #6374 in Alexa global
'http://www.sharesix.com/',
# Why: #6375 in Alexa global
'http://www.ascii.jp/',
# Why: #6376 in Alexa global
'http://www.travian.cl/',
# Why: #6377 in Alexa global
'http://www.indiastudychannel.com/',
# Why: #6378 in Alexa global
'http://www.m7shsh.com/',
# Why: #6379 in Alexa global
'http://www.hbogo.com/',
# Why: #6380 in Alexa global
'http://www.888casino.it/',
# Why: #6381 in Alexa global
'http://www.fm-p.jp/',
# Why: #6382 in Alexa global
'http://www.keywordspy.com/',
# Why: #6383 in Alexa global
'http://www.pureleverage.com/',
# Why: #6384 in Alexa global
'http://www.photodune.net/',
# Why: #6385 in Alexa global
'http://www.foreignpolicy.com/',
# Why: #6386 in Alexa global
'http://www.shiftdelete.net/',
# Why: #6387 in Alexa global
'http://www.living360.net/',
# Why: #6388 in Alexa global
'http://webmasterhome.cn/',
# Why: #6389 in Alexa global
'http://www.paixie.net/',
# Why: #6390 in Alexa global
'http://www.barstoolsports.com/',
# Why: #6391 in Alexa global
'http://www.babyhome.com.tw/',
# Why: #6392 in Alexa global
'http://www.aemet.es/',
# Why: #6393 in Alexa global
'http://www.local.ch/',
# Why: #6394 in Alexa global
'http://www.spermyporn.com/',
# Why: #6395 in Alexa global
'http://www.tasnimnews.com/',
# Why: #6396 in Alexa global
'http://www.imgserve.net/',
# Why: #6397 in Alexa global
'http://www.huawei.com/',
# Why: #6398 in Alexa global
'http://www.pik.ba/',
# Why: #6399 in Alexa global
'http://www.info-dvd.ru/',
# Why: #6400 in Alexa global
'http://www.2domains.ru/',
# Why: #6401 in Alexa global
'http://www.sextube.fm/',
# Why: #6402 in Alexa global
'http://www.searchrocket.info/',
# Why: #6403 in Alexa global
'http://www.dicio.com.br/',
# Why: #6404 in Alexa global
'http://www.ittefaq.com.bd/',
# Why: #6405 in Alexa global
'http://www.fileserve.com/',
# Why: #6406 in Alexa global
'http://www.genteflow.com/',
# Why: #6407 in Alexa global
'http://www.5giay.vn/',
# Why: #6408 in Alexa global
'http://www.elbadil.com/',
# Why: #6409 in Alexa global
'http://www.wizaz.pl/',
# Why: #6410 in Alexa global
'http://www.cyclingnews.com/',
# Why: #6411 in Alexa global
'http://www.southparkstudios.com/',
# Why: #6412 in Alexa global
'http://www.domain.cn/',
# Why: #6413 in Alexa global
'http://www.hangseng.com/',
# Why: #6414 in Alexa global
'http://www.sankeibiz.jp/',
# Why: #6415 in Alexa global
'http://www.mapsofworld.com/',
# Why: #6416 in Alexa global
'http://gaokao.com/',
# Why: #6417 in Alexa global
'http://www.antarvasna.com/',
# Why: #6418 in Alexa global
'http://www.televisa.com/',
# Why: #6419 in Alexa global
'http://www.dressupwho.com/',
# Why: #6420 in Alexa global
'http://www.goldprice.org/',
# Why: #6421 in Alexa global
'http://www.directlyrics.com/',
# Why: #6422 in Alexa global
'http://www.amway.com.cn/',
# Why: #6423 in Alexa global
'http://www.v2cigar.net/',
# Why: #6424 in Alexa global
'http://www.peopleclick.com/',
# Why: #6425 in Alexa global
'http://www.moudamepo.com/',
# Why: #6426 in Alexa global
'http://www.baijob.com/',
# Why: #6427 in Alexa global
'http://www.geni.com/',
# Why: #6428 in Alexa global
'http://huangye88.com/',
# Why: #6429 in Alexa global
'http://www.phun.org/',
# Why: #6430 in Alexa global
'http://www.kasikornbankgroup.com/',
# Why: #6431 in Alexa global
'http://www.angrymovs.com/',
# Why: #6432 in Alexa global
'http://www.bibliocommons.com/',
# Why: #6433 in Alexa global
'http://www.melateiran.com/',
# Why: #6434 in Alexa global
'http://www.gigya.com/',
# Why: #6435 in Alexa global
'http://17ok.com/',
# Why: #6436 in Alexa global
'http://www.ename.cn/',
# Why: #6437 in Alexa global
'http://www.xdowns.com/',
# Why: #6438 in Alexa global
'http://www.tportal.hr/',
# Why: #6439 in Alexa global
'http://www.dreamteammoney.com/',
# Why: #6440 in Alexa global
'http://www.prevention.com/',
# Why: #6441 in Alexa global
'http://www.terra.cl/',
# Why: #6442 in Alexa global
'http://www.blinklist.com/',
# Why: #6443 in Alexa global
'http://www.51seer.com/',
# Why: #6444 in Alexa global
'http://www.ruelsoft.com/',
# Why: #6445 in Alexa global
'http://www.kulichki.net/',
# Why: #6446 in Alexa global
'http://vippers.jp/',
# Why: #6447 in Alexa global
'http://www.tatatele.in/',
# Why: #6448 in Alexa global
'http://www.mybloggertricks.com/',
# Why: #6449 in Alexa global
'http://www.ma-bimbo.com/',
# Why: #6450 in Alexa global
'http://www.ftchinese.com/',
# Why: #6451 in Alexa global
'http://www.sergey-mavrodi-mmm.net/',
# Why: #6452 in Alexa global
'http://www.wp.tv/',
# Why: #6453 in Alexa global
'http://www.chevrolet.com/',
# Why: #6454 in Alexa global
'http://www.razerzone.com/',
# Why: #6455 in Alexa global
'http://www.submanga.com/',
# Why: #6456 in Alexa global
'http://www.thomson.co.uk/',
# Why: #6457 in Alexa global
'http://www.syosetu.org/',
# Why: #6458 in Alexa global
'http://www.olx.com/',
# Why: #6459 in Alexa global
'http://www.vplay.ro/',
# Why: #6460 in Alexa global
'http://www.rtnn.net/',
# Why: #6461 in Alexa global
'http://www.55.la/',
# Why: #6462 in Alexa global
'http://www.instructure.com/',
# Why: #6463 in Alexa global
'http://lvse.com/',
# Why: #6464 in Alexa global
'http://www.hvg.hu/',
# Why: #6465 in Alexa global
'http://www.androidpolice.com/',
# Why: #6466 in Alexa global
'http://www.cookinglight.com/',
# Why: #6467 in Alexa global
'http://www.madadsmedia.com/',
# Why: #6468 in Alexa global
'http://www.inews.gr/',
# Why: #6469 in Alexa global
'http://www.ktxp.com/',
# Why: #6470 in Alexa global
'http://www.socialsecurity.gov/',
# Why: #6471 in Alexa global
'http://www.equifax.com/',
# Why: #6472 in Alexa global
'http://www.ceskatelevize.cz/',
# Why: #6473 in Alexa global
'http://www.gaaks.com/',
# Why: #6474 in Alexa global
'http://www.chillingeffects.org/',
# Why: #6476 in Alexa global
'http://www.komando.com/',
# Why: #6477 in Alexa global
'http://www.nowpublic.com/',
# Why: #6478 in Alexa global
'http://www.khanwars.ae/',
# Why: #6479 in Alexa global
'http://www.berlin.de/',
# Why: #6480 in Alexa global
'http://www.bleepingcomputer.com/',
# Why: #6481 in Alexa global
'http://www.military.com/',
# Why: #6482 in Alexa global
'http://www.zero10.net/',
# Why: #6483 in Alexa global
'http://www.onekingslane.com/',
# Why: #6484 in Alexa global
'http://www.beget.ru/',
# Why: #6486 in Alexa global
'http://www.get-tune.net/',
# Why: #6487 in Alexa global
'http://www.freewebs.com/',
# Why: #6489 in Alexa global
'http://www.591.com.tw/',
# Why: #6490 in Alexa global
'http://www.pcfinancial.ca/',
# Why: #6491 in Alexa global
'http://www.sparknotes.com/',
# Why: #6492 in Alexa global
'http://www.tinychat.com/',
# Why: #6493 in Alexa global
'http://luxup.ru/',
# Why: #6494 in Alexa global
'http://www.geforce.com/',
# Why: #6495 in Alexa global
'http://www.tatts.com.au/',
# Why: #6496 in Alexa global
'http://www.alweeam.com.sa/',
# Why: #6497 in Alexa global
'http://www.123-reg.co.uk/',
# Why: #6498 in Alexa global
'http://www.sexyswingertube.com/',
# Why: #6499 in Alexa global
'http://www.groupon.es/',
# Why: #6500 in Alexa global
'http://www.guardianlv.com/',
# Why: #6501 in Alexa global
'http://www.hypovereinsbank.de/',
# Why: #6502 in Alexa global
'http://www.game2.com.cn/',
# Why: #6503 in Alexa global
'http://www.mofcom.gov.cn/',
# Why: #6504 in Alexa global
'http://www.usc.edu/',
# Why: #6505 in Alexa global
'http://www.ard.de/',
# Why: #6506 in Alexa global
'http://www.hoovers.com/',
# Why: #6507 in Alexa global
'http://www.tdameritrade.com/',
# Why: #6508 in Alexa global
'http://www.userscripts.org/',
# Why: #6509 in Alexa global
'http://app111.com/',
# Why: #6510 in Alexa global
'http://www.al.com/',
# Why: #6511 in Alexa global
'http://www.op.fi/',
# Why: #6512 in Alexa global
'http://www.adbkm.com/',
# Why: #6513 in Alexa global
'http://www.i-part.com.tw/',
# Why: #6514 in Alexa global
'http://www.pivithurutv.info/',
# Why: #6515 in Alexa global
'http://www.haber3.com/',
# Why: #6516 in Alexa global
'http://www.shatel.ir/',
# Why: #6517 in Alexa global
'http://www.camonster.com/',
# Why: #6518 in Alexa global
'http://www.weltbild.de/',
# Why: #6519 in Alexa global
'http://www.pingan.com.cn/',
# Why: #6520 in Alexa global
'http://www.advanceautoparts.com/',
# Why: #6521 in Alexa global
'http://www.mplssaturn.com/',
# Why: #6522 in Alexa global
'http://www.weeklystandard.com/',
# Why: #6523 in Alexa global
'http://www.cna.com.tw/',
# Why: #6524 in Alexa global
'http://www.popscreen.com/',
# Why: #6525 in Alexa global
'http://www.freelifetimefuckbook.com/',
# Why: #6526 in Alexa global
'http://www.peixeurbano.com.br/',
# Why: #6527 in Alexa global
'http://www.2258.com/',
# Why: #6528 in Alexa global
'http://www.proxfree.com/',
# Why: #6529 in Alexa global
'http://www.zend.com/',
# Why: #6530 in Alexa global
'http://www.garena.tw/',
# Why: #6531 in Alexa global
'http://www.citehr.com/',
# Why: #6532 in Alexa global
'http://www.gadyd.com/',
# Why: #6533 in Alexa global
'http://www.tvspielfilm.de/',
# Why: #6534 in Alexa global
'http://www.skapiec.pl/',
# Why: #6535 in Alexa global
'http://www.9see.com/',
# Why: #6536 in Alexa global
'http://cndns.com/',
# Why: #6537 in Alexa global
'http://www.hurriyetemlak.com/',
# Why: #6538 in Alexa global
'http://www.census.gov/',
# Why: #6539 in Alexa global
'http://www.collider.com/',
# Why: #6540 in Alexa global
'http://www.cinaplay.com/',
# Why: #6542 in Alexa global
'http://www.aq.com/',
# Why: #6543 in Alexa global
'http://www.aolsearch.com/',
# Why: #6544 in Alexa global
'http://www.ce4arab.com/',
# Why: #6546 in Alexa global
'http://www.cbi.ir/',
# Why: #6547 in Alexa global
'http://cjol.com/',
# Why: #6548 in Alexa global
'http://www.brandporno.com/',
# Why: #6549 in Alexa global
'http://www.yicheshi.com/',
# Why: #6550 in Alexa global
'http://www.mydealz.de/',
# Why: #6551 in Alexa global
'http://www.xiachufang.com/',
# Why: #6552 in Alexa global
'http://www.sun-sentinel.com/',
# Why: #6553 in Alexa global
'http://www.flashkhor.com/',
# Why: #6554 in Alexa global
'http://www.join.me/',
# Why: #6555 in Alexa global
'http://www.hankyung.com/',
# Why: #6556 in Alexa global
'http://www.oneandone.co.uk/',
# Why: #6557 in Alexa global
'http://www.derwesten.de/',
# Why: #6558 in Alexa global
'http://www.gammae.com/',
# Why: #6559 in Alexa global
'http://www.webadultdating.biz/',
# Why: #6560 in Alexa global
'http://www.pokerstars.com/',
# Why: #6561 in Alexa global
'http://www.fucked-sex.com/',
# Why: #6562 in Alexa global
'http://www.antaranews.com/',
# Why: #6563 in Alexa global
'http://www.banorte.com/',
# Why: #6564 in Alexa global
'http://www.travian.it/',
# Why: #6565 in Alexa global
'http://www.msu.edu/',
# Why: #6566 in Alexa global
'http://www.ozbargain.com.au/',
# Why: #6567 in Alexa global
'http://www.77vcd.com/',
# Why: #6568 in Alexa global
'http://www.bestooxx.com/',
# Why: #6569 in Alexa global
'http://www.siemens.com/',
# Why: #6570 in Alexa global
'http://www.en-japan.com/',
# Why: #6571 in Alexa global
'http://www.akbank.com/',
# Why: #6572 in Alexa global
'http://www.srf.ch/',
# Why: #6573 in Alexa global
'http://www.meijer.com/',
# Why: #6574 in Alexa global
'http://www.htmldrive.net/',
# Why: #6575 in Alexa global
'http://www.peoplestylewatch.com/',
# Why: #6576 in Alexa global
'http://www.4008823823.com.cn/',
# Why: #6577 in Alexa global
'http://www.boards.ie/',
# Why: #6578 in Alexa global
'http://www.zhulong.com/',
# Why: #6579 in Alexa global
'http://www.svyaznoybank.ru/',
# Why: #6580 in Alexa global
'http://www.myfilestore.com/',
# Why: #6581 in Alexa global
'http://www.sucuri.net/',
# Why: #6582 in Alexa global
'http://www.redflagdeals.com/',
# Why: #6583 in Alexa global
'http://www.gxnews.com.cn/',
# Why: #6584 in Alexa global
'http://www.javascriptkit.com/',
# Why: #6585 in Alexa global
'http://www.edreams.fr/',
# Why: #6586 in Alexa global
'http://www.wral.com/',
# Why: #6587 in Alexa global
'http://www.togetter.com/',
# Why: #6588 in Alexa global
'http://www.dmi.dk/',
# Why: #6589 in Alexa global
'http://www.thinkdigit.com/',
# Why: #6590 in Alexa global
'http://www.barclaycard.co.uk/',
# Why: #6591 in Alexa global
'http://www.comm100.com/',
# Why: #6592 in Alexa global
'http://www.christianbook.com/',
# Why: #6593 in Alexa global
'http://www.popularmechanics.com/',
# Why: #6594 in Alexa global
'http://www.taste.com.au/',
# Why: #6595 in Alexa global
'http://www.tripadvisor.ru/',
# Why: #6596 in Alexa global
'http://www.colissimo.fr/',
# Why: #6597 in Alexa global
'http://www.gdposir.info/',
# Why: #6598 in Alexa global
'http://www.rarlab.com/',
# Why: #6599 in Alexa global
'http://www.dcnepalevent.com/',
# Why: #6600 in Alexa global
'http://www.sagepub.com/',
# Why: #6601 in Alexa global
'http://www.markosweb.com/',
# Why: #6602 in Alexa global
'http://www.france3.fr/',
# Why: #6603 in Alexa global
'http://www.mindbodyonline.com/',
# Why: #6604 in Alexa global
'http://www.yapo.cl/',
# Why: #6605 in Alexa global
'http://www.0-6.com/',
# Why: #6606 in Alexa global
'http://www.dilbert.com/',
# Why: #6607 in Alexa global
'http://www.searchqu.com/',
# Why: #6608 in Alexa global
'http://www.usa.gov/',
# Why: #6609 in Alexa global
'http://www.vatandownload.com/',
# Why: #6610 in Alexa global
'http://www.nastymovs.com/',
# Why: #6611 in Alexa global
'http://www.santanderrio.com.ar/',
# Why: #6612 in Alexa global
'http://www.notebookcheck.net/',
# Why: #6613 in Alexa global
'http://www.canalplus.fr/',
# Why: #6614 in Alexa global
'http://www.coocan.jp/',
# Why: #6615 in Alexa global
'http://www.goodreads.com/user/show/',
# Why: #6616 in Alexa global
'http://www.epa.gov/',
# Why: #6617 in Alexa global
'http://www.disp.cc/',
# Why: #6618 in Alexa global
'http://www.hotsales.net/',
# Why: #6619 in Alexa global
'http://www.interpals.net/',
# Why: #6620 in Alexa global
'http://www.vz.ru/',
# Why: #6621 in Alexa global
'http://www.flyertalk.com/',
# Why: #6622 in Alexa global
'http://www.pjmedia.com/',
# Why: #6623 in Alexa global
'http://www.solomid.net/',
# Why: #6624 in Alexa global
'http://www.megaplan.ru/',
# Why: #6625 in Alexa global
'http://www.hatenablog.com/',
# Why: #6626 in Alexa global
'http://www.getsatisfaction.com/',
# Why: #6627 in Alexa global
'http://www.hotline.ua/',
# Why: #6628 in Alexa global
'http://www.alternativeto.net/',
# Why: #6629 in Alexa global
'http://www.hipfile.com/',
# Why: #6630 in Alexa global
'http://www.247sports.com/',
# Why: #6631 in Alexa global
'http://www.phpnuke.org/',
# Why: #6632 in Alexa global
'http://www.indiaresults.com/',
# Why: #6633 in Alexa global
'http://www.prisjakt.nu/',
# Why: #6634 in Alexa global
'http://www.1tvlive.in/',
# Why: #6635 in Alexa global
'http://www.e-mai.net/',
# Why: #6636 in Alexa global
'http://www.trafficg.com/',
# Why: #6637 in Alexa global
'http://www.ojogo.pt/',
# Why: #6638 in Alexa global
'http://www.totaldomination.com/',
# Why: #6639 in Alexa global
'http://www.eroino.net/',
# Why: #6640 in Alexa global
'http://www.network-tools.com/',
# Why: #6641 in Alexa global
'http://www.unibytes.com/',
# Why: #6642 in Alexa global
'http://www.seriouseats.com/',
# Why: #6643 in Alexa global
'http://www.twicsy.com/',
# Why: #6644 in Alexa global
'http://www.smbc-card.com/',
# Why: #6645 in Alexa global
'http://toocle.com/',
# Why: #6646 in Alexa global
'http://www.unbounce.com/',
# Why: #6647 in Alexa global
'http://www.2tu.cc/',
# Why: #6648 in Alexa global
'http://www.computerworld.com/',
# Why: #6649 in Alexa global
'http://www.clicktrackprofit.com/',
# Why: #6650 in Alexa global
'http://www.serialu.net/',
# Why: #6651 in Alexa global
'http://www.realfarmacy.com/',
# Why: #6652 in Alexa global
'http://metrodeal.com/',
# Why: #6653 in Alexa global
'http://www.binzhi.com/',
# Why: #6654 in Alexa global
'http://www.smilebox.com/',
# Why: #6655 in Alexa global
'http://www.coderanch.com/',
# Why: #6656 in Alexa global
'http://www.uptodown.com/',
# Why: #6657 in Alexa global
'http://www.vbulletin.com/',
# Why: #6658 in Alexa global
'http://www.teasernet.com/',
# Why: #6659 in Alexa global
'http://www.hulu.jp/',
# Why: #6660 in Alexa global
'http://www.admob.com/',
# Why: #6661 in Alexa global
'http://www.fingerhut.com/',
# Why: #6662 in Alexa global
'http://www.urlopener.com/',
# Why: #6663 in Alexa global
'http://www.vi.nl/',
# Why: #6664 in Alexa global
'http://www.gamebase.com.tw/',
# Why: #6665 in Alexa global
'http://www.expedia.de/',
# Why: #6666 in Alexa global
'http://www.thekrazycouponlady.com/',
# Why: #6667 in Alexa global
'http://www.linezing.com/',
# Why: #6668 in Alexa global
'http://www.metropcs.com/',
# Why: #6670 in Alexa global
'http://www.draugas.lt/',
# Why: #6671 in Alexa global
'http://www.minecraftdl.com/',
# Why: #6672 in Alexa global
'http://www.airberlin.com/',
# Why: #6673 in Alexa global
'http://www.eelly.com/',
# Why: #6674 in Alexa global
'http://www.siamsport.co.th/',
# Why: #6675 in Alexa global
'http://www.e-junkie.com/',
# Why: #6676 in Alexa global
'http://www.gulte.com/',
# Why: #6677 in Alexa global
'http://www.lazada.com.ph/',
# Why: #6678 in Alexa global
'http://www.cnwnews.com/',
# Why: #6679 in Alexa global
'http://www.tekstowo.pl/',
# Why: #6680 in Alexa global
'http://www.flavorwire.com/',
# Why: #6681 in Alexa global
'http://www.settrade.com/',
# Why: #6682 in Alexa global
'http://www.francetv.fr/',
# Why: #6683 in Alexa global
'http://www.experian.com/',
# Why: #6684 in Alexa global
'http://www.bravenet.com/',
# Why: #6685 in Alexa global
'http://www.mytoys.de/',
# Why: #6686 in Alexa global
'http://www.inkthemes.com/',
# Why: #6687 in Alexa global
'http://www.brobible.com/',
# Why: #6688 in Alexa global
'http://www.sarenza.com/',
# Why: #6689 in Alexa global
'http://www.curse.com/',
# Why: #6690 in Alexa global
'http://www.iresearch.cn/',
# Why: #6691 in Alexa global
'http://www.lohaco.jp/',
# Why: #6692 in Alexa global
'http://www.7sur7.be/',
# Why: #6693 in Alexa global
'http://www.iberia.com/',
# Why: #6694 in Alexa global
'http://www.trovit.es/',
# Why: #6695 in Alexa global
'http://www.eiga.com/',
# Why: #6696 in Alexa global
'http://saga123.cn/',
# Why: #6697 in Alexa global
'http://www.getuploader.com/',
# Why: #6698 in Alexa global
'http://www.sevendollarptc.com/',
# Why: #6699 in Alexa global
'http://www.amadeus.com/',
# Why: #6700 in Alexa global
'http://www.thedailystar.net/',
# Why: #6701 in Alexa global
'http://www.gofuckbiz.com/',
# Why: #6702 in Alexa global
'http://www.codepen.io/',
# Why: #6703 in Alexa global
'http://www.virginia.gov/',
# Why: #6704 in Alexa global
'http://www.linguee.fr/',
# Why: #6705 in Alexa global
'http://www.space.com/',
# Why: #6706 in Alexa global
'http://www.astrology.com/',
# Why: #6707 in Alexa global
'http://www.whmcs.com/',
# Why: #6708 in Alexa global
'http://www.blogher.com/',
# Why: #6709 in Alexa global
'http://www.netpnb.com/',
# Why: #6710 in Alexa global
'http://www.mojo-themes.com/',
# Why: #6711 in Alexa global
'http://www.cam4.es/',
# Why: #6712 in Alexa global
'http://www.bestwestern.com/',
# Why: #6713 in Alexa global
'http://www.gencat.cat/',
# Why: #6714 in Alexa global
'http://www.healthcentral.com/',
# Why: #6715 in Alexa global
'http://www.ru-board.com/',
# Why: #6716 in Alexa global
'http://www.tjsp.jus.br/',
# Why: #6717 in Alexa global
'http://www.scene7.com/',
# Why: #6718 in Alexa global
'http://www.bukalapak.com/',
# Why: #6720 in Alexa global
'http://www.intporn.com/',
# Why: #6721 in Alexa global
'http://www.xe.gr/',
# Why: #6722 in Alexa global
'http://www.leprosorium.ru/',
# Why: #6723 in Alexa global
'http://www.dytt8.net/',
# Why: #6724 in Alexa global
'http://www.wpcentral.com/',
# Why: #6725 in Alexa global
'http://www.fasttrafficformula.com/',
# Why: #6726 in Alexa global
'http://www.hugefiles.net/',
# Why: #6727 in Alexa global
'http://www.you-sex-tube.com/',
# Why: #6728 in Alexa global
'http://www.naukrigulf.com/',
# Why: #6729 in Alexa global
'http://5173.com/',
# Why: #6730 in Alexa global
'http://www.comicvip.com/',
# Why: #6731 in Alexa global
'http://www.jossandmain.com/',
# Why: #6732 in Alexa global
'http://www.motherjones.com/',
# Why: #6733 in Alexa global
'http://www.planet.fr/',
# Why: #6734 in Alexa global
'http://www.thomascook.com/',
# Why: #6735 in Alexa global
'http://www.deseretnews.com/',
# Why: #6736 in Alexa global
'http://www.aawsat.com/',
# Why: #6737 in Alexa global
'http://www.huntington.com/',
# Why: #6738 in Alexa global
'http://www.desimartini.com/',
# Why: #6739 in Alexa global
'http://www.maloumaa.blogspot.com/',
# Why: #6740 in Alexa global
'http://www.rutgers.edu/',
# Why: #6741 in Alexa global
'http://www.gratisjuegos.org/',
# Why: #6742 in Alexa global
'http://www.carsforsale.com/',
# Why: #6743 in Alexa global
'http://www.filestore72.info/',
# Why: #6744 in Alexa global
'http://www.neowin.net/',
# Why: #6745 in Alexa global
'http://www.ilgiornale.it/',
# Why: #6746 in Alexa global
'http://www.download0098.com/',
# Why: #6747 in Alexa global
'http://www.providesupport.com/',
# Why: #6748 in Alexa global
'http://www.postini.com/',
# Why: #6749 in Alexa global
'http://www.sinowaypromo.com/',
# Why: #6750 in Alexa global
'http://www.watchop.com/',
# Why: #6751 in Alexa global
'http://www.docusign.net/',
# Why: #6752 in Alexa global
'http://www.sourcenext.com/',
# Why: #6753 in Alexa global
'http://www.finviz.com/',
# Why: #6754 in Alexa global
'http://www.babyoye.com/',
# Why: #6755 in Alexa global
'http://www.andhrajyothy.com/',
# Why: #6756 in Alexa global
'http://www.gamezer.com/',
# Why: #6757 in Alexa global
'http://www.baozoumanhua.com/',
# Why: #6758 in Alexa global
'http://www.niusnews.com/',
# Why: #6759 in Alexa global
'http://www.yabancidiziizle.net/',
# Why: #6760 in Alexa global
'http://www.fodors.com/',
# Why: #6761 in Alexa global
'http://www.moonsy.com/',
# Why: #6762 in Alexa global
'http://www.lidl.it/',
# Why: #6763 in Alexa global
'http://www.betanews.com/',
# Why: #6764 in Alexa global
'http://www.auone.jp/',
# Why: #6765 in Alexa global
'http://www.escapistmagazine.com/',
# Why: #6766 in Alexa global
'http://www.markethealth.com/',
# Why: #6767 in Alexa global
'http://www.clicksure.com/',
# Why: #6768 in Alexa global
'http://www.aircel.com/',
# Why: #6769 in Alexa global
'http://www.metacrawler.com/',
# Why: #6770 in Alexa global
'http://www.aeat.es/',
# Why: #6771 in Alexa global
'http://www.allafrica.com/',
# Why: #6772 in Alexa global
'http://www.watchseries-online.eu/',
# Why: #6773 in Alexa global
'http://www.adpost.com/',
# Why: #6774 in Alexa global
'http://www.adac.de/',
# Why: #6775 in Alexa global
'http://www.similarweb.com/',
# Why: #6776 in Alexa global
'http://www.offervault.com/',
# Why: #6777 in Alexa global
'http://www.uolhost.com.br/',
# Why: #6778 in Alexa global
'http://www.moviestarplanet.com/',
# Why: #6779 in Alexa global
'http://www.overclockers.ru/',
# Why: #6780 in Alexa global
'http://www.rocketlanguages.com/',
# Why: #6781 in Alexa global
'http://www.finya.de/',
# Why: #6782 in Alexa global
'http://www.shahvani.com/',
# Why: #6783 in Alexa global
'http://www.firmy.cz/',
# Why: #6784 in Alexa global
'http://www.incometaxindia.gov.in/',
# Why: #6785 in Alexa global
'http://www.ecostream.tv/',
# Why: #6786 in Alexa global
'http://www.pcwelt.de/',
# Why: #6787 in Alexa global
'http://www.arcadesafari.com/',
# Why: #6788 in Alexa global
'http://www.shoghlanty.com/',
# Why: #6789 in Alexa global
'http://www.videosection.com/',
# Why: #6790 in Alexa global
'http://www.jcb.co.jp/',
# Why: #6792 in Alexa global
'http://www.centauro.com.br/',
# Why: #6793 in Alexa global
'http://www.eroanimedouga.net/',
# Why: #6794 in Alexa global
'http://www.orientaltrading.com/',
# Why: #6795 in Alexa global
'http://www.tsutaya.co.jp/',
# Why: #6796 in Alexa global
'http://www.ogone.com/',
# Why: #6798 in Alexa global
'http://www.sexlog.com/',
# Why: #6799 in Alexa global
'http://www.hotair.com/',
# Why: #6800 in Alexa global
'http://www.egypt.gov.eg/',
# Why: #6801 in Alexa global
'http://www.thomasnet.com/',
# Why: #6802 in Alexa global
'http://www.virustotal.com/',
# Why: #6803 in Alexa global
'http://www.hayneedle.com/',
# Why: #6804 in Alexa global
'http://www.fatburningfurnace.com/',
# Why: #6805 in Alexa global
'http://www.lovedgames.com/',
# Why: #6806 in Alexa global
'http://www.gov.cn/',
# Why: #6807 in Alexa global
'http://www.23us.com/',
# Why: #6808 in Alexa global
'http://www.trafficcaptain.com/',
# Why: #6810 in Alexa global
'http://www.v2cigs.com/',
# Why: #6811 in Alexa global
'http://www.teknosa.com.tr/',
# Why: #6812 in Alexa global
'http://www.skrill.com/',
# Why: #6813 in Alexa global
'http://www.puritanas.com/',
# Why: #6814 in Alexa global
'http://www.selfgrowth.com/',
# Why: #6815 in Alexa global
'http://www.ikco.com/',
# Why: #6816 in Alexa global
'http://www.saisoncard.co.jp/',
# Why: #6817 in Alexa global
'http://www.cuisineaz.com/',
# Why: #6818 in Alexa global
'http://www.causes.com/',
# Why: #6819 in Alexa global
'http://www.democraticunderground.com/',
# Why: #6820 in Alexa global
'http://www.placesexy.com/',
# Why: #6821 in Alexa global
'http://www.expedia.co.uk/',
# Why: #6822 in Alexa global
'http://www.www-com.co/',
# Why: #6823 in Alexa global
'http://www.topmongol.com/',
# Why: #6824 in Alexa global
'http://www.hikaritube.com/',
# Why: #6825 in Alexa global
'http://www.amakings.com/',
# Why: #6826 in Alexa global
'http://www.fxstreet.com/',
# Why: #6827 in Alexa global
'http://www.consultant.ru/',
# Why: #6828 in Alexa global
'http://www.sacbee.com/',
# Why: #6829 in Alexa global
'http://www.supercheats.com/',
# Why: #6830 in Alexa global
'http://www.sofunnylol.com/',
# Why: #6831 in Alexa global
'http://www.muzy.com/',
# Why: #6832 in Alexa global
'http://www.sparda.de/',
# Why: #6833 in Alexa global
'http://www.caughtoffside.com/',
# Why: #6834 in Alexa global
'http://www.chinawomendating.asia/',
# Why: #6835 in Alexa global
'http://www.xmeeting.com/',
# Why: #6836 in Alexa global
'http://www.google.al/',
# Why: #6837 in Alexa global
'http://www.sovereignbank.com/',
# Why: #6838 in Alexa global
'http://www.animeflv.net/',
# Why: #6839 in Alexa global
'http://www.sky.de/',
# Why: #6840 in Alexa global
'http://www.huatu.com/',
# Why: #6841 in Alexa global
'http://www.payscale.com/',
# Why: #6842 in Alexa global
'http://www.quotidiano.net/',
# Why: #6843 in Alexa global
'http://www.pol.ir/',
# Why: #6844 in Alexa global
'http://www.digital-photography-school.com/',
# Why: #6845 in Alexa global
'http://www.screencrush.com/',
# Why: #6846 in Alexa global
'http://www.battlenet.com.cn/',
# Why: #6847 in Alexa global
'http://www.netgear.com/',
# Why: #6848 in Alexa global
'http://www.thebiglistofporn.com/',
# Why: #6849 in Alexa global
'http://www.similarsitesearch.com/',
# Why: #6850 in Alexa global
'http://www.peb.pl/',
# Why: #6851 in Alexa global
'http://www.lanrentuku.com/',
# Why: #6852 in Alexa global
'http://www.ksu.edu.sa/',
# Why: #6853 in Alexa global
'http://www.tradetracker.com/',
# Why: #6855 in Alexa global
'http://www.d.cn/',
# Why: #6856 in Alexa global
'http://www.avito.ma/',
# Why: #6857 in Alexa global
'http://www.projectfree.tv/',
# Why: #6858 in Alexa global
'http://www.cmu.edu/',
# Why: #6859 in Alexa global
'http://www.imore.com/',
# Why: #6860 in Alexa global
'http://www.tickld.com/',
# Why: #6861 in Alexa global
'http://www.fitday.com/',
# Why: #6862 in Alexa global
'http://www.dulcebank.com/',
# Why: #6863 in Alexa global
'http://www.careerdonkey.com/',
# Why: #6864 in Alexa global
'http://www.pf.pl/',
# Why: #6865 in Alexa global
'http://www.otzovik.com/',
# Why: #6866 in Alexa global
'http://www.baltimoresun.com/',
# Why: #6867 in Alexa global
'http://www.ponpare.jp/',
# Why: #6868 in Alexa global
'http://www.jobvite.com/',
# Why: #6869 in Alexa global
'http://www.ratemyprofessors.com/',
# Why: #6870 in Alexa global
'http://www.bancodevenezuela.com/',
# Why: #6871 in Alexa global
'http://www.linkafarin.com/',
# Why: #6872 in Alexa global
'http://www.ufxmarkets.com/',
# Why: #6873 in Alexa global
'http://www.lavozdegalicia.es/',
# Why: #6874 in Alexa global
'http://www.99bill.com/',
# Why: #6875 in Alexa global
'http://www.punyu.com/',
# Why: #6876 in Alexa global
'http://www.otodom.pl/',
# Why: #6877 in Alexa global
'http://www.entireweb.com/',
# Why: #6878 in Alexa global
'http://www.fastshop.com.br/',
# Why: #6879 in Alexa global
'http://www.imgnip.com/',
# Why: #6880 in Alexa global
'http://www.goodlife.com/',
# Why: #6881 in Alexa global
'http://www.caringbridge.org/',
# Why: #6882 in Alexa global
'http://www.pistonheads.com/',
# Why: #6883 in Alexa global
'http://www.gun.az/',
# Why: #6884 in Alexa global
'http://www.1and1.es/',
# Why: #6885 in Alexa global
'http://www.photofunia.com/',
# Why: #6886 in Alexa global
'http://www.nme.com/',
# Why: #6887 in Alexa global
'http://www.japannetbank.co.jp/',
# Why: #6888 in Alexa global
'http://www.carfax.com/',
# Why: #6889 in Alexa global
'http://www.gutenberg.org/',
# Why: #6890 in Alexa global
'http://www.youxixiazai.org/',
# Why: #6891 in Alexa global
'http://www.webmastersitesi.com/',
# Why: #6892 in Alexa global
'http://www.skynet.be/',
# Why: #6893 in Alexa global
'http://www.afrointroductions.com/',
# Why: #6894 in Alexa global
'http://www.mp3slash.net/',
# Why: #6895 in Alexa global
'http://www.netzwelt.de/',
# Why: #6896 in Alexa global
'http://www.ecrater.com/',
# Why: #6897 in Alexa global
'http://www.livemint.com/',
# Why: #6898 in Alexa global
'http://www.worldwinner.com/',
# Why: #6899 in Alexa global
'http://www.echosign.com/',
# Why: #6900 in Alexa global
'http://www.cromaretail.com/',
# Why: #6901 in Alexa global
'http://www.freewebcamporntube.com/',
# Why: #6902 in Alexa global
'http://www.admin.ch/',
# Why: #6903 in Alexa global
'http://www.allstate.com/',
# Why: #6904 in Alexa global
'http://www.photoscape.org/',
# Why: #6905 in Alexa global
'http://www.cv-library.co.uk/',
# Why: #6906 in Alexa global
'http://www.voici.fr/',
# Why: #6907 in Alexa global
'http://www.wdr.de/',
# Why: #6908 in Alexa global
'http://www.pbase.com/',
# Why: #6909 in Alexa global
'http://www.mycenturylink.com/',
# Why: #6910 in Alexa global
'http://www.sonicomusica.com/',
# Why: #6911 in Alexa global
'http://www.schema.org/',
# Why: #6912 in Alexa global
'http://www.smashwords.com/',
# Why: #6913 in Alexa global
'http://www.al3ab.net/',
# Why: #6914 in Alexa global
'http://muryouav.net/',
# Why: #6915 in Alexa global
'http://www.mocospace.com/',
# Why: #6916 in Alexa global
'http://www.fundsxpress.com/',
# Why: #6917 in Alexa global
'http://www.chrisc.com/',
# Why: #6918 in Alexa global
'http://www.poemhunter.com/',
# Why: #6919 in Alexa global
'http://www.cupid.com/',
# Why: #6920 in Alexa global
'http://www.timescity.com/',
# Why: #6921 in Alexa global
'http://www.banglamail24.com/',
# Why: #6922 in Alexa global
'http://www.motika.com.mk/',
# Why: #6923 in Alexa global
'http://www.sec.gov/',
# Why: #6924 in Alexa global
'http://www.go.cn/',
# Why: #6925 in Alexa global
'http://www.whatculture.com/',
# Why: #6926 in Alexa global
'http://www.namepros.com/',
# Why: #6927 in Alexa global
'http://www.vsemayki.ru/',
# Why: #6928 in Alexa global
'http://www.hip2save.com/',
# Why: #6929 in Alexa global
'http://www.hotnews.ro/',
# Why: #6930 in Alexa global
'http://www.vietbao.vn/',
# Why: #6931 in Alexa global
'http://inazumanews2.com/',
# Why: #6932 in Alexa global
'http://www.irokotv.com/',
# Why: #6933 in Alexa global
'http://www.appthemes.com/',
# Why: #6934 in Alexa global
'http://www.tirerack.com/',
# Why: #6935 in Alexa global
'http://www.maxpark.com/',
# Why: #6936 in Alexa global
'http://wed114.cn/',
# Why: #6937 in Alexa global
'http://www.successfactors.com/',
# Why: #6938 in Alexa global
'http://www.sba.gov/',
# Why: #6939 in Alexa global
'http://www.hk-porno.com/',
# Why: #6940 in Alexa global
'http://www.setlinks.ru/',
# Why: #6941 in Alexa global
'http://www.travel24.com/',
# Why: #6942 in Alexa global
'http://www.qatarliving.com/',
# Why: #6943 in Alexa global
'http://www.hotlog.ru/',
# Why: #6944 in Alexa global
'http://rapmls.com/',
# Why: #6945 in Alexa global
'http://www.qualityhealth.com/',
# Why: #6946 in Alexa global
'http://www.linkcollider.com/',
# Why: #6947 in Alexa global
'http://www.kashtanka.com/',
# Why: #6948 in Alexa global
'http://www.hightail.com/',
# Why: #6949 in Alexa global
'http://www.appszoom.com/',
# Why: #6950 in Alexa global
'http://www.armagedomfilmes.biz/',
# Why: #6951 in Alexa global
'http://www.pnu.ac.ir/',
# Why: #6952 in Alexa global
'http://www.globalbux.net/',
# Why: #6953 in Alexa global
'http://www.ebay.com.hk/',
# Why: #6954 in Alexa global
'http://www.ladenzeile.de/',
# Why: #6955 in Alexa global
'http://www.thedomainfo.com/',
# Why: #6956 in Alexa global
'http://www.naosalvo.com.br/',
# Why: #6957 in Alexa global
'http://www.perfectcamgirls.com/',
# Why: #6958 in Alexa global
'http://www.verticalresponse.com/',
# Why: #6959 in Alexa global
'http://www.khabardehi.com/',
# Why: #6960 in Alexa global
'http://www.oszone.net/',
# Why: #6961 in Alexa global
'http://www.teamtreehouse.com/',
# Why: #6962 in Alexa global
'http://www.youtube.com/user/BlueXephos/',
# Why: #6963 in Alexa global
'http://www.humanservices.gov.au/',
# Why: #6964 in Alexa global
'http://www.bostonherald.com/',
# Why: #6965 in Alexa global
'http://www.kafeteria.pl/',
# Why: #6966 in Alexa global
'http://www.society6.com/',
# Why: #6967 in Alexa global
'http://www.gamevicio.com/',
# Why: #6968 in Alexa global
'http://www.crazyegg.com/',
# Why: #6969 in Alexa global
'http://www.logitravel.com/',
# Why: #6970 in Alexa global
'http://www.williams-sonoma.com/',
# Why: #6972 in Alexa global
'http://www.htmlgoodies.com/',
# Why: #6973 in Alexa global
'http://www.fontanka.ru/',
# Why: #6974 in Alexa global
'http://www.islamuon.com/',
# Why: #6975 in Alexa global
'http://www.tcs.com/',
# Why: #6976 in Alexa global
'http://www.elyrics.net/',
# Why: #6978 in Alexa global
'http://www.vip-prom.net/',
# Why: #6979 in Alexa global
'http://www.jobstreet.com.ph/',
# Why: #6980 in Alexa global
'http://www.designfloat.com/',
# Why: #6981 in Alexa global
'http://www.lavasoft.com/',
# Why: #6982 in Alexa global
'http://www.tianjinwe.com/',
# Why: #6983 in Alexa global
'http://www.telelistas.net/',
# Why: #6984 in Alexa global
'http://www.taglol.com/',
# Why: #6985 in Alexa global
'http://www.jacquieetmicheltv.net/',
# Why: #6986 in Alexa global
'http://www.esprit-online-shop.com/',
# Why: #6987 in Alexa global
'http://www.theeroticreview.com/',
# Why: #6988 in Alexa global
'http://www.boo-box.com/',
# Why: #6989 in Alexa global
'http://www.wandoujia.com/',
# Why: #6990 in Alexa global
'http://www.vgsgaming.com/',
# Why: #6991 in Alexa global
'http://www.yourtango.com/',
# Why: #6992 in Alexa global
'http://www.tianji.com/',
# Why: #6993 in Alexa global
'http://www.jpost.com/',
# Why: #6994 in Alexa global
'http://www.mythemeshop.com/',
# Why: #6995 in Alexa global
'http://www.seattlepi.com/',
# Why: #6996 in Alexa global
'http://www.nintendo.co.jp/',
# Why: #6997 in Alexa global
'http://bultannews.com/',
# Why: #6998 in Alexa global
'http://www.youlikehits.com/',
# Why: #6999 in Alexa global
'http://www.partycity.com/',
# Why: #7000 in Alexa global
'http://www.18qt.com/',
# Why: #7001 in Alexa global
'http://www.yuvutu.com/',
# Why: #7002 in Alexa global
'http://www.gq.com/',
# Why: #7003 in Alexa global
'http://www.wiziwig.tv/',
# Why: #7004 in Alexa global
'http://www.cinejosh.com/',
# Why: #7005 in Alexa global
'http://www.technet.com/',
# Why: #7006 in Alexa global
'http://www.vatanbilgisayar.com/',
# Why: #7007 in Alexa global
'http://www.guangjiela.com/',
# Why: #7008 in Alexa global
'http://www.shooter.com.cn/',
# Why: #7009 in Alexa global
'http://www.siteheart.com/',
# Why: #7010 in Alexa global
'http://www.in.gov/',
# Why: #7011 in Alexa global
'http://www.nulled.cc/',
# Why: #7012 in Alexa global
'http://www.mafiashare.net/',
# Why: #7013 in Alexa global
'http://www.tizag.com/',
# Why: #7014 in Alexa global
'http://www.hkjc.com/',
# Why: #7015 in Alexa global
'http://www.restaurant.com/',
# Why: #7016 in Alexa global
'http://www.consumersurveygroup.org/',
# Why: #7017 in Alexa global
'http://www.lolipop.jp/',
# Why: #7018 in Alexa global
'http://www.spin.de/',
# Why: #7019 in Alexa global
'http://www.silverlinetrips.com/',
# Why: #7020 in Alexa global
'http://www.triberr.com/',
# Why: #7021 in Alexa global
'http://www.gamesgirl.net/',
# Why: #7022 in Alexa global
'http://www.qqt38.com/',
# Why: #7023 in Alexa global
'http://www.xiaoshuomm.com/',
# Why: #7024 in Alexa global
'http://www.theopen.com/',
# Why: #7025 in Alexa global
'http://www.campograndenews.com.br/',
# Why: #7026 in Alexa global
'http://bshare.cn/',
# Why: #7028 in Alexa global
'http://www.soonnight.com/',
# Why: #7029 in Alexa global
'http://www.safaribooksonline.com/',
# Why: #7030 in Alexa global
'http://www.main-hosting.com/',
# Why: #7031 in Alexa global
'http://www.caclubindia.com/',
# Why: #7032 in Alexa global
'http://www.alibado.com/',
# Why: #7033 in Alexa global
'http://www.autorambler.ru/',
# Why: #7034 in Alexa global
'http://www.kafan.cn/',
# Why: #7035 in Alexa global
'http://www.tnt.com/',
# Why: #7036 in Alexa global
'http://www.chatango.com/',
# Why: #7037 in Alexa global
'http://www.satrk.com/',
# Why: #7039 in Alexa global
'http://www.pagesperso-orange.fr/',
# Why: #7040 in Alexa global
'http://www.cgbchina.com.cn/',
# Why: #7041 in Alexa global
'http://www.houseoffraser.co.uk/',
# Why: #7042 in Alexa global
'http://www.nullrefer.com/',
# Why: #7043 in Alexa global
'http://www.work.ua/',
# Why: #7044 in Alexa global
'http://www.inagist.com/',
# Why: #7045 in Alexa global
'http://www.kaban.tv/',
# Why: #7046 in Alexa global
'http://www.cnxad.com/',
# Why: #7047 in Alexa global
'http://www.tarad.com/',
# Why: #7048 in Alexa global
'http://www.masteetv.com/',
# Why: #7049 in Alexa global
'http://www.noblesamurai.com/',
# Why: #7050 in Alexa global
'http://www.businessweekly.com.tw/',
# Why: #7051 in Alexa global
'http://www.lifehacker.ru/',
# Why: #7052 in Alexa global
'http://www.anakbnet.com/',
# Why: #7053 in Alexa global
'http://www.google.co.ug/',
# Why: #7054 in Alexa global
'http://www.webcamsex.nl/',
# Why: #7055 in Alexa global
'http://kaoyan.com/',
# Why: #7056 in Alexa global
'http://www.ml.com/',
# Why: #7057 in Alexa global
'http://up.nic.in/',
# Why: #7058 in Alexa global
'http://www.bounceme.net/',
# Why: #7059 in Alexa global
'http://www.netfirms.com/',
# Why: #7060 in Alexa global
'http://www.idokep.hu/',
# Why: #7061 in Alexa global
'http://www.wambie.com/',
# Why: #7062 in Alexa global
'http://www.funpatogh.com/',
# Why: #7063 in Alexa global
'http://hmv.co.jp/',
# Why: #7064 in Alexa global
'http://www.bcash.com.br/',
# Why: #7065 in Alexa global
'http://www.sedo.co.uk/',
# Why: #7066 in Alexa global
'http://www.game2.cn/',
# Why: #7067 in Alexa global
'http://www.noupe.com/',
# Why: #7068 in Alexa global
'http://www.mydirtyhobby.com/',
# Why: #7069 in Alexa global
'http://www.neswangy.net/',
# Why: #7070 in Alexa global
'http://www.downloadprovider.me/',
# Why: #7071 in Alexa global
'http://www.utah.gov/',
# Why: #7072 in Alexa global
'http://www.consumerintelligenceusa.com/',
# Why: #7073 in Alexa global
'http://www.itimes.com/',
# Why: #7074 in Alexa global
'http://www.picroma.com/',
# Why: #7075 in Alexa global
'http://www.lustagenten.com/',
# Why: #7076 in Alexa global
'http://www.monex.co.jp/',
# Why: #7077 in Alexa global
'http://www.kemdiknas.go.id/',
# Why: #7078 in Alexa global
'http://www.sitepronews.com/',
# Why: #7079 in Alexa global
'http://www.ruseller.com/',
# Why: #7080 in Alexa global
'http://www.tradecarview.com/',
# Why: #7081 in Alexa global
'http://www.favstar.fm/',
# Why: #7082 in Alexa global
'http://www.bestbuy.ca/',
# Why: #7083 in Alexa global
'http://www.yelp.ca/',
# Why: #7084 in Alexa global
'http://www.stop-sex.com/',
# Why: #7085 in Alexa global
'http://www.rewity.com/',
# Why: #7086 in Alexa global
'http://www.qiqigames.com/',
# Why: #7087 in Alexa global
'http://www.suntimes.com/',
# Why: #7088 in Alexa global
'http://www.hardware.fr/',
# Why: #7089 in Alexa global
'http://www.rxlist.com/',
# Why: #7090 in Alexa global
'http://www.bgr.com/',
# Why: #7091 in Alexa global
'http://www.zalora.co.id/',
# Why: #7092 in Alexa global
'http://www.mandatory.com/',
# Why: #7094 in Alexa global
'http://www.collarme.com/',
# Why: #7095 in Alexa global
'http://www.mycommerce.com/',
# Why: #7096 in Alexa global
'http://www.holidayiq.com/',
# Why: #7097 in Alexa global
'http://www.filecloud.io/',
# Why: #7098 in Alexa global
'http://www.vconnect.com/',
# Why: #7099 in Alexa global
'http://66163.com/',
# Why: #7100 in Alexa global
'http://www.tlen.pl/',
# Why: #7101 in Alexa global
'http://www.mmbang.com/',
# Why: #7102 in Alexa global
'http://7c.com/',
# Why: #7103 in Alexa global
'http://www.digitalriver.com/',
# Why: #7104 in Alexa global
'http://www.24video.net/',
# Why: #7105 in Alexa global
'http://www.worthofweb.com/',
# Why: #7106 in Alexa global
'http://www.clasicooo.com/',
# Why: #7107 in Alexa global
'http://www.greatschools.net/',
# Why: #7108 in Alexa global
'http://www.tagesanzeiger.ch/',
# Why: #7109 in Alexa global
'http://www.video.az/',
# Why: #7110 in Alexa global
'http://www.osu.edu/',
# Why: #7111 in Alexa global
'http://www.careers360.com/',
# Why: #7112 in Alexa global
'http://www.101.ru/',
# Why: #7113 in Alexa global
'http://www.conforama.fr/',
# Why: #7114 in Alexa global
'http://www.apollo.lv/',
# Why: #7115 in Alexa global
'http://www.netcq.net/',
# Why: #7116 in Alexa global
'http://www.jofogas.hu/',
# Why: #7117 in Alexa global
'http://www.niftylink.com/',
# Why: #7118 in Alexa global
'http://www.midwayusa.com/',
# Why: #7119 in Alexa global
'http://www.collegeteensex.net/',
# Why: #7120 in Alexa global
'http://www.search.com/',
# Why: #7121 in Alexa global
'http://www.naftemporiki.gr/',
# Why: #7122 in Alexa global
'http://www.sainsburys.co.uk/',
# Why: #7123 in Alexa global
'http://www.fitsugar.com/',
# Why: #7124 in Alexa global
'http://www.ifixit.com/',
# Why: #7125 in Alexa global
'http://www.uid.me/',
# Why: #7126 in Alexa global
'http://www.malwarebytes.org/',
# Why: #7127 in Alexa global
'http://www.maxbounty.com/',
# Why: #7128 in Alexa global
'http://www.mensfitness.com/',
# Why: #7129 in Alexa global
'http://www.rtl.be/',
# Why: #7130 in Alexa global
'http://www.yidio.com/',
# Why: #7131 in Alexa global
'http://www.dostorasly.com/',
# Why: #7132 in Alexa global
'http://www.abovetopsecret.com/',
# Why: #7133 in Alexa global
'http://www.sm3na.com/',
# Why: #7134 in Alexa global
'http://www.cam.ac.uk/',
# Why: #7135 in Alexa global
'http://www.gamegape.com/',
# Why: #7136 in Alexa global
'http://www.ocioso.com.br/',
# Why: #7138 in Alexa global
'http://www.register.com/',
# Why: #7139 in Alexa global
'http://www.wwitv.com/',
# Why: #7140 in Alexa global
'http://www.ishangman.com/',
# Why: #7141 in Alexa global
'http://www.gry-online.pl/',
# Why: #7142 in Alexa global
'http://www.ogli.org/',
# Why: #7143 in Alexa global
'http://www.redbull.com/',
# Why: #7144 in Alexa global
'http://www.dyn.com/',
# Why: #7145 in Alexa global
'http://www.freeservers.com/',
# Why: #7146 in Alexa global
'http://www.brandsoftheworld.com/',
# Why: #7147 in Alexa global
'http://www.lorddownload.com/',
# Why: #7148 in Alexa global
'http://www.epson.co.jp/',
# Why: #7149 in Alexa global
'http://www.mybet.com/',
# Why: #7150 in Alexa global
'http://www.brothalove.com/',
# Why: #7151 in Alexa global
'http://www.inchallah.com/',
# Why: #7153 in Alexa global
'http://www.lottomatica.it/',
# Why: #7154 in Alexa global
'http://www.indiamp3.com/',
# Why: #7155 in Alexa global
'http://www.qianbao666.com/',
# Why: #7156 in Alexa global
'http://www.zurb.com/',
# Why: #7157 in Alexa global
'http://www.synxis.com/',
# Why: #7158 in Alexa global
'http://www.baskino.com/',
# Why: #7159 in Alexa global
'http://www.swefilmer.com/',
# Why: #7160 in Alexa global
'http://www.hotstartsearch.com/',
# Why: #7161 in Alexa global
'http://www.cloudmoney.info/',
# Why: #7162 in Alexa global
'http://www.polldaddy.com/',
# Why: #7163 in Alexa global
'http://www.moheet.com/',
# Why: #7164 in Alexa global
'http://www.idhostinger.com/',
# Why: #7166 in Alexa global
'http://www.mp3chief.com/',
# Why: #7167 in Alexa global
'http://www.7netshopping.jp/',
# Why: #7168 in Alexa global
'http://www.tao123.com/',
# Why: #7169 in Alexa global
'http://www.channelnewsasia.com/',
# Why: #7170 in Alexa global
'http://www.yahoo-help.jp/',
# Why: #7171 in Alexa global
'http://www.galeon.com/',
# Why: #7172 in Alexa global
'http://www.aviasales.ru/',
# Why: #7173 in Alexa global
'http://www.datafilehost.com/',
# Why: #7174 in Alexa global
'http://www.travian.com.eg/',
# Why: #7175 in Alexa global
'http://www.ebookee.org/',
# Why: #7176 in Alexa global
'http://www.filmstarts.de/',
# Why: #7177 in Alexa global
'http://www.inccel.com/',
# Why: #7178 in Alexa global
'http://www.chatroulette.com/',
# Why: #7179 in Alexa global
'http://www.it-ebooks.info/',
# Why: #7180 in Alexa global
'http://www.sina.com.tw/',
# Why: #7181 in Alexa global
'http://www.nix.ru/',
# Why: #7182 in Alexa global
'http://www.antena3.ro/',
# Why: #7183 in Alexa global
'http://www.mylifetime.com/',
# Why: #7184 in Alexa global
'http://www.desitorrents.com/',
# Why: #7185 in Alexa global
'http://www.mydigitallife.info/',
# Why: #7186 in Alexa global
'http://www.aeropostale.com/',
# Why: #7187 in Alexa global
'http://www.anilos.com/',
# Why: #7188 in Alexa global
'http://www.macadogru.com/',
# Why: #7189 in Alexa global
'http://www.premiere.fr/',
# Why: #7190 in Alexa global
'http://www.estorebuilder.com/',
# Why: #7191 in Alexa global
'http://www.eventim.de/',
# Why: #7192 in Alexa global
'http://www.expert-offers.com/',
# Why: #7193 in Alexa global
'http://www.deloitte.com/',
# Why: #7194 in Alexa global
'http://www.thetimenow.com/',
# Why: #7195 in Alexa global
'http://www.spicybigbutt.com/',
# Why: #7196 in Alexa global
'http://www.gistmania.com/',
# Why: #7197 in Alexa global
'http://www.pekao24.pl/',
# Why: #7198 in Alexa global
'http://www.mbok.jp/',
# Why: #7199 in Alexa global
'http://www.linkfeed.ru/',
# Why: #7200 in Alexa global
'http://www.carnival.com/',
# Why: #7201 in Alexa global
'http://www.apherald.com/',
# Why: #7202 in Alexa global
'http://www.choicehotels.com/',
# Why: #7203 in Alexa global
'http://www.revolvermaps.com/',
# Why: #7204 in Alexa global
'http://digu.com/',
# Why: #7205 in Alexa global
'http://www.yekmobile.com/',
# Why: #7206 in Alexa global
'http://www.barbarianmovies.com/',
# Why: #7207 in Alexa global
'http://www.jogos.uol.com.br/',
# Why: #7208 in Alexa global
'http://www.poyopara.com/',
# Why: #7209 in Alexa global
'http://www.vse.kz/',
# Why: #7210 in Alexa global
'http://www.socialspark.com/',
# Why: #7211 in Alexa global
'http://www.deutschepost.de/',
# Why: #7212 in Alexa global
'http://www.nokaut.pl/',
# Why: #7214 in Alexa global
'http://www.farpost.ru/',
# Why: #7215 in Alexa global
'http://www.shoebuy.com/',
# Why: #7216 in Alexa global
'http://www.1c-bitrix.ru/',
# Why: #7217 in Alexa global
'http://www.pimproll.com/',
# Why: #7218 in Alexa global
'http://www.startxchange.com/',
# Why: #7219 in Alexa global
'http://www.seocentro.com/',
# Why: #7220 in Alexa global
'http://www.kporno.com/',
# Why: #7221 in Alexa global
'http://www.izvestia.ru/',
# Why: #7222 in Alexa global
'http://www.bathandbodyworks.com/',
# Why: #7223 in Alexa global
'http://www.allhyipmonitors.com/',
# Why: #7224 in Alexa global
'http://www.europe1.fr/',
# Why: #7225 in Alexa global
'http://www.charter.com/',
# Why: #7226 in Alexa global
'http://www.sixflags.com/',
# Why: #7227 in Alexa global
'http://www.abcjuegos.net/',
# Why: #7228 in Alexa global
'http://www.wind.it/',
# Why: #7229 in Alexa global
'http://www.femjoy.com/',
# Why: #7230 in Alexa global
'http://www.humanmetrics.com/',
# Why: #7231 in Alexa global
'http://www.myrealgames.com/',
# Why: #7232 in Alexa global
'http://www.cosmiq.de/',
# Why: #7233 in Alexa global
'http://www.bangbrosteenporn.com/',
# Why: #7234 in Alexa global
'http://www.kir.jp/',
# Why: #7235 in Alexa global
'http://www.thepetitionsite.com/',
# Why: #7236 in Alexa global
'http://laprensa.com.ni/',
# Why: #7237 in Alexa global
'http://www.investors.com/',
# Why: #7238 in Alexa global
'http://www.techpowerup.com/',
# Why: #7239 in Alexa global
'http://www.prosperityteam.com/',
# Why: #7240 in Alexa global
'http://www.autogidas.lt/',
# Why: #7241 in Alexa global
'http://www.state.ny.us/',
# Why: #7242 in Alexa global
'http://www.techbargains.com/',
# Why: #7243 in Alexa global
'http://www.takvim.com.tr/',
# Why: #7244 in Alexa global
'http://www.kko-appli.com/',
# Why: #7245 in Alexa global
'http://www.liex.ru/',
# Why: #7246 in Alexa global
'http://www.cafe24.com/',
# Why: #7247 in Alexa global
'http://www.definebabe.com/',
# Why: #7248 in Alexa global
'http://www.egirlgames.net/',
# Why: #7249 in Alexa global
'http://www.avangard.ru/',
# Why: #7250 in Alexa global
'http://www.sina.com.hk/',
# Why: #7251 in Alexa global
'http://www.freexcafe.com/',
# Why: #7252 in Alexa global
'http://www.vesti.bg/',
# Why: #7253 in Alexa global
'http://www.francetvinfo.fr/',
# Why: #7254 in Alexa global
'http://www.mathsisfun.com/',
# Why: #7255 in Alexa global
'http://www.easymobilerecharge.com/',
# Why: #7256 in Alexa global
'http://www.dapink.com/',
# Why: #7257 in Alexa global
'http://www.propellerads.com/',
# Why: #7258 in Alexa global
'http://www.devshed.com/',
# Why: #7259 in Alexa global
'http://www.clip.vn/',
# Why: #7260 in Alexa global
'http://www.vidivodo.com/',
# Why: #7262 in Alexa global
'http://www.blogspot.dk/',
# Why: #7263 in Alexa global
'http://www.foxnewsinsider.com/',
# Why: #7264 in Alexa global
'http://www.instapaper.com/',
# Why: #7265 in Alexa global
'http://www.premierleague.com/',
# Why: #7266 in Alexa global
'http://www.elo7.com.br/',
# Why: #7267 in Alexa global
'http://www.teenee.com/',
# Why: #7268 in Alexa global
'http://www.clien.net/',
# Why: #7269 in Alexa global
'http://www.computrabajo.com.co/',
# Why: #7270 in Alexa global
'http://www.komputronik.pl/',
# Why: #7271 in Alexa global
'http://www.livesurf.ru/',
# Why: #7272 in Alexa global
'http://www.123cha.com/',
# Why: #7273 in Alexa global
'http://www.cgg.gov.in/',
# Why: #7274 in Alexa global
'http://www.leadimpact.com/',
# Why: #7275 in Alexa global
'http://www.socialmonkee.com/',
# Why: #7276 in Alexa global
'http://www.speeddate.com/',
# Why: #7277 in Alexa global
'http://www.bet-at-home.com/',
# Why: #7278 in Alexa global
'http://www.todaoferta.uol.com.br/',
# Why: #7279 in Alexa global
'http://www.huanqiuauto.com/',
# Why: #7280 in Alexa global
'http://www.tadawul.com.sa/',
# Why: #7281 in Alexa global
'http://www.ucsd.edu/',
# Why: #7282 in Alexa global
'http://www.fda.gov/',
# Why: #7283 in Alexa global
'http://www.asahi-net.or.jp/',
# Why: #7284 in Alexa global
'http://www.cint.com/',
# Why: #7285 in Alexa global
'http://www.homedepot.ca/',
# Why: #7286 in Alexa global
'http://www.webcars.com.cn/',
# Why: #7288 in Alexa global
'http://www.ciao.de/',
# Why: #7289 in Alexa global
'http://www.gigglesglore.com/',
# Why: #7290 in Alexa global
'http://www.warframe.com/',
# Why: #7291 in Alexa global
'http://www.mulher.uol.com.br/',
# Why: #7292 in Alexa global
'http://www.prosieben.de/',
# Why: #7293 in Alexa global
'http://www.vistaprint.in/',
# Why: #7294 in Alexa global
'http://www.mapple.net/',
# Why: #7295 in Alexa global
'http://www.usafis.org/',
# Why: #7296 in Alexa global
'http://www.kuaipan.cn/',
# Why: #7297 in Alexa global
'http://www.truelife.com/',
# Why: #7298 in Alexa global
'http://1o26.com/',
# Why: #7299 in Alexa global
'http://www.boldsky.com/',
# Why: #7300 in Alexa global
'http://www.freeforums.org/',
# Why: #7301 in Alexa global
'http://www.lolnexus.com/',
# Why: #7302 in Alexa global
'http://ti-da.net/',
# Why: #7303 in Alexa global
'http://www.handelsbanken.se/',
# Why: #7304 in Alexa global
'http://www.khamsat.com/',
# Why: #7305 in Alexa global
'http://www.futbol24.com/',
# Why: #7306 in Alexa global
'http://www.wikifeet.com/',
# Why: #7307 in Alexa global
'http://www.dev-point.com/',
# Why: #7308 in Alexa global
'http://www.ibotoolbox.com/',
# Why: #7309 in Alexa global
'http://www.indeed.de/',
# Why: #7310 in Alexa global
'http://www.ct10000.com/',
# Why: #7311 in Alexa global
'http://www.appleinsider.com/',
# Why: #7312 in Alexa global
'http://www.lyoness.net/',
# Why: #7313 in Alexa global
'http://www.vodafone.com.eg/',
# Why: #7314 in Alexa global
'http://www.aifang.com/',
# Why: #7315 in Alexa global
'http://www.tripadvisor.com.br/',
# Why: #7316 in Alexa global
'http://www.hbo.com/',
# Why: #7317 in Alexa global
'http://www.pricerunner.com/',
# Why: #7318 in Alexa global
'http://www.4everproxy.com/',
# Why: #7319 in Alexa global
'http://www.fc-perspolis.com/',
# Why: #7320 in Alexa global
'http://www.themobileindian.com/',
# Why: #7321 in Alexa global
'http://www.gimp.org/',
# Why: #7322 in Alexa global
'http://www.novayagazeta.ru/',
# Why: #7323 in Alexa global
'http://www.dnfight.com/',
# Why: #7324 in Alexa global
'http://www.coco.fr/',
# Why: #7325 in Alexa global
'http://www.thestudentroom.co.uk/',
# Why: #7326 in Alexa global
'http://www.tiin.vn/',
# Why: #7327 in Alexa global
'http://www.dailystar.co.uk/',
# Why: #7328 in Alexa global
'http://www.empowernetwork.com/commissionloophole.php/',
# Why: #7329 in Alexa global
'http://www.unfollowed.me/',
# Why: #7330 in Alexa global
'http://www.wanfangdata.com.cn/',
# Why: #7331 in Alexa global
'http://www.aljazeerasport.net/',
# Why: #7332 in Alexa global
'http://www.nasygnale.pl/',
# Why: #7333 in Alexa global
'http://www.somethingawful.com/',
# Why: #7334 in Alexa global
'http://www.ddo.jp/',
# Why: #7335 in Alexa global
'http://www.scamadviser.com/',
# Why: #7336 in Alexa global
'http://www.mcanime.net/',
# Why: #7337 in Alexa global
'http://www.9stock.com/',
# Why: #7338 in Alexa global
'http://www.boostmobile.com/',
# Why: #7339 in Alexa global
'http://www.oyunkolu.com/',
# Why: #7340 in Alexa global
'http://www.beliefnet.com/',
# Why: #7341 in Alexa global
'http://www.lyrics007.com/',
# Why: #7342 in Alexa global
'http://www.rtv.net/',
# Why: #7343 in Alexa global
'http://www.hasbro.com/',
# Why: #7344 in Alexa global
'http://www.vcp.ir/',
# Why: #7345 in Alexa global
'http://www.fj-p.com/',
# Why: #7346 in Alexa global
'http://www.jetbrains.com/',
# Why: #7347 in Alexa global
'http://www.empowernetwork.com/almostasecret.php/',
# Why: #7348 in Alexa global
'http://www.cpalead.com/',
# Why: #7349 in Alexa global
'http://www.zetaboards.com/',
# Why: #7350 in Alexa global
'http://www.sbobet.com/',
# Why: #7351 in Alexa global
'http://www.v2ex.com/',
# Why: #7352 in Alexa global
'http://xsrv.jp/',
# Why: #7353 in Alexa global
'http://www.toggle.com/',
# Why: #7354 in Alexa global
'http://www.lanebryant.com/',
# Why: #7355 in Alexa global
'http://www.girlgames4u.com/',
# Why: #7356 in Alexa global
'http://www.amadershomoy1.com/',
# Why: #7357 in Alexa global
'http://www.planalto.gov.br/',
# Why: #7358 in Alexa global
'http://news-choice.net/',
# Why: #7359 in Alexa global
'http://sarkarinaukriblog.com/',
# Why: #7360 in Alexa global
'http://www.sudouest.fr/',
# Why: #7361 in Alexa global
'http://www.zdomo.com/',
# Why: #7362 in Alexa global
'http://www.egy-nn.com/',
# Why: #7363 in Alexa global
'http://www.pizzaplot.com/',
# Why: #7364 in Alexa global
'http://www.topgear.com/',
# Why: #7365 in Alexa global
'http://www.sony.co.in/',
# Why: #7366 in Alexa global
'http://www.nosv.org/',
# Why: #7367 in Alexa global
'http://www.beppegrillo.it/',
# Why: #7368 in Alexa global
'http://www.sakshieducation.com/',
# Why: #7370 in Alexa global
'http://www.temagay.com/',
# Why: #7371 in Alexa global
'http://www.stepashka.com/',
# Why: #7372 in Alexa global
'http://www.tmart.com/',
# Why: #7373 in Alexa global
'http://www.readwrite.com/',
# Why: #7374 in Alexa global
'http://www.tudiscoverykids.com/',
# Why: #7375 in Alexa global
'http://www.belfius.be/',
# Why: #7376 in Alexa global
'http://www.submitexpress.com/',
# Why: #7377 in Alexa global
'http://www.autoscout24.ch/',
# Why: #7378 in Alexa global
'http://www.aetna.com/',
# Why: #7379 in Alexa global
'http://www.torrent-anime.com/',
# Why: #7380 in Alexa global
'http://www.superhqporn.com/',
# Why: #7381 in Alexa global
'http://www.kaufda.de/',
# Why: #7382 in Alexa global
'http://www.adorocinema.com/',
# Why: #7383 in Alexa global
'http://www.burning-seri.es/',
# Why: #7384 in Alexa global
'http://www.rlsbb.com/',
# Why: #7385 in Alexa global
'http://www.lativ.com.tw/',
# Why: #7386 in Alexa global
'http://www.housing.co.in/',
# Why: #7387 in Alexa global
'http://www.taiwanlottery.com.tw/',
# Why: #7388 in Alexa global
'http://www.invisionfree.com/',
# Why: #7389 in Alexa global
'http://www.istruzione.it/',
# Why: #7390 in Alexa global
'http://www.desk.com/',
# Why: #7391 in Alexa global
'http://www.lyricsmint.com/',
# Why: #7392 in Alexa global
'http://www.taohuopu.com/',
# Why: #7393 in Alexa global
'http://www.silverdaddies.com/',
# Why: #7394 in Alexa global
'http://www.gov.cl/',
# Why: #7395 in Alexa global
'http://www.vtc.vn/',
# Why: #7397 in Alexa global
'http://www.tanea.gr/',
# Why: #7398 in Alexa global
'http://www.labirint.ru/',
# Why: #7399 in Alexa global
'http://www.sns104.com/',
# Why: #7400 in Alexa global
'http://www.plu.cn/',
# Why: #7401 in Alexa global
'http://www.bigpicture.ru/',
# Why: #7402 in Alexa global
'http://www.marketo.com/',
# Why: #7403 in Alexa global
'http://www.ismmagic.com/',
# Why: #7404 in Alexa global
'http://www.c-sharpcorner.com/',
# Why: #7405 in Alexa global
'http://www.synacor.com/',
# Why: #7406 in Alexa global
'http://www.answered-questions.com/',
# Why: #7407 in Alexa global
'http://www.prlog.ru/',
# Why: #7408 in Alexa global
'http://www.vodafone.com.tr/',
# Why: #7409 in Alexa global
'http://www.yofoto.cn/',
# Why: #7410 in Alexa global
'http://www.thenews.com.pk/',
# Why: #7411 in Alexa global
'http://www.galaxygiftcard.com/',
# Why: #7412 in Alexa global
'http://www.job-search-engine.com/',
# Why: #7413 in Alexa global
'http://www.se.pl/',
# Why: #7414 in Alexa global
'http://www.consumercomplaints.in/',
# Why: #7415 in Alexa global
'http://www.265.com/',
# Why: #7416 in Alexa global
'http://www.cba.pl/',
# Why: #7417 in Alexa global
'http://www.humoron.com/',
# Why: #7418 in Alexa global
'http://www.uscourts.gov/',
# Why: #7419 in Alexa global
'http://www.blog.pl/',
# Why: #7421 in Alexa global
'http://youtu.be/',
# Why: #7422 in Alexa global
'http://www.play4free.com/',
# Why: #7423 in Alexa global
'http://www.blizko.ru/',
# Why: #7424 in Alexa global
'http://www.uswebproxy.com/',
# Why: #7425 in Alexa global
'http://www.housefun.com.tw/',
# Why: #7426 in Alexa global
'http://www.winning-play.com/',
# Why: #7427 in Alexa global
'http://www.yourstory.in/',
# Why: #7428 in Alexa global
'http://www.tinmoi.vn/',
# Why: #7429 in Alexa global
'http://www.yongchuntang.net/',
# Why: #7430 in Alexa global
'http://www.artofmanliness.com/',
# Why: #7431 in Alexa global
'http://www.nadaguides.com/',
# Why: #7432 in Alexa global
'http://www.ndr.de/',
# Why: #7433 in Alexa global
'http://www.kuidle.com/',
# Why: #7434 in Alexa global
'http://www.hopy.com/',
# Why: #7435 in Alexa global
'http://www.roi.ru/',
# Why: #7436 in Alexa global
'http://www.sdpnoticias.com/',
# Why: #7437 in Alexa global
'http://www.nation.com/',
# Why: #7438 in Alexa global
'http://www.gnu.org/',
# Why: #7439 in Alexa global
'http://www.vogue.co.uk/',
# Why: #7440 in Alexa global
'http://www.letsebuy.com/',
# Why: #7441 in Alexa global
'http://www.preloved.co.uk/',
# Why: #7442 in Alexa global
'http://www.yatedo.com/',
# Why: #7443 in Alexa global
'http://www.rs-online.com/',
# Why: #7444 in Alexa global
'http://www.kino-teatr.ru/',
# Why: #7445 in Alexa global
'http://www.meeticaffinity.fr/',
# Why: #7446 in Alexa global
'http://www.clip.dj/',
# Why: #7447 in Alexa global
'http://www.j-sen.jp/',
# Why: #7448 in Alexa global
'http://www.compete.com/',
# Why: #7449 in Alexa global
'http://pravda.sk/',
# Why: #7450 in Alexa global
'http://www.oursogo.com/',
# Why: #7451 in Alexa global
'http://www.designyourway.net/',
# Why: #7452 in Alexa global
'http://www.elcorreo.com/',
# Why: #7453 in Alexa global
'http://www.williamhill.es/',
# Why: #7454 in Alexa global
'http://www.lavenir.net/',
# Why: #7455 in Alexa global
'http://www.voyage-prive.es/',
# Why: #7456 in Alexa global
'http://www.teambeachbody.com/',
# Why: #7457 in Alexa global
'http://www.sportdog.gr/',
# Why: #7458 in Alexa global
'http://www.klicktel.de/',
# Why: #7459 in Alexa global
'http://www.ktonanovenkogo.ru/',
# Why: #7460 in Alexa global
'http://www.sbwire.com/',
# Why: #7461 in Alexa global
'http://www.pearsoncmg.com/',
# Why: #7462 in Alexa global
'http://www.bankifsccode.com/',
# Why: #7463 in Alexa global
'http://www.thenationonlineng.net/',
# Why: #7464 in Alexa global
'http://www.itp.ne.jp/',
# Why: #7465 in Alexa global
'http://www.bangbros1.com/',
# Why: #7466 in Alexa global
'http://www.tarot.com/',
# Why: #7467 in Alexa global
'http://www.acdsee.com/',
# Why: #7468 in Alexa global
'http://www.blogos.com/',
# Why: #7469 in Alexa global
'http://www.dinnerwithmariah.com/',
# Why: #7470 in Alexa global
'http://www.japan-women-dating.com/',
# Why: #7471 in Alexa global
'http://www.sarzamindownload.com/',
# Why: #7472 in Alexa global
'http://www.timesonline.co.uk/',
# Why: #7473 in Alexa global
'http://okbuy.com/',
# Why: #7474 in Alexa global
'http://www.sbb.ch/',
# Why: #7475 in Alexa global
'http://www.mundogaturro.com/',
# Why: #7476 in Alexa global
'http://www.meinvz.net/',
# Why: #7477 in Alexa global
'http://www.trafficadbar.com/',
# Why: #7478 in Alexa global
'http://www.9minecraft.net/',
# Why: #7479 in Alexa global
'http://www.suntory.co.jp/',
# Why: #7480 in Alexa global
'http://www.nextbigwhat.com/',
# Why: #7481 in Alexa global
'http://www.eshetab.com/',
# Why: #7482 in Alexa global
'http://www.meristation.com/',
# Why: #7483 in Alexa global
'http://www.kalahari.com/',
# Why: #7484 in Alexa global
'http://www.pimpandhost.com/',
# Why: #7485 in Alexa global
'http://www.pbworks.com/',
# Why: #7486 in Alexa global
'http://www.peopledaily.com.cn/',
# Why: #7487 in Alexa global
'http://www.bokee.net/',
# Why: #7488 in Alexa global
'http://www.google.ps/',
# Why: #7489 in Alexa global
'http://www.seccionamarilla.com.mx/',
# Why: #7490 in Alexa global
'http://www.foroactivo.com/',
# Why: #7491 in Alexa global
'http://www.gizmodo.jp/',
# Why: #7492 in Alexa global
'http://www.kalaydo.de/',
# Why: #7493 in Alexa global
'http://www.gomaji.com/',
# Why: #7494 in Alexa global
'http://www.exactseek.com/',
# Why: #7495 in Alexa global
'http://www.cashtaller.ru/',
# Why: #7496 in Alexa global
'http://www.blogspot.co.nz/',
# Why: #7497 in Alexa global
'http://www.volvocars.com/',
# Why: #7498 in Alexa global
'http://www.marathonbet.com/',
# Why: #7499 in Alexa global
'http://www.cityhouse.cn/',
# Why: #7500 in Alexa global
'http://www.hk-pub.com/',
# Why: #7501 in Alexa global
'http://www.seriouslyfacts.me/',
# Why: #7502 in Alexa global
'http://www.streetdirectory.com/',
# Why: #7504 in Alexa global
'http://www.mediamasr.tv/',
# Why: #7505 in Alexa global
'http://www.straitstimes.com/',
# Why: #7506 in Alexa global
'http://www.promodj.com/',
# Why: #7507 in Alexa global
'http://www.3dwwwgame.com/',
# Why: #7508 in Alexa global
'http://www.autovit.ro/',
# Why: #7509 in Alexa global
'http://www.ahlalhdeeth.com/',
# Why: #7510 in Alexa global
'http://www.forum-auto.com/',
# Why: #7511 in Alexa global
'http://www.stooorage.com/',
# Why: #7512 in Alexa global
'http://www.mobilism.org/',
# Why: #7513 in Alexa global
'http://www.hideref.org/',
# Why: #7514 in Alexa global
'http://www.mn66.com/',
# Why: #7515 in Alexa global
'http://www.internations.org/',
# Why: #7516 in Alexa global
'http://www.dhc.co.jp/',
# Why: #7517 in Alexa global
'http://www.sbicard.com/',
# Why: #7518 in Alexa global
'http://www.dayoo.com/',
# Why: #7519 in Alexa global
'http://biquge.com/',
# Why: #7520 in Alexa global
'http://www.theme.wordpress.com/',
# Why: #7521 in Alexa global
'http://www.mrdoob.com/',
# Why: #7522 in Alexa global
'http://www.vpls.net/',
# Why: #7523 in Alexa global
'http://www.alquma-a.com/',
# Why: #7524 in Alexa global
'http://www.bankmillennium.pl/',
# Why: #7525 in Alexa global
'http://www.mitele.es/',
# Why: #7526 in Alexa global
'http://www.tro-ma-ktiko.blogspot.gr/',
# Why: #7527 in Alexa global
'http://www.bookmark4you.com/',
# Why: #7530 in Alexa global
'http://www.tencent.com/',
# Why: #7531 in Alexa global
'http://www.bsi.ir/',
# Why: #7532 in Alexa global
'http://t.cn/',
# Why: #7533 in Alexa global
'http://www.fox.com/',
# Why: #7534 in Alexa global
'http://www.payback.de/',
# Why: #7535 in Alexa global
'http://www.tubepornfilm.com/',
# Why: #7536 in Alexa global
'http://www.herold.at/',
# Why: #7537 in Alexa global
'http://www.elperiodico.com/',
# Why: #7538 in Alexa global
'http://www.lolesports.com/',
# Why: #7539 in Alexa global
'http://www.hrs.de/',
# Why: #7540 in Alexa global
'http://www.trustlink.ru/',
# Why: #7541 in Alexa global
'http://www.pricemachine.com/',
# Why: #7542 in Alexa global
'http://www.zombie.jp/',
# Why: #7543 in Alexa global
'http://www.socialadr.com/',
# Why: #7544 in Alexa global
'http://www.anandabazar.com/',
# Why: #7545 in Alexa global
'http://www.jacquieetmicheltv2.net/',
# Why: #7546 in Alexa global
'http://www.monster.de/',
# Why: #7547 in Alexa global
'http://www.allposters.com/',
# Why: #7548 in Alexa global
'http://www.blog.ir/',
# Why: #7549 in Alexa global
'http://www.ad4game.com/',
# Why: #7550 in Alexa global
'http://www.alkislarlayasiyorum.com/',
# Why: #7551 in Alexa global
'http://www.ptcsolution.com/',
# Why: #7552 in Alexa global
'http://www.moviepilot.com/',
# Why: #7553 in Alexa global
'http://www.ddizi.org/',
# Why: #7554 in Alexa global
'http://dmzj.com/',
# Why: #7555 in Alexa global
'http://www.onvasortir.com/',
# Why: #7556 in Alexa global
'http://www.ferronetwork.com/',
# Why: #7557 in Alexa global
'http://www.seagate.com/',
# Why: #7558 in Alexa global
'http://www.starmedia.com/',
# Why: #7559 in Alexa global
'http://www.topit.me/',
# Why: #7560 in Alexa global
'http://www.alexa.cn/',
# Why: #7561 in Alexa global
'http://www.developpez.net/',
# Why: #7562 in Alexa global
'http://www.papajogos.com.br/',
# Why: #7563 in Alexa global
'http://www.btalah.com/',
# Why: #7564 in Alexa global
'http://www.gateway.gov.uk/',
# Why: #7565 in Alexa global
'http://www.fotki.com/',
# Why: #7566 in Alexa global
'http://www.holidaylettings.co.uk/',
# Why: #7567 in Alexa global
'http://www.rzeczpospolita.pl/',
# Why: #7569 in Alexa global
'http://www.charter97.org/',
# Why: #7570 in Alexa global
'http://www.robtex.com/',
# Why: #7571 in Alexa global
'http://bestadbid.com/',
# Why: #7572 in Alexa global
'http://www.unblog.fr/',
# Why: #7573 in Alexa global
'http://www.archive.is/',
# Why: #7574 in Alexa global
'http://www.microworkers.com/',
# Why: #7575 in Alexa global
'http://www.vbulletin.org/',
# Why: #7576 in Alexa global
'http://www.jetswap.com/',
# Why: #7577 in Alexa global
'http://www.badoink.com/',
# Why: #7578 in Alexa global
'http://www.adobeconnect.com/',
# Why: #7579 in Alexa global
'http://www.cutt.us/',
# Why: #7580 in Alexa global
'http://lovemake.biz/',
# Why: #7581 in Alexa global
'http://www.xpress.com/',
# Why: #7582 in Alexa global
'http://www.di.se/',
# Why: #7583 in Alexa global
'http://www.ppomppu.co.kr/',
# Why: #7584 in Alexa global
'http://www.jacquielawson.com/',
# Why: #7585 in Alexa global
'http://www.sat1.de/',
# Why: #7586 in Alexa global
'http://www.adshuffle.com/',
# Why: #7587 in Alexa global
'http://www.homepage.com.tr/',
# Why: #7588 in Alexa global
'http://www.treehugger.com/',
# Why: #7589 in Alexa global
'http://www.selectornews.com/',
# Why: #7590 in Alexa global
'http://www.dap-news.com/',
# Why: #7591 in Alexa global
'http://www.tvline.com/',
# Why: #7592 in Alexa global
'http://www.co188.com/',
# Why: #7593 in Alexa global
'http://www.bfmtv.com/',
# Why: #7594 in Alexa global
'http://www.nastygal.com/',
# Why: #7595 in Alexa global
'http://www.cebupacificair.com/',
# Why: #7596 in Alexa global
'http://www.spr.ru/',
# Why: #7597 in Alexa global
'http://www.vazeh.com/',
# Why: #7598 in Alexa global
'http://www.worldmarket.com/',
# Why: #7599 in Alexa global
'http://www.americanlivewire.com/',
# Why: #7600 in Alexa global
'http://www.befunky.com/',
# Why: #7601 in Alexa global
'http://www.movie2k.tl/',
# Why: #7602 in Alexa global
'http://www.coach.com/',
# Why: #7603 in Alexa global
'http://www.whattoexpect.com/',
# Why: #7604 in Alexa global
'http://www.share-online.biz/',
# Why: #7605 in Alexa global
'http://www.fishwrapper.com/',
# Why: #7606 in Alexa global
'http://www.aktifhaber.com/',
# Why: #7607 in Alexa global
'http://www.downxsoft.com/',
# Why: #7608 in Alexa global
'http://www.websurf.ru/',
# Why: #7609 in Alexa global
'http://www.belluna.jp/',
# Why: #7610 in Alexa global
'http://www.bbcgoodfood.com/',
# Why: #7611 in Alexa global
'http://www.france2.fr/',
# Why: #7612 in Alexa global
'http://www.gyakorikerdesek.hu/',
# Why: #7614 in Alexa global
'http://www.lidovky.cz/',
# Why: #7615 in Alexa global
'http://www.thithtoolwin.info/',
# Why: #7616 in Alexa global
'http://www.psbc.com/',
# Why: #7617 in Alexa global
'http://www.766.com/',
# Why: #7618 in Alexa global
'http://www.co-operativebank.co.uk/',
# Why: #7619 in Alexa global
'http://www.iwriter.com/',
# Why: #7620 in Alexa global
'http://www.bravotv.com/',
# Why: #7621 in Alexa global
'http://www.e23.cn/',
# Why: #7622 in Alexa global
'http://www.empowernetwork.com/ublQXbhzgWgGAF9/',
# Why: #7623 in Alexa global
'http://www.sbs.com.au/',
# Why: #7624 in Alexa global
'http://www.dtiserv2.com/',
# Why: #7625 in Alexa global
'http://www.watchever.de/',
# Why: #7626 in Alexa global
'http://www.playhub.com/',
# Why: #7627 in Alexa global
'http://www.globovision.com/',
# Why: #7628 in Alexa global
'http://www.intereconomia.com/',
# Why: #7629 in Alexa global
'http://www.poznan.pl/',
# Why: #7630 in Alexa global
'http://www.comicbookmovie.com/',
# Why: #7632 in Alexa global
'http://www.ocomico.net/',
# Why: #7634 in Alexa global
'http://www.housetrip.com/',
# Why: #7635 in Alexa global
'http://www.freewebsubmission.com/',
# Why: #7636 in Alexa global
'http://www.karmaloop.com/',
# Why: #7637 in Alexa global
'http://www.savevid.com/',
# Why: #7638 in Alexa global
'http://www.lastpass.com/',
# Why: #7639 in Alexa global
'http://yougou.com/',
# Why: #7640 in Alexa global
'http://www.iafd.com/',
# Why: #7641 in Alexa global
'http://www.casertex.com/',
# Why: #7642 in Alexa global
'http://www.gmail.com/',
# Why: #7643 in Alexa global
'http://www.modhoster.de/',
# Why: #7644 in Alexa global
'http://www.post-gazette.com/',
# Why: #7645 in Alexa global
'http://www.digikey.com/',
# Why: #7646 in Alexa global
'http://www.torrentleech.org/',
# Why: #7647 in Alexa global
'http://www.stamps.com/',
# Why: #7648 in Alexa global
'http://www.lifestyleinsights.org/',
# Why: #7649 in Alexa global
'http://www.pandawill.com/',
# Why: #7650 in Alexa global
'http://www.wm-panel.com/',
# Why: #7651 in Alexa global
'http://www.um-per.com/',
# Why: #7652 in Alexa global
'http://www.straighttalk.com/',
# Why: #7653 in Alexa global
'http://www.xpersonals.com/',
# Why: #7655 in Alexa global
'http://www.bondfaro.com.br/',
# Why: #7656 in Alexa global
'http://www.tvrage.com/',
# Why: #7657 in Alexa global
'http://www.rockongags.com/',
# Why: #7658 in Alexa global
'http://www.4jok.com/',
# Why: #7659 in Alexa global
'http://www.zoom.com.br/',
# Why: #7660 in Alexa global
'http://www.cnn.co.jp/',
# Why: #7661 in Alexa global
'http://www.pixabay.com/',
# Why: #7662 in Alexa global
'http://www.path.com/',
# Why: #7663 in Alexa global
'http://www.hiphopdx.com/',
# Why: #7664 in Alexa global
'http://www.ptbus.com/',
# Why: #7665 in Alexa global
'http://www.fussball.de/',
# Why: #7666 in Alexa global
'http://www.windows.net/',
# Why: #7667 in Alexa global
'http://www.adweek.com/',
# Why: #7668 in Alexa global
'http://www.kraftrecipes.com/',
# Why: #7669 in Alexa global
'http://www.redtram.com/',
# Why: #7670 in Alexa global
'http://www.youravon.com/',
# Why: #7671 in Alexa global
'http://www.ladepeche.fr/',
# Why: #7672 in Alexa global
'http://www.jiwu.com/',
# Why: #7673 in Alexa global
'http://www.hobbylobby.com/',
# Why: #7674 in Alexa global
'http://www.otzyv.ru/',
# Why: #7675 in Alexa global
'http://www.sky-fire.com/',
# Why: #7676 in Alexa global
'http://www.fileguru.com/',
# Why: #7677 in Alexa global
'http://www.vandal.net/',
# Why: #7678 in Alexa global
'http://www.haozu.com/',
# Why: #7679 in Alexa global
'http://www.syd.com.cn/',
# Why: #7680 in Alexa global
'http://www.laxteams.net/',
# Why: #7681 in Alexa global
'http://www.cpvtrack202.com/',
# Why: #7682 in Alexa global
'http://www.libraryreserve.com/',
# Why: #7683 in Alexa global
'http://www.tvigle.ru/',
# Why: #7684 in Alexa global
'http://www.hoopshype.com/',
# Why: #7685 in Alexa global
'http://www.worldcat.org/',
# Why: #7686 in Alexa global
'http://www.eventful.com/',
# Why: #7687 in Alexa global
'http://www.nettiauto.com/',
# Why: #7688 in Alexa global
'http://www.generalfiles.org/',
# Why: #7689 in Alexa global
'http://www.ojooo.com/',
# Why: #7690 in Alexa global
'http://www.thatisnotasport.com/',
# Why: #7691 in Alexa global
'http://www.thepioneerwoman.com/',
# Why: #7692 in Alexa global
'http://www.social-bookmarking.net/',
# Why: #7693 in Alexa global
'http://www.lookforithere.info/',
# Why: #7694 in Alexa global
'http://www.americanapparel.net/',
# Why: #7695 in Alexa global
'http://www.protv.ro/',
# Why: #7696 in Alexa global
'http://www.jeux-gratuits.com/',
# Why: #7697 in Alexa global
'http://www.tomoson.com/',
# Why: #7698 in Alexa global
'http://www.jpn.org/',
# Why: #7699 in Alexa global
'http://www.cpz.to/',
# Why: #7700 in Alexa global
'http://www.vrisko.gr/',
# Why: #7701 in Alexa global
'http://www.cbox.ws/',
# Why: #7702 in Alexa global
'http://www.vandelaydesign.com/',
# Why: #7703 in Alexa global
'http://www.macmillandictionary.com/',
# Why: #7704 in Alexa global
'http://www.eventure.com/',
# Why: #7705 in Alexa global
'http://www.niniweblog.com/',
# Why: #7706 in Alexa global
'http://www.ecwid.com/',
# Why: #7708 in Alexa global
'http://www.garuda-indonesia.com/',
# Why: #7709 in Alexa global
'http://www.education.com/',
# Why: #7710 in Alexa global
'http://www.natalie.mu/',
# Why: #7711 in Alexa global
'http://www.gigsandfestivals.co.uk/',
# Why: #7712 in Alexa global
'http://www.onlainfilm.ucoz.ua/',
# Why: #7713 in Alexa global
'http://www.hotwords.com/',
# Why: #7714 in Alexa global
'http://www.jagobd.com/',
# Why: #7715 in Alexa global
'http://www.pageset.com/',
# Why: #7716 in Alexa global
'http://www.sagepay.com/',
# Why: #7717 in Alexa global
'http://www.runkeeper.com/',
# Why: #7718 in Alexa global
'http://www.beeztube.com/',
# Why: #7719 in Alexa global
'http://www.pinla.com/',
# Why: #7720 in Alexa global
'http://www.blizzard.com/',
# Why: #7721 in Alexa global
'http://www.unc.edu/',
# Why: #7722 in Alexa global
'http://www.makememarvellous.com/',
# Why: #7723 in Alexa global
'http://www.wer-weiss-was.de/',
# Why: #7724 in Alexa global
'http://www.ubc.ca/',
# Why: #7725 in Alexa global
'http://www.utoronto.ca/',
# Why: #7726 in Alexa global
'http://www.avsforum.com/',
# Why: #7727 in Alexa global
'http://www.newrelic.com/',
# Why: #7728 in Alexa global
'http://www.orkut.co.in/',
# Why: #7729 in Alexa global
'http://www.wawa-mania.ec/',
# Why: #7730 in Alexa global
'http://www.tvuol.uol.com.br/',
# Why: #7731 in Alexa global
'http://www.ncsu.edu/',
# Why: #7732 in Alexa global
'http://www.ne.jp/',
# Why: #7733 in Alexa global
'http://www.redhat.com/',
# Why: #7734 in Alexa global
'http://www.toyota.jp/',
# Why: #7735 in Alexa global
'http://www.nsdl.co.in/',
# Why: #7736 in Alexa global
'http://www.lavoz.com.ar/',
# Why: #7737 in Alexa global
'http://www.navy.mil/',
# Why: #7738 in Alexa global
'http://www.mg.gov.br/',
# Why: #7739 in Alexa global
'http://gizmodo.uol.com.br/',
# Why: #7740 in Alexa global
'http://www.psychcentral.com/',
# Why: #7741 in Alexa global
'http://www.ultipro.com/',
# Why: #7742 in Alexa global
'http://www.unisa.ac.za/',
# Why: #7743 in Alexa global
'http://www.sooperarticles.com/',
# Why: #7744 in Alexa global
'http://www.wondershare.com/',
# Why: #7745 in Alexa global
'http://www.wholefoodsmarket.com/',
# Why: #7746 in Alexa global
'http://www.dumpaday.com/',
# Why: #7747 in Alexa global
'http://www.littlewoods.com/',
# Why: #7748 in Alexa global
'http://www.carscom.net/',
# Why: #7749 in Alexa global
'http://www.meitu.com/',
# Why: #7750 in Alexa global
'http://www.9lwan.com/',
# Why: #7751 in Alexa global
'http://www.emailmeform.com/',
# Why: #7752 in Alexa global
'http://www.arte.tv/',
# Why: #7753 in Alexa global
'http://www.tribalfootball.com/',
# Why: #7754 in Alexa global
'http://www.howtoforge.com/',
# Why: #7755 in Alexa global
'http://www.cvent.com/',
# Why: #7756 in Alexa global
'http://www.fujitsu.com/',
# Why: #7757 in Alexa global
'http://www.silvergames.com/',
# Why: #7758 in Alexa global
'http://www.tp-link.com.cn/',
# Why: #7759 in Alexa global
'http://www.fatlossfactor.com/',
# Why: #7760 in Alexa global
'http://www.nusport.nl/',
# Why: #7761 in Alexa global
'http://www.todo1.com/',
# Why: #7762 in Alexa global
'http://www.see-tube.com/',
# Why: #7763 in Alexa global
'http://www.lolspots.com/',
# Why: #7764 in Alexa global
'http://www.sucksex.com/',
# Why: #7765 in Alexa global
'http://www.encontreinarede.com/',
# Why: #7766 in Alexa global
'http://www.myarabylinks.com/',
# Why: #7767 in Alexa global
'http://www.v-39.net/',
# Why: #7769 in Alexa global
'http://www.soompi.com/',
# Why: #7770 in Alexa global
'http://www.mltdb.com/',
# Why: #7771 in Alexa global
'http://www.websitetonight.com/',
# Why: #7772 in Alexa global
'http://www.bu.edu/',
# Why: #7773 in Alexa global
'http://www.lazada.co.th/',
# Why: #7774 in Alexa global
'http://www.mature-money.com/',
# Why: #7775 in Alexa global
'http://www.simplemachines.org/',
# Why: #7776 in Alexa global
'http://www.tnt-online.ru/',
# Why: #7777 in Alexa global
'http://www.disput.az/',
# Why: #7779 in Alexa global
'http://www.flirtcafe.de/',
# Why: #7780 in Alexa global
'http://www.d1net.com/',
# Why: #7781 in Alexa global
'http://www.infoplease.com/',
# Why: #7782 in Alexa global
'http://www.unseenimages.co.in/',
# Why: #7783 in Alexa global
'http://www.downloadatoz.com/',
# Why: #7784 in Alexa global
'http://www.norwegian.com/',
# Why: #7785 in Alexa global
'http://www.youtradefx.com/',
# Why: #7786 in Alexa global
'http://www.petapixel.com/',
# Why: #7787 in Alexa global
'http://www.bytes.com/',
# Why: #7788 in Alexa global
'http://ht.ly/',
# Why: #7789 in Alexa global
'http://www.jobberman.com/',
# Why: #7790 in Alexa global
'http://www.xenforo.com/',
# Why: #7791 in Alexa global
'http://www.pomponik.pl/',
# Why: #7792 in Alexa global
'http://www.siambit.org/',
# Why: #7793 in Alexa global
'http://www.twoplustwo.com/',
# Why: #7794 in Alexa global
'http://www.videoslasher.com/',
# Why: #7795 in Alexa global
'http://www.onvista.de/',
# Why: #7796 in Alexa global
'http://www.shopping-search.jp/',
# Why: #7797 in Alexa global
'http://www.canstockphoto.com/',
# Why: #7798 in Alexa global
'http://www.cash4flirt.com/',
# Why: #7799 in Alexa global
'http://www.flashgames.it/',
# Why: #7800 in Alexa global
'http://www.xxxdessert.com/',
# Why: #7801 in Alexa global
'http://www.cda.pl/',
# Why: #7803 in Alexa global
'http://www.costco.ca/',
# Why: #7804 in Alexa global
'http://www.elnuevodiario.com.ni/',
# Why: #7805 in Alexa global
'http://www.svtplay.se/',
# Why: #7806 in Alexa global
'http://www.ftc.gov/',
# Why: #7807 in Alexa global
'http://www.supersonicads.com/',
# Why: #7808 in Alexa global
'http://www.openstreetmap.org/',
# Why: #7809 in Alexa global
'http://www.chinamobile.com/',
# Why: #7810 in Alexa global
'http://www.fastspring.com/',
# Why: #7811 in Alexa global
'http://www.eprice.com.tw/',
# Why: #7813 in Alexa global
'http://www.mcdonalds.com/',
# Why: #7814 in Alexa global
'http://www.egloos.com/',
# Why: #7815 in Alexa global
'http://www.mouser.com/',
# Why: #7816 in Alexa global
'http://livemook.com/',
# Why: #7817 in Alexa global
'http://www.woxiu.com/',
# Why: #7818 in Alexa global
'http://www.pingler.com/',
# Why: #7819 in Alexa global
'http://www.ruelsoft.org/',
# Why: #7820 in Alexa global
'http://www.krone.at/',
# Why: #7821 in Alexa global
'http://www.internetbookshop.it/',
# Why: #7822 in Alexa global
'http://www.alibaba-inc.com/',
# Why: #7823 in Alexa global
'http://www.kimsufi.com/',
# Why: #7824 in Alexa global
'http://www.summitracing.com/',
# Why: #7826 in Alexa global
'http://www.parsfootball.com/',
# Why: #7827 in Alexa global
'http://www.standard.co.uk/',
# Why: #7828 in Alexa global
'http://www.photoblog.pl/',
# Why: #7829 in Alexa global
'http://www.bicaps.com/',
# Why: #7830 in Alexa global
'http://www.digitalplayground.com/',
# Why: #7831 in Alexa global
'http://www.zerochan.net/',
# Why: #7832 in Alexa global
'http://www.whosay.com/',
# Why: #7833 in Alexa global
'http://www.qualityseek.org/',
# Why: #7834 in Alexa global
'http://www.say7.info/',
# Why: #7835 in Alexa global
'http://www.rs.gov.br/',
# Why: #7836 in Alexa global
'http://www.wps.cn/',
# Why: #7837 in Alexa global
'http://www.google.co.mz/',
# Why: #7838 in Alexa global
'http://www.yourlustmovies.com/',
# Why: #7839 in Alexa global
'http://www.zalando.nl/',
# Why: #7840 in Alexa global
'http://www.jn.pt/',
# Why: #7841 in Alexa global
'http://www.homebase.co.uk/',
# Why: #7842 in Alexa global
'http://www.avis.com/',
# Why: #7843 in Alexa global
'http://www.healthboards.com/',
# Why: #7844 in Alexa global
'http://www.filmizlesene.com.tr/',
# Why: #7845 in Alexa global
'http://www.shoutcast.com/',
# Why: #7846 in Alexa global
'http://www.konami.jp/',
# Why: #7847 in Alexa global
'http://www.indiafreestuff.in/',
# Why: #7848 in Alexa global
'http://www.avval.ir/',
# Why: #7849 in Alexa global
'http://www.gamingwonderland.com/',
# Why: #7850 in Alexa global
'http://www.adage.com/',
# Why: #7851 in Alexa global
'http://www.asu.edu/',
# Why: #7852 in Alexa global
'http://www.froma.com/',
# Why: #7853 in Alexa global
'http://www.bezuzyteczna.pl/',
# Why: #7854 in Alexa global
'http://www.workopolis.com/',
# Why: #7855 in Alexa global
'http://extranetinvestment.com/',
# Why: #7856 in Alexa global
'http://www.lablue.de/',
# Why: #7857 in Alexa global
'http://www.geotauaisay.com/',
# Why: #7858 in Alexa global
'http://www.bestchange.ru/',
# Why: #7859 in Alexa global
'http://www.ptp22.com/',
# Why: #7860 in Alexa global
'http://www.tehparadox.com/',
# Why: #7861 in Alexa global
'http://www.ox.ac.uk/',
# Why: #7862 in Alexa global
'http://www.radaris.com/',
# Why: #7863 in Alexa global
'http://www.domdigger.com/',
# Why: #7864 in Alexa global
'http://www.lizads.com/',
# Why: #7865 in Alexa global
'http://www.chatvl.com/',
# Why: #7866 in Alexa global
'http://www.elle.com/',
# Why: #7867 in Alexa global
'http://www.soloaqui.es/',
# Why: #7868 in Alexa global
'http://www.tubejuggs.com/',
# Why: #7869 in Alexa global
'http://www.jsonline.com/',
# Why: #7870 in Alexa global
'http://www.ut.ac.ir/',
# Why: #7871 in Alexa global
'http://www.iitv.info/',
# Why: #7872 in Alexa global
'http://www.runetki.tv/',
# Why: #7873 in Alexa global
'http://www.hyundai.com/',
# Why: #7874 in Alexa global
'http://www.turkiye.gov.tr/',
# Why: #7875 in Alexa global
'http://www.jobstreet.com.sg/',
# Why: #7877 in Alexa global
'http://www.jp-sex.com/',
# Why: #7878 in Alexa global
'http://www.soccer.ru/',
# Why: #7879 in Alexa global
'http://www.slashfilm.com/',
# Why: #7880 in Alexa global
'http://www.couchtuner.eu/',
# Why: #7881 in Alexa global
'http://quanfan.com/',
# Why: #7882 in Alexa global
'http://www.porsche.com/',
# Why: #7883 in Alexa global
'http://www.craftsy.com/',
# Why: #7884 in Alexa global
'http://www.geizhals.at/',
# Why: #7885 in Alexa global
'http://www.spartoo.it/',
# Why: #7886 in Alexa global
'http://yxku.com/',
# Why: #7887 in Alexa global
'http://www.vodonet.net/',
# Why: #7888 in Alexa global
'http://www.photo.net/',
# Why: #7889 in Alexa global
'http://www.raiffeisen.ru/',
# Why: #7890 in Alexa global
'http://www.tablotala.com/',
# Why: #7891 in Alexa global
'http://www.theaa.com/',
# Why: #7892 in Alexa global
'http://www.idownloadblog.com/',
# Why: #7894 in Alexa global
'http://www.rodfile.com/',
# Why: #7895 in Alexa global
'http://www.alabout.com/',
# Why: #7896 in Alexa global
'http://www.f1news.ru/',
# Why: #7897 in Alexa global
'http://www.divxstage.eu/',
# Why: #7898 in Alexa global
'http://www.itusozluk.com/',
# Why: #7899 in Alexa global
'http://www.tokyodisneyresort.co.jp/',
# Why: #7900 in Alexa global
'http://www.hicdma.com/',
# Why: #7901 in Alexa global
'http://www.dota2lounge.com/',
# Why: #7902 in Alexa global
'http://www.meizu.cn/',
# Why: #7903 in Alexa global
'http://www.greensmut.com/',
# Why: #7904 in Alexa global
'http://www.bharatiyamobile.com/',
# Why: #7905 in Alexa global
'http://www.handycafe.com/',
# Why: #7906 in Alexa global
'http://www.regarder-film-gratuit.com/',
# Why: #7907 in Alexa global
'http://www.adultgeek.net/',
# Why: #7908 in Alexa global
'http://www.yintai.com/',
# Why: #7909 in Alexa global
'http://www.brasilescola.com/',
# Why: #7910 in Alexa global
'http://www.verisign.com/',
# Why: #7911 in Alexa global
'http://www.dnslink.com/',
# Why: #7912 in Alexa global
'http://www.standaard.be/',
# Why: #7913 in Alexa global
'http://www.cbengine.com/',
# Why: #7914 in Alexa global
'http://www.pchealthboost.com/',
# Why: #7915 in Alexa global
'http://www.dealdey.com/',
# Why: #7916 in Alexa global
'http://www.cnnturk.com/',
# Why: #7917 in Alexa global
'http://www.trutv.com/',
# Why: #7918 in Alexa global
'http://www.tahrirnews.com/',
# Why: #7919 in Alexa global
'http://www.getit.in/',
# Why: #7920 in Alexa global
'http://www.jquerymobile.com/',
# Why: #7921 in Alexa global
'http://www.girlgames.com/',
# Why: #7922 in Alexa global
'http://www.alhayat.com/',
# Why: #7923 in Alexa global
'http://www.ilpvideo.com/',
# Why: #7924 in Alexa global
'http://www.stihi.ru/',
# Why: #7925 in Alexa global
'http://www.skyscanner.ru/',
# Why: #7926 in Alexa global
'http://www.jamejamonline.ir/',
# Why: #7927 in Alexa global
'http://www.t3n.de/',
# Why: #7928 in Alexa global
'http://www.rent.com/',
# Why: #7929 in Alexa global
'http://www.telerik.com/',
# Why: #7930 in Alexa global
'http://www.tandfonline.com/',
# Why: #7931 in Alexa global
'http://www.argonas.com/',
# Why: #7932 in Alexa global
'http://www.ludokado.com/',
# Why: #7933 in Alexa global
'http://www.luvgag.com/',
# Why: #7934 in Alexa global
'http://www.myspongebob.ru/',
# Why: #7935 in Alexa global
'http://www.z5x.net/',
# Why: #7936 in Alexa global
'http://www.allhyipmon.ru/',
# Why: #7937 in Alexa global
'http://www.fanswong.com/',
# Why: #7939 in Alexa global
'http://www.oddee.com/',
# Why: #7940 in Alexa global
'http://guoli.com/',
# Why: #7942 in Alexa global
'http://www.wpzoom.com/',
# Why: #7943 in Alexa global
'http://www.2gheroon.com/',
# Why: #7944 in Alexa global
'http://www.artisteer.com/',
# Why: #7945 in Alexa global
'http://www.share-links.biz/',
# Why: #7946 in Alexa global
'http://www.flightstats.com/',
# Why: #7947 in Alexa global
'http://www.wisegeek.org/',
# Why: #7948 in Alexa global
'http://www.shuangtv.net/',
# Why: #7949 in Alexa global
'http://www.mylikes.com/',
# Why: #7950 in Alexa global
'http://www.0zz0.com/',
# Why: #7951 in Alexa global
'http://www.xiu.com/',
# Why: #7952 in Alexa global
'http://www.pornizle69.com/',
# Why: #7953 in Alexa global
'http://www.sendgrid.com/',
# Why: #7954 in Alexa global
'http://theweek.com/',
# Why: #7955 in Alexa global
'http://www.veetle.com/',
# Why: #7956 in Alexa global
'http://www.theanimalrescuesite.com/',
# Why: #7957 in Alexa global
'http://www.sears.ca/',
# Why: #7958 in Alexa global
'http://www.tianpin.com/',
# Why: #7959 in Alexa global
'http://www.thisdaylive.com/',
# Why: #7960 in Alexa global
'http://www.myfunlife.com/',
# Why: #7961 in Alexa global
'http://www.furaffinity.net/',
# Why: #7962 in Alexa global
'http://www.politiken.dk/',
# Why: #7963 in Alexa global
'http://www.youwatch.org/',
# Why: #7965 in Alexa global
'http://www.lesoir.be/',
# Why: #7966 in Alexa global
'http://www.toyokeizai.net/',
# Why: #7967 in Alexa global
'http://www.centos.org/',
# Why: #7968 in Alexa global
'http://www.sunnyplayer.com/',
# Why: #7969 in Alexa global
'http://www.knuddels.de/',
# Why: #7970 in Alexa global
'http://www.mturk.com/',
# Why: #7971 in Alexa global
'http://www.egymodern.com/',
# Why: #7972 in Alexa global
'http://www.semprot.com/',
# Why: #7973 in Alexa global
'http://www.monsterhigh.com/',
# Why: #7974 in Alexa global
'http://www.kompass.com/',
# Why: #7975 in Alexa global
'http://www.olx.com.ve/',
# Why: #7976 in Alexa global
'http://www.hq-xnxx.com/',
# Why: #7977 in Alexa global
'http://www.whorush.com/',
# Why: #7978 in Alexa global
'http://www.bongdaso.com/',
# Why: #7979 in Alexa global
'http://www.centrelink.gov.au/',
# Why: #7980 in Alexa global
'http://www.folha.com.br/',
# Why: #7981 in Alexa global
'http://www.getjetso.com/',
# Why: #7982 in Alexa global
'http://www.ycombinator.com/',
# Why: #7983 in Alexa global
'http://www.chouti.com/',
# Why: #7984 in Alexa global
'http://www.33lc.com/',
# Why: #7985 in Alexa global
'http://www.empowernetwork.com/+LO9YhVOjRPGiarC7uA9iA==/',
# Why: #7986 in Alexa global
'http://www.hostgator.com.br/',
# Why: #7987 in Alexa global
'http://www.emirates247.com/',
# Why: #7988 in Alexa global
'http://www.itpub.net/',
# Why: #7989 in Alexa global
'http://www.fsymbols.com/',
# Why: #7990 in Alexa global
'http://www.bestproducttesters.com/',
# Why: #7991 in Alexa global
'http://daodao.com/',
# Why: #7992 in Alexa global
'http://www.virtuemart.net/',
# Why: #7993 in Alexa global
'http://www.hindilinks4u.net/',
# Why: #7994 in Alexa global
'http://www.nnm.me/',
# Why: #7995 in Alexa global
'http://www.xplocial.com/',
# Why: #7996 in Alexa global
'http://www.apartments.com/',
# Why: #7997 in Alexa global
'http://www.ekolay.net/',
# Why: #7998 in Alexa global
'http://www.doviz.com/',
# Why: #7999 in Alexa global
'http://www.flixya.com/',
# Why: #8000 in Alexa global
'http://www.3almthqafa.com/',
# Why: #8001 in Alexa global
'http://www.zamalekfans.com/',
# Why: #8002 in Alexa global
'http://www.imeigu.com/',
# Why: #8003 in Alexa global
'http://www.wikibit.net/',
# Why: #8004 in Alexa global
'http://www.windstream.net/',
# Why: #8005 in Alexa global
'http://www.matichon.co.th/',
# Why: #8006 in Alexa global
'http://www.appshopper.com/',
# Why: #8007 in Alexa global
'http://www.socialbakers.com/',
# Why: #8008 in Alexa global
'http://www.1popov.ru/',
# Why: #8009 in Alexa global
'http://www.blikk.hu/',
# Why: #8010 in Alexa global
'http://www.bdr130.net/',
# Why: #8011 in Alexa global
'http://www.arizona.edu/',
# Why: #8012 in Alexa global
'http://www.madhyamam.com/',
# Why: #8013 in Alexa global
'http://www.mweb.co.za/',
# Why: #8014 in Alexa global
'http://www.affiliates.de/',
# Why: #8015 in Alexa global
'http://www.ebs.in/',
# Why: #8016 in Alexa global
'http://www.bestgfx.com/',
# Why: #8017 in Alexa global
'http://www.share-games.com/',
# Why: #8018 in Alexa global
'http://www.informador.com.mx/',
# Why: #8019 in Alexa global
'http://www.jobsite.co.uk/',
# Why: #8020 in Alexa global
'http://www.carters.com/',
# Why: #8021 in Alexa global
'http://www.kinghost.net/',
# Why: #8022 in Alexa global
'http://www.us1.com/',
# Why: #8024 in Alexa global
'http://www.archives.com/',
# Why: #8025 in Alexa global
'http://www.forosdelweb.com/',
# Why: #8026 in Alexa global
'http://www.siteslike.com/',
# Why: #8027 in Alexa global
'http://www.thedailyshow.com/',
# Why: #8028 in Alexa global
'http://www.68design.net/',
# Why: #8029 in Alexa global
'http://www.imtalk.org/',
# Why: #8030 in Alexa global
'http://www.visualwebsiteoptimizer.com/',
# Why: #8031 in Alexa global
'http://www.glarysoft.com/',
# Why: #8032 in Alexa global
'http://xhby.net/',
# Why: #8033 in Alexa global
'http://www.cosp.jp/',
# Why: #8034 in Alexa global
'http://www.email.cz/',
# Why: #8035 in Alexa global
'http://www.amateurs-gone-wild.com/',
# Why: #8036 in Alexa global
'http://www.davidwalsh.name/',
# Why: #8037 in Alexa global
'http://www.finalfantasyxiv.com/',
# Why: #8038 in Alexa global
'http://www.aa.com.tr/',
# Why: #8039 in Alexa global
'http://www.legalzoom.com/',
# Why: #8040 in Alexa global
'http://www.lifehack.org/',
# Why: #8041 in Alexa global
'http://www.mca.gov.in/',
# Why: #8042 in Alexa global
'http://www.hidrvids.com/',
# Why: #8043 in Alexa global
'http://netaatoz.jp/',
# Why: #8044 in Alexa global
'http://www.key.com/',
# Why: #8045 in Alexa global
'http://www.thumbtack.com/',
# Why: #8046 in Alexa global
'http://www.nujij.nl/',
# Why: #8047 in Alexa global
'http://www.cinetux.org/',
# Why: #8048 in Alexa global
'http://www.hmetro.com.my/',
# Why: #8049 in Alexa global
'http://www.ignou.ac.in/',
# Why: #8051 in Alexa global
'http://www.affilorama.com/',
# Why: #8052 in Alexa global
'http://www.pokemon.com/',
# Why: #8053 in Alexa global
'http://www.sportsnewsinternational.com/',
# Why: #8054 in Alexa global
'http://www.geek.com/',
# Why: #8055 in Alexa global
'http://www.larepublica.pe/',
# Why: #8056 in Alexa global
'http://www.europacasino.com/',
# Why: #8058 in Alexa global
'http://www.ok-porn.com/',
# Why: #8059 in Alexa global
'http://www.tutorialzine.com/',
# Why: #8060 in Alexa global
'http://www.google.com.bn/',
# Why: #8061 in Alexa global
'http://www.site5.com/',
# Why: #8062 in Alexa global
'http://www.trafficjunky.net/',
# Why: #8063 in Alexa global
'http://www.xueqiu.com/',
# Why: #8064 in Alexa global
'http://www.yournewscorner.com/',
# Why: #8065 in Alexa global
'http://www.metrotvnews.com/',
# Why: #8066 in Alexa global
'http://www.nichegalz.com/',
# Why: #8067 in Alexa global
'http://www.job.com/',
# Why: #8068 in Alexa global
'http://www.koimoi.com/',
# Why: #8069 in Alexa global
'http://www.questionablecontent.net/',
# Why: #8070 in Alexa global
'http://www.volaris.mx/',
# Why: #8071 in Alexa global
'http://www.rakuten.de/',
# Why: #8072 in Alexa global
'http://www.cyworld.com/',
# Why: #8073 in Alexa global
'http://www.yudu.com/',
# Why: #8074 in Alexa global
'http://www.zakon.kz/',
# Why: #8075 in Alexa global
'http://www.msi.com/',
# Why: #8076 in Alexa global
'http://www.darkxxxtube.com/',
# Why: #8077 in Alexa global
'http://www.samakal.net/',
# Why: #8078 in Alexa global
'http://www.appstorm.net/',
# Why: #8079 in Alexa global
'http://www.vulture.com/',
# Why: #8080 in Alexa global
'http://www.lswb.com.cn/',
# Why: #8081 in Alexa global
'http://www.racingpost.com/',
# Why: #8082 in Alexa global
'http://www.classicrummy.com/',
# Why: #8083 in Alexa global
'http://www.iegallery.com/',
# Why: #8084 in Alexa global
'http://www.cinemagia.ro/',
# Why: #8085 in Alexa global
'http://nullpoantenna.com/',
# Why: #8086 in Alexa global
'http://www.ihned.cz/',
# Why: #8087 in Alexa global
'http://vdolady.com/',
# Why: #8088 in Alexa global
'http://www.babes.com/',
# Why: #8089 in Alexa global
'http://www.komli.com/',
# Why: #8090 in Alexa global
'http://www.asianbeauties.com/',
# Why: #8091 in Alexa global
'http://www.onedate.com/',
# Why: #8092 in Alexa global
'http://www.adhitz.com/',
# Why: #8093 in Alexa global
'http://www.jjgirls.com/',
# Why: #8094 in Alexa global
'http://www.dot.tk/',
# Why: #8095 in Alexa global
'http://caras.uol.com.br/',
# Why: #8096 in Alexa global
'http://www.autobild.de/',
# Why: #8097 in Alexa global
'http://www.jobs-to-careers.com/',
# Why: #8098 in Alexa global
'http://movietickets.com/',
# Why: #8099 in Alexa global
'http://www.net4.in/',
# Why: #8100 in Alexa global
'http://www.crutchfield.com/',
# Why: #8101 in Alexa global
'http://www.subdivx.com/',
# Why: #8102 in Alexa global
'http://www.damai.cn/',
# Why: #8103 in Alexa global
'http://www.sirarcade.com/',
# Why: #8104 in Alexa global
'http://sitescoutadserver.com/',
# Why: #8105 in Alexa global
'http://www.fantasy-rivals.com/',
# Why: #8106 in Alexa global
'http://www.chegg.com/',
# Why: #8107 in Alexa global
'http://www.sportsmansguide.com/',
# Why: #8108 in Alexa global
'http://www.extremetech.com/',
# Why: #8109 in Alexa global
'http://www.loft.com/',
# Why: #8110 in Alexa global
'http://www.dirtyamateurtube.com/',
# Why: #8111 in Alexa global
'http://painelhost.uol.com.br/',
# Why: #8112 in Alexa global
'http://www.socialsex.biz/',
# Why: #8113 in Alexa global
'http://www.opensubtitles.us/',
# Why: #8114 in Alexa global
'http://www.infomoney.com.br/',
# Why: #8115 in Alexa global
'http://www.openstat.ru/',
# Why: #8116 in Alexa global
'http://www.adlandpro.com/',
# Why: #8117 in Alexa global
'http://www.trivago.de/',
# Why: #8118 in Alexa global
'http://feiren.com/',
# Why: #8119 in Alexa global
'http://www.lespac.com/',
# Why: #8120 in Alexa global
'http://www.icook.tw/',
# Why: #8121 in Alexa global
'http://www.iceporn.com/',
# Why: #8122 in Alexa global
'http://www.animehere.com/',
# Why: #8123 in Alexa global
'http://www.klix.ba/',
# Why: #8124 in Alexa global
'http://www.elitepvpers.com/',
# Why: #8125 in Alexa global
'http://www.mrconservative.com/',
# Why: #8126 in Alexa global
'http://www.tamu.edu/',
# Why: #8127 in Alexa global
'http://www.startv.com.tr/',
# Why: #8128 in Alexa global
'http://www.haber1903.com/',
# Why: #8129 in Alexa global
'http://www.apa.tv/',
# Why: #8130 in Alexa global
'http://uc.cn/',
# Why: #8131 in Alexa global
'http://www.idbi.com/',
# Why: #8132 in Alexa global
'http://www.golfchannel.com/',
# Why: #8133 in Alexa global
'http://www.pep.ph/',
# Why: #8134 in Alexa global
'http://www.toukoucity.to/',
# Why: #8135 in Alexa global
'http://www.empiremoney.com/',
# Why: #8136 in Alexa global
'http://www.androidauthority.com/',
# Why: #8137 in Alexa global
'http://www.ref4bux.com/',
# Why: #8138 in Alexa global
'http://www.digitaljournal.com/',
# Why: #8139 in Alexa global
'http://www.sporcle.com/',
# Why: #8141 in Alexa global
'http://www.183.com.cn/',
# Why: #8142 in Alexa global
'http://www.bzwbk.pl/',
# Why: #8143 in Alexa global
'http://lalamao.com/',
# Why: #8144 in Alexa global
'http://www.ziare.com/',
# Why: #8145 in Alexa global
'http://www.cliti.com/',
# Why: #8146 in Alexa global
'http://www.thatguywiththeglasses.com/',
# Why: #8147 in Alexa global
'http://www.vodu.ch/',
# Why: #8148 in Alexa global
'http://www.ycwb.com/',
# Why: #8149 in Alexa global
'http://www.bls.gov/',
# Why: #8150 in Alexa global
'http://www.matsui.co.jp/',
# Why: #8151 in Alexa global
'http://xmrc.com.cn/',
# Why: #8152 in Alexa global
'http://1tubenews.com/',
# Why: #8153 in Alexa global
'http://www.cl.ly/',
# Why: #8154 in Alexa global
'http://www.ing.be/',
# Why: #8155 in Alexa global
'http://www.bitterstrawberry.com/',
# Why: #8156 in Alexa global
'http://www.fubar.com/',
# Why: #8157 in Alexa global
'http://www.arabic-keyboard.org/',
# Why: #8158 in Alexa global
'http://www.mejortorrent.com/',
# Why: #8159 in Alexa global
'http://www.trendmicro.com/',
# Why: #8160 in Alexa global
'http://www.ap7am.com/',
# Why: #8161 in Alexa global
'http://www.windowsazure.com/',
# Why: #8162 in Alexa global
'http://www.q8yat.com/',
# Why: #8163 in Alexa global
'http://www.yyv.co/',
# Why: #8164 in Alexa global
'http://www.tvoy-start.com/',
# Why: #8165 in Alexa global
'http://www.creativetoolbars.com/',
# Why: #8166 in Alexa global
'http://www.forrent.com/',
# Why: #8167 in Alexa global
'http://www.mlstatic.com/',
# Why: #8168 in Alexa global
'http://www.like4like.org/',
# Why: #8169 in Alexa global
'http://www.alpha.gr/',
# Why: #8170 in Alexa global
'http://www.amkey.net/',
# Why: #8172 in Alexa global
'http://www.iwiw.hu/',
# Why: #8173 in Alexa global
'http://www.routard.com/',
# Why: #8174 in Alexa global
'http://www.teacherspayteachers.com/',
# Why: #8175 in Alexa global
'http://www.ahashare.com/',
# Why: #8176 in Alexa global
'http://www.ultoo.com/',
# Why: #8177 in Alexa global
'http://www.oakley.com/',
# Why: #8178 in Alexa global
'http://www.upforit.com/',
# Why: #8179 in Alexa global
'http://www.trafficbee.com/',
# Why: #8180 in Alexa global
'http://www.monster.co.uk/',
# Why: #8181 in Alexa global
'http://www.boulanger.fr/',
# Why: #8182 in Alexa global
'http://www.bloglines.com/',
# Why: #8183 in Alexa global
'http://www.wdc.com/',
# Why: #8184 in Alexa global
'http://www.backpackers.com.tw/',
# Why: #8185 in Alexa global
'http://www.el-nacional.com/',
# Why: #8186 in Alexa global
'http://www.bloggertipstricks.com/',
# Why: #8187 in Alexa global
'http://www.oreillyauto.com/',
# Why: #8188 in Alexa global
'http://www.hotpads.com/',
# Why: #8189 in Alexa global
'http://www.tubexvideo.com/',
# Why: #8190 in Alexa global
'http://www.mudainodocument.com/',
# Why: #8191 in Alexa global
'http://www.17car.com.cn/',
# Why: #8192 in Alexa global
'http://www.discoverpedia.info/',
# Why: #8193 in Alexa global
'http://www.noobteens.com/',
# Why: #8194 in Alexa global
'http://www.shockmansion.com/',
# Why: #8195 in Alexa global
'http://www.qudsonline.ir/',
# Why: #8196 in Alexa global
'http://www.mec.es/',
# Why: #8197 in Alexa global
'http://www.vt.edu/',
# Why: #8198 in Alexa global
'http://www.akelite.com/',
# Why: #8199 in Alexa global
'http://www.travelandleisure.com/',
# Why: #8200 in Alexa global
'http://www.sunnewsonline.com/',
# Why: #8201 in Alexa global
'http://www.tok2.com/',
# Why: #8202 in Alexa global
'http://www.truste.org/',
# Why: #8203 in Alexa global
'http://www.2dehands.be/',
# Why: #8204 in Alexa global
'http://www.hf365.com/',
# Why: #8205 in Alexa global
'http://www.westelm.com/',
# Why: #8206 in Alexa global
'http://www.radiko.jp/',
# Why: #8207 in Alexa global
'http://www.real.gr/',
# Why: #8208 in Alexa global
'http://www.blogcms.jp/',
# Why: #8209 in Alexa global
'http://www.downloadming.me/',
# Why: #8210 in Alexa global
'http://www.citromail.hu/',
# Why: #8211 in Alexa global
'http://www.fotocommunity.de/',
# Why: #8212 in Alexa global
'http://www.zapjuegos.com/',
# Why: #8213 in Alexa global
'http://www.aastocks.com/',
# Why: #8214 in Alexa global
'http://www.unb.br/',
# Why: #8215 in Alexa global
'http://www.adchakra.net/',
# Why: #8216 in Alexa global
'http://www.check24.de/',
# Why: #8217 in Alexa global
'http://www.vidto.me/',
# Why: #8218 in Alexa global
'http://www.peekyou.com/',
# Why: #8219 in Alexa global
'http://www.urssaf.fr/',
# Why: #8220 in Alexa global
'http://www.alixixi.com/',
# Why: #8221 in Alexa global
'http://www.winamp.com/',
# Why: #8222 in Alexa global
'http://www.xianguo.com/',
# Why: #8223 in Alexa global
'http://www.indiasextube.net/',
# Why: #8224 in Alexa global
'http://www.fitnea.com/',
# Why: #8225 in Alexa global
'http://www.telemundo.com/',
# Why: #8226 in Alexa global
'http://www.webnode.cz/',
# Why: #8227 in Alexa global
'http://www.kliksaya.com/',
# Why: #8228 in Alexa global
'http://www.wikileaks.org/',
# Why: #8229 in Alexa global
'http://www.myblog.it/',
# Why: #8231 in Alexa global
'http://www.99wed.com/',
# Why: #8232 in Alexa global
'http://www.adorika.com/',
# Why: #8233 in Alexa global
'http://www.siliconrus.com/',
# Why: #8235 in Alexa global
'http://www.dealmoon.com/',
# Why: #8236 in Alexa global
'http://www.ricanadfunds.com/',
# Why: #8237 in Alexa global
'http://www.vietcombank.com.vn/',
# Why: #8238 in Alexa global
'http://www.chemistry.com/',
# Why: #8239 in Alexa global
'http://www.reisen.de/',
# Why: #8240 in Alexa global
'http://www.torlock.com/',
# Why: #8241 in Alexa global
'http://www.wsop.com/',
# Why: #8242 in Alexa global
'http://www.travian.co.id/',
# Why: #8243 in Alexa global
'http://www.ipoll.com/',
# Why: #8244 in Alexa global
'http://www.bpiexpressonline.com/',
# Why: #8245 in Alexa global
'http://www.neeu.com/',
# Why: #8246 in Alexa global
'http://www.atmarkit.co.jp/',
# Why: #8247 in Alexa global
'http://www.beyondtherack.com/',
# Why: #8248 in Alexa global
'http://blueidea.com/',
# Why: #8249 in Alexa global
'http://www.tedata.net/',
# Why: #8250 in Alexa global
'http://www.gamesradar.com/',
# Why: #8251 in Alexa global
'http://www.big.az/',
# Why: #8252 in Alexa global
'http://www.h-douga.net/',
# Why: #8253 in Alexa global
'http://www.runnersworld.com/',
# Why: #8254 in Alexa global
'http://www.lumfile.com/',
# Why: #8255 in Alexa global
'http://ccoo.cn/',
# Why: #8256 in Alexa global
'http://www.u17.com/',
# Why: #8257 in Alexa global
'http://www.badjojo.com/',
# Why: #8259 in Alexa global
'http://eplus.jp/',
# Why: #8260 in Alexa global
'http://www.nginx.org/',
# Why: #8261 in Alexa global
'http://www.filmfanatic.com/',
# Why: #8262 in Alexa global
'http://www.filmey.com/',
# Why: #8263 in Alexa global
'http://www.mousebreaker.com/',
# Why: #8264 in Alexa global
'http://www.mihanstore.net/',
# Why: #8265 in Alexa global
'http://www.sharebuilder.com/',
# Why: #8266 in Alexa global
'http://cnhan.com/',
# Why: #8267 in Alexa global
'http://www.partnerwithtom.com/',
# Why: #8268 in Alexa global
'http://www.synonym.com/',
# Why: #8269 in Alexa global
'http://www.areaconnect.com/',
# Why: #8271 in Alexa global
'http://www.one.lt/',
# Why: #8272 in Alexa global
'http://www.mp3quran.net/',
# Why: #8273 in Alexa global
'http://www.anz.co.nz/',
# Why: #8274 in Alexa global
'http://www.buyincoins.com/',
# Why: #8275 in Alexa global
'http://www.surfline.com/',
# Why: #8276 in Alexa global
'http://www.packtpub.com/',
# Why: #8277 in Alexa global
'http://www.informe21.com/',
# Why: #8278 in Alexa global
'http://www.d4000.com/',
# Why: #8279 in Alexa global
'http://www.blog.cz/',
# Why: #8280 in Alexa global
'http://www.myredbook.com/',
# Why: #8281 in Alexa global
'http://www.seslisozluk.net/',
# Why: #8282 in Alexa global
'http://www.simple2advertise.com/',
# Why: #8283 in Alexa global
'http://www.bookit.com/',
# Why: #8284 in Alexa global
'http://www.eranico.com/',
# Why: #8285 in Alexa global
'http://www.pakwheels.com/',
# Why: #8286 in Alexa global
'http://www.x-rates.com/',
# Why: #8287 in Alexa global
'http://www.ilmatieteenlaitos.fi/',
# Why: #8288 in Alexa global
'http://www.vozforums.com/',
# Why: #8289 in Alexa global
'http://www.galerieslafayette.com/',
# Why: #8290 in Alexa global
'http://www.trafficswirl.com/',
# Why: #8291 in Alexa global
'http://www.mql4.com/',
# Why: #8292 in Alexa global
'http://www.torontosun.com/',
# Why: #8293 in Alexa global
'http://www.channel.or.jp/',
# Why: #8295 in Alexa global
'http://www.lebuteur.com/',
# Why: #8296 in Alexa global
'http://www.cruisecritic.com/',
# Why: #8297 in Alexa global
'http://www.rateyourmusic.com/',
# Why: #8298 in Alexa global
'http://www.binsearch.info/',
# Why: #8299 in Alexa global
'http://www.nrj.fr/',
# Why: #8300 in Alexa global
'http://www.megaflix.net/',
# Why: #8301 in Alexa global
'http://www.dosug.cz/',
# Why: #8302 in Alexa global
'http://www.spdb.com.cn/',
# Why: #8303 in Alexa global
'http://www.stop55.com/',
# Why: #8304 in Alexa global
'http://www.qqnz.com/',
# Why: #8305 in Alexa global
'http://ibuonline.com/',
# Why: #8306 in Alexa global
'http://www.jobego.com/',
# Why: #8307 in Alexa global
'http://www.euro.com.pl/',
# Why: #8308 in Alexa global
'http://www.quran.com/',
# Why: #8309 in Alexa global
'http://www.ad1.ru/',
# Why: #8310 in Alexa global
'http://www.avaz.ba/',
# Why: #8311 in Alexa global
'http://www.eloqua.com/',
# Why: #8312 in Alexa global
'http://www.educationconnection.com/',
# Why: #8313 in Alexa global
'http://www.dbank.com/',
# Why: #8314 in Alexa global
'http://www.whois.sc/',
# Why: #8315 in Alexa global
'http://www.youmob.com/',
# Why: #8316 in Alexa global
'http://www.101greatgoals.com/',
# Why: #8317 in Alexa global
'http://www.livefyre.com/',
# Why: #8318 in Alexa global
'http://www.sextubebox.com/',
# Why: #8319 in Alexa global
'http://www.shooshtime.com/',
# Why: #8320 in Alexa global
'http://www.tapuz.co.il/',
# Why: #8321 in Alexa global
'http://www.auchan.fr/',
# Why: #8322 in Alexa global
'http://www.pinkvilla.com/',
# Why: #8323 in Alexa global
'http://www.perspolisnews.com/',
# Why: #8324 in Alexa global
'http://www.scholastic.com/',
# Why: #8325 in Alexa global
'http://www.google.mu/',
# Why: #8326 in Alexa global
'http://www.forex4you.org/',
# Why: #8327 in Alexa global
'http://www.mandtbank.com/',
# Why: #8328 in Alexa global
'http://www.gnezdo.ru/',
# Why: #8329 in Alexa global
'http://www.lulu.com/',
# Why: #8330 in Alexa global
'http://www.anniezhang.com/',
# Why: #8331 in Alexa global
'http://www.bharian.com.my/',
# Why: #8332 in Alexa global
'http://www.comprafacil.com.br/',
# Why: #8333 in Alexa global
'http://www.mmafighting.com/',
# Why: #8334 in Alexa global
'http://www.autotrader.ca/',
# Why: #8335 in Alexa global
'http://www.vectorstock.com/',
# Why: #8336 in Alexa global
'http://www.convio.com/',
# Why: #8337 in Alexa global
'http://www.ktunnel.com/',
# Why: #8338 in Alexa global
'http://www.hbs.edu/',
# Why: #8339 in Alexa global
'http://www.mindspark.com/',
# Why: #8340 in Alexa global
'http://www.trovit.com.mx/',
# Why: #8341 in Alexa global
'http://www.thomsonreuters.com/',
# Why: #8342 in Alexa global
'http://www.yupptv.com/',
# Why: #8343 in Alexa global
'http://www.fullsail.edu/',
# Why: #8344 in Alexa global
'http://www.perfectworld.eu/',
# Why: #8345 in Alexa global
'http://www.ju51.com/',
# Why: #8346 in Alexa global
'http://www.newssnip.com/',
# Why: #8347 in Alexa global
'http://www.livemocha.com/',
# Why: #8348 in Alexa global
'http://www.nespresso.com/',
# Why: #8349 in Alexa global
'http://www.uinvest.com.ua/',
# Why: #8350 in Alexa global
'http://www.yazete.com/',
# Why: #8351 in Alexa global
'http://www.malaysiaairlines.com/',
# Why: #8352 in Alexa global
'http://www.clikseguro.com/',
# Why: #8353 in Alexa global
'http://www.marksdailyapple.com/',
# Why: #8354 in Alexa global
'http://www.topnewsquick.com/',
# Why: #8355 in Alexa global
'http://www.ikyu.com/',
# Why: #8356 in Alexa global
'http://www.mydocomo.com/',
# Why: #8357 in Alexa global
'http://www.tampabay.com/',
# Why: #8358 in Alexa global
'http://www.mo.gov/',
# Why: #8359 in Alexa global
'http://www.daiwa.co.jp/',
# Why: #8360 in Alexa global
'http://www.amiami.jp/',
# Why: #8361 in Alexa global
'http://www.oxfordjournals.org/',
# Why: #8362 in Alexa global
'http://www.manageyourloans.com/',
# Why: #8363 in Alexa global
'http://www.couponcabin.com/',
# Why: #8364 in Alexa global
'http://www.qmnnk.blog.163.com/',
# Why: #8365 in Alexa global
'http://www.mrmlsmatrix.com/',
# Why: #8366 in Alexa global
'http://www.knowd.com/',
# Why: #8367 in Alexa global
'http://www.ladbrokes.com/',
# Why: #8368 in Alexa global
'http://www.ikoo.com/',
# Why: #8369 in Alexa global
'http://www.devhub.com/',
# Why: #8370 in Alexa global
'http://www.dropjack.com/',
# Why: #8371 in Alexa global
'http://www.sadistic.pl/',
# Why: #8372 in Alexa global
'http://www.8comic.com/',
# Why: #8373 in Alexa global
'http://www.optimizepress.com/',
# Why: #8374 in Alexa global
'http://ofweek.com/',
# Why: #8375 in Alexa global
'http://www.msn.com.tw/',
# Why: #8376 in Alexa global
'http://www.donya-e-eqtesad.com/',
# Why: #8377 in Alexa global
'http://www.arabam.com/',
# Why: #8378 in Alexa global
'http://www.playtv.fr/',
# Why: #8379 in Alexa global
'http://www.yourtv.com.au/',
# Why: #8380 in Alexa global
'http://www.teamtalk.com/',
# Why: #8381 in Alexa global
'http://www.createsend.com/',
# Why: #8382 in Alexa global
'http://www.bitcointalk.org/',
# Why: #8383 in Alexa global
'http://www.microcenter.com/',
# Why: #8384 in Alexa global
'http://www.arcadeprehacks.com/',
# Why: #8385 in Alexa global
'http://www.sublimetext.com/',
# Why: #8386 in Alexa global
'http://www.posindonesia.co.id/',
# Why: #8387 in Alexa global
'http://www.paymaster.ru/',
# Why: #8388 in Alexa global
'http://www.ncore.cc/',
# Why: #8390 in Alexa global
'http://www.wikisource.org/',
# Why: #8391 in Alexa global
'http://www.wowgame.jp/',
# Why: #8392 in Alexa global
'http://www.notebooksbilliger.de/',
# Why: #8393 in Alexa global
'http://www.nayakhabar.com/',
# Why: #8394 in Alexa global
'http://www.tim.com.br/',
# Why: #8395 in Alexa global
'http://www.leggo.it/',
# Why: #8396 in Alexa global
'http://www.swoodoo.com/',
# Why: #8397 in Alexa global
'http://www.perfectgirls.es/',
# Why: #8398 in Alexa global
'http://www.beautystyleliving.com/',
# Why: #8399 in Alexa global
'http://www.xmaduras.com/',
# Why: #8400 in Alexa global
'http://www.e-shop.gr/',
# Why: #8401 in Alexa global
'http://www.k8.cn/',
# Why: #8402 in Alexa global
'http://www.27.cn/',
# Why: #8403 in Alexa global
'http://www.belastingdienst.nl/',
# Why: #8404 in Alexa global
'http://www.urbia.de/',
# Why: #8405 in Alexa global
'http://www.lovoo.net/',
# Why: #8406 in Alexa global
'http://www.citizensbank.com/',
# Why: #8407 in Alexa global
'http://www.gulesider.no/',
# Why: #8408 in Alexa global
'http://zhongsou.net/',
# Why: #8409 in Alexa global
'http://www.cinemablend.com/',
# Why: #8410 in Alexa global
'http://www.joydownload.com/',
# Why: #8411 in Alexa global
'http://www.cncmax.cn/',
# Why: #8412 in Alexa global
'http://www.telkom.co.id/',
# Why: #8413 in Alexa global
'http://www.nangaspace.com/',
# Why: #8414 in Alexa global
'http://www.panerabread.com/',
# Why: #8415 in Alexa global
'http://www.cinechest.com/',
# Why: #8416 in Alexa global
'http://www.flixjunky.com/',
# Why: #8417 in Alexa global
'http://www.berlin1.de/',
# Why: #8418 in Alexa global
'http://www.tabonito.pt/',
# Why: #8419 in Alexa global
'http://www.snob.ru/',
# Why: #8420 in Alexa global
'http://www.audiovkontakte.ru/',
# Why: #8421 in Alexa global
'http://www.linuxmint.com/',
# Why: #8422 in Alexa global
'http://www.freshdesk.com/',
# Why: #8423 in Alexa global
'http://www.professionali.ru/',
# Why: #8425 in Alexa global
'http://www.primelocation.com/',
# Why: #8426 in Alexa global
'http://www.femina.hu/',
# Why: #8427 in Alexa global
'http://www.jecontacte.com/',
# Why: #8428 in Alexa global
'http://www.celebritytoob.com/',
# Why: #8429 in Alexa global
'http://www.streamiz-filmze.com/',
# Why: #8430 in Alexa global
'http://l-tike.com/',
# Why: #8431 in Alexa global
'http://www.collegeconfidential.com/',
# Why: #8432 in Alexa global
'http://hafiz.gov.sa/',
# Why: #8433 in Alexa global
'http://www.mega-porno.ru/',
# Why: #8434 in Alexa global
'http://www.ivoox.com/',
# Why: #8435 in Alexa global
'http://www.lmgtfy.com/',
# Why: #8436 in Alexa global
'http://www.pclab.pl/',
# Why: #8437 in Alexa global
'http://www.preisvergleich.de/',
# Why: #8438 in Alexa global
'http://www.weeb.tv/',
# Why: #8439 in Alexa global
'http://www.80018.cn/',
# Why: #8440 in Alexa global
'http://www.tnews.ir/',
# Why: #8441 in Alexa global
'http://www.johnnys-net.jp/',
# Why: #8442 in Alexa global
'http://www.wwtdd.com/',
# Why: #8444 in Alexa global
'http://www.totalfilm.com/',
# Why: #8445 in Alexa global
'http://www.girlfriendvideos.com/',
# Why: #8446 in Alexa global
'http://www.wgt.com/',
# Why: #8447 in Alexa global
'http://www.iu.edu/',
# Why: #8448 in Alexa global
'http://www.topictorch.com/',
# Why: #8449 in Alexa global
'http://www.wenweipo.com/',
# Why: #8450 in Alexa global
'http://duitang.com/',
# Why: #8452 in Alexa global
'http://www.madrid.org/',
# Why: #8453 in Alexa global
'http://www.retrogamer.com/',
# Why: #8454 in Alexa global
'http://www.pantheranetwork.com/',
# Why: #8455 in Alexa global
'http://www.someecards.com/',
# Why: #8456 in Alexa global
'http://www.visafone.com.ng/',
# Why: #8457 in Alexa global
'http://www.infopraca.pl/',
# Why: #8458 in Alexa global
'http://www.nrelate.com/',
# Why: #8459 in Alexa global
'http://www.sia.az/',
# Why: #8460 in Alexa global
'http://www.wallbase.cc/',
# Why: #8461 in Alexa global
'http://www.shareflare.net/',
# Why: #8462 in Alexa global
'http://www.sammydress.com/',
# Why: #8463 in Alexa global
'http://www.goldesel.to/',
# Why: #8464 in Alexa global
'http://www.thefiscaltimes.com/',
# Why: #8465 in Alexa global
'http://www.freelogoservices.com/',
# Why: #8467 in Alexa global
'http://www.dealigg.com/',
# Why: #8468 in Alexa global
'http://www.babypips.com/',
# Why: #8469 in Alexa global
'http://www.diynetwork.com/',
# Why: #8470 in Alexa global
'http://www.porn99.net/',
# Why: #8471 in Alexa global
'http://www.skynewsarabia.com/',
# Why: #8472 in Alexa global
'http://www.eweb4.com/',
# Why: #8473 in Alexa global
'http://www.fedoraproject.org/',
# Why: #8474 in Alexa global
'http://www.nolo.com/',
# Why: #8475 in Alexa global
'http://www.homelink.com.cn/',
# Why: #8476 in Alexa global
'http://www.megabus.com/',
# Why: #8477 in Alexa global
'http://www.fao.org/',
# Why: #8478 in Alexa global
'http://www.am.ru/',
# Why: #8479 in Alexa global
'http://www.sportowefakty.pl/',
# Why: #8481 in Alexa global
'http://www.kidstaff.com.ua/',
# Why: #8482 in Alexa global
'http://www.jhu.edu/',
# Why: #8483 in Alexa global
'http://www.which.co.uk/',
# Why: #8484 in Alexa global
'http://www.sextubehd.xxx/',
# Why: #8485 in Alexa global
'http://www.swansonvitamins.com/',
# Why: #8486 in Alexa global
'http://www.iran-eng.com/',
# Why: #8487 in Alexa global
'http://www.fakenamegenerator.com/',
# Why: #8488 in Alexa global
'http://www.gosong.net/',
# Why: #8489 in Alexa global
'http://www.pep.com.cn/',
# Why: #8490 in Alexa global
'http://www.24open.ru/',
# Why: #8491 in Alexa global
'http://www.123sdfsdfsdfsd.ru/',
# Why: #8492 in Alexa global
'http://www.gotgayporn.com/',
# Why: #8493 in Alexa global
'http://www.zaq.ne.jp/',
# Why: #8494 in Alexa global
'http://www.casadellibro.com/',
# Why: #8495 in Alexa global
'http://www.ixwebhosting.com/',
# Why: #8496 in Alexa global
'http://www.buyorbury.com/',
# Why: #8497 in Alexa global
'http://www.getglue.com/',
# Why: #8498 in Alexa global
'http://www.864321.com/',
# Why: #8499 in Alexa global
'http://www.alivv.com/',
# Why: #8500 in Alexa global
'http://www.4.cn/',
# Why: #8501 in Alexa global
'http://www.competitor.com/',
# Why: #8502 in Alexa global
'http://www.iheima.com/',
# Why: #8503 in Alexa global
'http://www.submarinoviagens.com.br/',
# Why: #8504 in Alexa global
'http://emailsrvr.com/',
# Why: #8505 in Alexa global
'http://www.udacity.com/',
# Why: #8506 in Alexa global
'http://www.mcafeesecure.com/',
# Why: #8507 in Alexa global
'http://www.laposte.fr/',
# Why: #8508 in Alexa global
'http://olhardigital.uol.com.br/',
# Why: #8509 in Alexa global
'http://ppy.sh/',
# Why: #8510 in Alexa global
'http://www.rumah.com/',
# Why: #8511 in Alexa global
'http://www.pullbear.com/',
# Why: #8512 in Alexa global
'http://www.pkt.pl/',
# Why: #8513 in Alexa global
'http://www.jayde.com/',
# Why: #8514 in Alexa global
'http://www.myjoyonline.com/',
# Why: #8515 in Alexa global
'http://www.locopengu.com/',
# Why: #8516 in Alexa global
'http://www.vsnl.net.in/',
# Why: #8517 in Alexa global
'http://www.hornbunny.com/',
# Why: #8518 in Alexa global
'http://www.royalcaribbean.com/',
# Why: #8520 in Alexa global
'http://www.football.ua/',
# Why: #8521 in Alexa global
'http://www.thaifriendly.com/',
# Why: #8522 in Alexa global
'http://www.bankofthewest.com/',
# Why: #8523 in Alexa global
'http://www.indianprice.com/',
# Why: #8524 in Alexa global
'http://www.chodientu.vn/',
# Why: #8525 in Alexa global
'http://www.alison.com/',
# Why: #8526 in Alexa global
'http://www.eveonline.com/',
# Why: #8527 in Alexa global
'http://www.blogg.se/',
# Why: #8528 in Alexa global
'http://www.jetairways.com/',
# Why: #8529 in Alexa global
'http://www.larousse.fr/',
# Why: #8530 in Alexa global
'http://www.noticierodigital.com/',
# Why: #8531 in Alexa global
'http://mkfst.com/',
# Why: #8532 in Alexa global
'http://www.anyfiledownloader.com/',
# Why: #8533 in Alexa global
'http://www.tiramillas.net/',
# Why: #8534 in Alexa global
'http://www.telus.com/',
# Why: #8535 in Alexa global
'http://www.paperblog.com/',
# Why: #8536 in Alexa global
'http://www.songsterr.com/',
# Why: #8537 in Alexa global
'http://www.entremujeres.com/',
# Why: #8538 in Alexa global
'http://www.startsiden.no/',
# Why: #8539 in Alexa global
'http://www.hotspotshield.com/',
# Why: #8540 in Alexa global
'http://www.hosteurope.de/',
# Why: #8541 in Alexa global
'http://www.ebags.com/',
# Why: #8542 in Alexa global
'http://www.eenadupratibha.net/',
# Why: #8543 in Alexa global
'http://www.uppit.com/',
# Why: #8544 in Alexa global
'http://www.piaohua.com/',
# Why: #8545 in Alexa global
'http://www.xxxymovies.com/',
# Why: #8546 in Alexa global
'http://www.netbarg.com/',
# Why: #8547 in Alexa global
'http://www.chip.com.tr/',
# Why: #8548 in Alexa global
'http://xl.co.id/',
# Why: #8549 in Alexa global
'http://www.kowalskypage.com/',
# Why: #8550 in Alexa global
'http://www.afterdawn.com/',
# Why: #8551 in Alexa global
'http://www.locanto.com/',
# Why: #8552 in Alexa global
'http://www.liilas.com/',
# Why: #8553 in Alexa global
'http://www.superboy.com/',
# Why: #8554 in Alexa global
'http://www.indiavisiontv.com/',
# Why: #8555 in Alexa global
'http://www.ixquick.com/',
# Why: #8556 in Alexa global
'http://www.hotelium.com/',
# Why: #8557 in Alexa global
'http://www.twsela.com/',
# Why: #8558 in Alexa global
'http://www.newsmeback.com/',
# Why: #8559 in Alexa global
'http://www.perfectliving.com/',
# Why: #8560 in Alexa global
'http://www.laughingsquid.com/',
# Why: #8561 in Alexa global
'http://www.designboom.com/',
# Why: #8562 in Alexa global
'http://www.zigil.ir/',
# Why: #8563 in Alexa global
'http://www.coachfactory.com/',
# Why: #8564 in Alexa global
'http://www.wst.cn/',
# Why: #8565 in Alexa global
'http://www.kaboodle.com/',
# Why: #8566 in Alexa global
'http://www.fastmail.fm/',
# Why: #8567 in Alexa global
'http://www.threadless.com/',
# Why: #8568 in Alexa global
'http://www.wiseconvert.com/',
# Why: #8569 in Alexa global
'http://www.br.de/',
# Why: #8570 in Alexa global
'http://www.promovacances.com/',
# Why: #8572 in Alexa global
'http://www.wrzuta.pl/',
# Why: #8573 in Alexa global
'http://www.fromdoctopdf.com/',
# Why: #8574 in Alexa global
'http://www.ono.es/',
# Why: #8575 in Alexa global
'http://www.zinio.com/',
# Why: #8576 in Alexa global
'http://netcoc.com/',
# Why: #8577 in Alexa global
'http://www.eanswers.com/',
# Why: #8578 in Alexa global
'http://www.wallst.com/',
# Why: #8579 in Alexa global
'http://www.ipiccy.com/',
# Why: #8580 in Alexa global
'http://www.fastweb.it/',
# Why: #8581 in Alexa global
'http://www.kaufmich.com/',
# Why: #8582 in Alexa global
'http://www.groupon.co.za/',
# Why: #8583 in Alexa global
'http://www.cyzo.com/',
# Why: #8584 in Alexa global
'http://www.addic7ed.com/',
# Why: #8585 in Alexa global
'http://www.liuliangbao.cn/',
# Why: #8586 in Alexa global
'http://www.alintibaha.net/',
# Why: #8587 in Alexa global
'http://www.indiewire.com/',
# Why: #8588 in Alexa global
'http://www.needforspeed.com/',
# Why: #8590 in Alexa global
'http://www.e24.no/',
# Why: #8591 in Alexa global
'http://www.hupso.com/',
# Why: #8592 in Alexa global
'http://www.kathimerini.gr/',
# Why: #8593 in Alexa global
'http://www.worldoffiles.net/',
# Why: #8594 in Alexa global
'http://www.express.pk/',
# Why: #8595 in Alexa global
'http://www.wieszjak.pl/',
# Why: #8597 in Alexa global
'http://www.mobile.bg/',
# Why: #8598 in Alexa global
'http://www.subway.com/',
# Why: #8599 in Alexa global
'http://www.akhbarelyom.com/',
# Why: #8600 in Alexa global
'http://www.thisoldhouse.com/',
# Why: #8601 in Alexa global
'http://www.autoevolution.com/',
# Why: #8602 in Alexa global
'http://www.public-api.wordpress.com/',
# Why: #8603 in Alexa global
'http://www.airarabia.com/',
# Why: #8604 in Alexa global
'http://www.powerball.com/',
# Why: #8605 in Alexa global
'http://www.mais.uol.com.br/',
# Why: #8606 in Alexa global
'http://www.visa.com/',
# Why: #8607 in Alexa global
'http://www.gendai.net/',
# Why: #8608 in Alexa global
'http://www.gymboree.com/',
# Why: #8609 in Alexa global
'http://www.tvp.pl/',
# Why: #8610 in Alexa global
'http://www.sinhayasocialreader.com/',
# Why: #8611 in Alexa global
'http://a963.com/',
# Why: #8612 in Alexa global
'http://www.gamgos.ae/',
# Why: #8613 in Alexa global
'http://www.fx678.com/',
# Why: #8614 in Alexa global
'http://www.mp3round.com/',
# Why: #8615 in Alexa global
'http://www.komonews.com/',
# Why: #8616 in Alexa global
'http://www.contactcars.com/',
# Why: #8617 in Alexa global
'http://www.pdftoword.com/',
# Why: #8618 in Alexa global
'http://www.songtaste.com/',
# Why: #8620 in Alexa global
'http://www.squareup.com/',
# Why: #8621 in Alexa global
'http://www.newsevent24.com/',
# Why: #8622 in Alexa global
'http://www.dti.ne.jp/',
# Why: #8623 in Alexa global
'http://www.livestation.com/',
# Why: #8624 in Alexa global
'http://www.oldertube.com/',
# Why: #8625 in Alexa global
'http://www.rtl.fr/',
# Why: #8626 in Alexa global
'http://www.gather.com/',
# Why: #8627 in Alexa global
'http://www.liderendeportes.com/',
# Why: #8628 in Alexa global
'http://www.thewrap.com/',
# Why: #8629 in Alexa global
'http://www.viber.com/',
# Why: #8630 in Alexa global
'http://www.reklama5.mk/',
# Why: #8631 in Alexa global
'http://www.fonts.com/',
# Why: #8632 in Alexa global
'http://www.hrsaccount.com/',
# Why: #8633 in Alexa global
'http://www.bizcommunity.com/',
# Why: #8634 in Alexa global
'http://www.favicon.cc/',
# Why: #8635 in Alexa global
'http://www.totalping.com/',
# Why: #8636 in Alexa global
'http://www.live365.com/',
# Why: #8637 in Alexa global
'http://www.tlife.gr/',
# Why: #8638 in Alexa global
'http://www.imasters.com.br/',
# Why: #8639 in Alexa global
'http://www.n11.com/',
# Why: #8640 in Alexa global
'http://www.iam.ma/',
# Why: #8641 in Alexa global
'http://www.qq5.com/',
# Why: #8642 in Alexa global
'http://www.tvboxnow.com/',
# Why: #8643 in Alexa global
'http://www.limetorrents.com/',
# Why: #8644 in Alexa global
'http://www.bancopopular.es/',
# Why: #8645 in Alexa global
'http://www.ray-ban.com/',
# Why: #8646 in Alexa global
'http://www.drweb.com/',
# Why: #8647 in Alexa global
'http://www.hushmail.com/',
# Why: #8648 in Alexa global
'http://www.resuelvetudeuda.com/',
# Why: #8649 in Alexa global
'http://www.sharpnews.ru/',
# Why: #8650 in Alexa global
'http://www.hellocoton.fr/',
# Why: #8651 in Alexa global
'http://buysub.com/',
# Why: #8652 in Alexa global
'http://www.homemoviestube.com/',
# Why: #8653 in Alexa global
'http://www.utsandiego.com/',
# Why: #8654 in Alexa global
'http://www.learn4good.com/',
# Why: #8655 in Alexa global
'http://www.nii.ac.jp/',
# Why: #8656 in Alexa global
'http://www.girlsgogames.ru/',
# Why: #8657 in Alexa global
'http://www.talksport.co.uk/',
# Why: #8658 in Alexa global
'http://fap.to/',
# Why: #8659 in Alexa global
'http://www.teennick.com/',
# Why: #8660 in Alexa global
'http://www.seitwert.de/',
# Why: #8661 in Alexa global
'http://www.celebritymoviearchive.com/',
# Why: #8662 in Alexa global
'http://www.sukar.com/',
# Why: #8663 in Alexa global
'http://www.astromeridian.ru/',
# Why: #8664 in Alexa global
'http://www.zen-cart.com/',
# Why: #8665 in Alexa global
'http://www.1phads.com/',
# Why: #8666 in Alexa global
'http://www.plaisio.gr/',
# Why: #8667 in Alexa global
'http://www.photozou.jp/',
# Why: #8668 in Alexa global
'http://www.cplusplus.com/',
# Why: #8669 in Alexa global
'http://www.ewebse.com/',
# Why: #8670 in Alexa global
'http://6eat.com/',
# Why: #8672 in Alexa global
'http://www.payless.com/',
# Why: #8673 in Alexa global
'http://www.subaonet.com/',
# Why: #8674 in Alexa global
'http://www.dlisted.com/',
# Why: #8675 in Alexa global
'http://www.kia.com/',
# Why: #8676 in Alexa global
'http://www.lankahotnews.net/',
# Why: #8677 in Alexa global
'http://www.vg247.com/',
# Why: #8678 in Alexa global
'http://www.formstack.com/',
# Why: #8679 in Alexa global
'http://www.jobs.net/',
# Why: #8680 in Alexa global
'http://www.coolchaser.com/',
# Why: #8681 in Alexa global
'http://www.blackplanet.com/',
# Why: #8682 in Alexa global
'http://www.unionbank.com/',
# Why: #8683 in Alexa global
'http://www.record.com.mx/',
# Why: #8684 in Alexa global
'http://www.121ware.com/',
# Why: #8685 in Alexa global
'http://www.inkfrog.com/',
# Why: #8686 in Alexa global
'http://cnstock.com/',
# Why: #8687 in Alexa global
'http://www.marineaquariumfree.com/',
# Why: #8688 in Alexa global
'http://www.encuentra24.com/',
# Why: #8689 in Alexa global
'http://www.mixturecloud.com/',
# Why: #8690 in Alexa global
'http://www.yninfo.com/',
# Why: #8691 in Alexa global
'http://www.lesnumeriques.com/',
# Why: #8692 in Alexa global
'http://www.autopartswarehouse.com/',
# Why: #8693 in Alexa global
'http://www.lijit.com/',
# Why: #8694 in Alexa global
'http://www.ti.com/',
# Why: #8695 in Alexa global
'http://www.umd.edu/',
# Why: #8696 in Alexa global
'http://www.zdnet.co.uk/',
# Why: #8697 in Alexa global
'http://www.begin-download.com/',
# Why: #8698 in Alexa global
'http://www.showsiteinfo.us/',
# Why: #8699 in Alexa global
'http://www.uchicago.edu/',
# Why: #8700 in Alexa global
'http://www.whatsmyserp.com/',
# Why: #8701 in Alexa global
'http://www.asos.fr/',
# Why: #8702 in Alexa global
'http://www.ibosocial.com/',
# Why: #8703 in Alexa global
'http://www.amorenlinea.com/',
# Why: #8704 in Alexa global
'http://www.videopremium.tv/',
# Why: #8705 in Alexa global
'http://www.trkjmp.com/',
# Why: #8706 in Alexa global
'http://www.creativecow.net/',
# Why: #8707 in Alexa global
'http://www.webartex.ru/',
# Why: #8708 in Alexa global
'http://www.olx.com.ng/',
# Why: #8709 in Alexa global
'http://www.overclockzone.com/',
# Why: #8710 in Alexa global
'http://www.rongbay.com/',
# Why: #8711 in Alexa global
'http://www.maximustube.com/',
# Why: #8712 in Alexa global
'http://www.priberam.pt/',
# Why: #8713 in Alexa global
'http://www.comsenz.com/',
# Why: #8714 in Alexa global
'http://www.prensaescrita.com/',
# Why: #8715 in Alexa global
'http://www.gameslist.com/',
# Why: #8716 in Alexa global
'http://www.lingualeo.com/',
# Why: #8717 in Alexa global
'http://www.epfoservices.in/',
# Why: #8718 in Alexa global
'http://www.webbirga.net/',
# Why: #8719 in Alexa global
'http://www.pb.com/',
# Why: #8720 in Alexa global
'http://www.fineco.it/',
# Why: #8721 in Alexa global
'http://www.highrisehq.com/',
# Why: #8722 in Alexa global
'http://www.hotgoo.com/',
# Why: #8723 in Alexa global
'http://www.netdoctor.co.uk/',
# Why: #8725 in Alexa global
'http://domain.com/',
# Why: #8726 in Alexa global
'http://www.aramex.com/',
# Why: #8727 in Alexa global
'http://www.google.co.uz/',
# Why: #8728 in Alexa global
'http://www.savings.com/',
# Why: #8729 in Alexa global
'http://www.airtelbroadband.in/',
# Why: #8730 in Alexa global
'http://www.postimees.ee/',
# Why: #8731 in Alexa global
'http://www.wallsave.com/',
# Why: #8732 in Alexa global
'http://www.df.gob.mx/',
# Why: #8733 in Alexa global
'http://www.flashgames247.com/',
# Why: #8735 in Alexa global
'http://www.libsyn.com/',
# Why: #8736 in Alexa global
'http://www.goobike.com/',
# Why: #8737 in Alexa global
'http://www.trivago.com/',
# Why: #8738 in Alexa global
'http://www.mt.co.kr/',
# Why: #8739 in Alexa global
'http://www.android-hilfe.de/',
# Why: #8740 in Alexa global
'http://www.anquan.org/',
# Why: #8741 in Alexa global
'http://www.dota2.com/',
# Why: #8742 in Alexa global
'http://www.vladtv.com/',
# Why: #8743 in Alexa global
'http://www.oovoo.com/',
# Why: #8744 in Alexa global
'http://www.mybrowsercash.com/',
# Why: #8745 in Alexa global
'http://www.stafaband.info/',
# Why: #8746 in Alexa global
'http://www.vsao.vn/',
# Why: #8747 in Alexa global
'http://www.smithsonianmag.com/',
# Why: #8748 in Alexa global
'http://www.feedblitz.com/',
# Why: #8749 in Alexa global
'http://www.kibeloco.com.br/',
# Why: #8750 in Alexa global
'http://www.burningcamel.com/',
# Why: #8751 in Alexa global
'http://www.northwestern.edu/',
# Why: #8752 in Alexa global
'http://www.tucows.com/',
# Why: #8753 in Alexa global
'http://www.porn-granny-tube.com/',
# Why: #8754 in Alexa global
'http://www.linksys.com/',
# Why: #8755 in Alexa global
'http://www.avea.com.tr/',
# Why: #8756 in Alexa global
'http://www.ams.se/',
# Why: #8757 in Alexa global
'http://www.canadanepalvid.com/',
# Why: #8758 in Alexa global
'http://www.venmobulo.com/',
# Why: #8759 in Alexa global
'http://www.levi.com/',
# Why: #8760 in Alexa global
'http://www.freshome.com/',
# Why: #8761 in Alexa global
'http://www.loja2.com.br/',
# Why: #8762 in Alexa global
'http://www.gameduell.de/',
# Why: #8763 in Alexa global
'http://www.reserveamerica.com/',
# Why: #8764 in Alexa global
'http://www.fakings.com/',
# Why: #8765 in Alexa global
'http://www.akb48newstimes.jp/',
# Why: #8766 in Alexa global
'http://www.polygon.com/',
# Why: #8767 in Alexa global
'http://www.mtwebcenters.com.tw/',
# Why: #8768 in Alexa global
'http://www.news.mn/',
# Why: #8769 in Alexa global
'http://www.addictinginfo.org/',
# Why: #8770 in Alexa global
'http://www.bonanza.com/',
# Why: #8771 in Alexa global
'http://www.adlock.in/',
# Why: #8772 in Alexa global
'http://www.apni.tv/',
# Why: #8773 in Alexa global
'http://www.3m.com/',
# Why: #8774 in Alexa global
'http://www.gendama.jp/',
# Why: #8775 in Alexa global
'http://www.usingenglish.com/',
# Why: #8776 in Alexa global
'http://www.sammsoft.com/',
# Why: #8777 in Alexa global
'http://www.pedaily.cn/',
# Why: #8778 in Alexa global
'http://www.thevault.bz/',
# Why: #8779 in Alexa global
'http://www.groupon.my/',
# Why: #8780 in Alexa global
'http://www.banamex.com/',
# Why: #8781 in Alexa global
'http://hualongxiang.com/',
# Why: #8782 in Alexa global
'http://www.bodis.com/',
# Why: #8783 in Alexa global
'http://www.dqx.jp/',
# Why: #8784 in Alexa global
'http://www.io.ua/',
# Why: #8785 in Alexa global
'http://joy.cn/',
# Why: #8786 in Alexa global
'http://www.minglebox.com/',
# Why: #8787 in Alexa global
'http://www.forumspecialoffers.com/',
# Why: #8788 in Alexa global
'http://www.remax.com/',
# Why: #8789 in Alexa global
'http://www.makaan.com/',
# Why: #8790 in Alexa global
'http://www.voglioporno.com/',
# Why: #8791 in Alexa global
'http://www.chinaluxus.com/',
# Why: #8792 in Alexa global
'http://www.parenting.com/',
# Why: #8793 in Alexa global
'http://www.superdownloads.com.br/',
# Why: #8794 in Alexa global
'http://www.aeon.co.jp/',
# Why: #8795 in Alexa global
'http://www.nettavisen.no/',
# Why: #8796 in Alexa global
'http://www.21cbh.com/',
# Why: #8797 in Alexa global
'http://www.mobilestan.net/',
# Why: #8798 in Alexa global
'http://www.cheathappens.com/',
# Why: #8799 in Alexa global
'http://www.azxeber.com/',
# Why: #8800 in Alexa global
'http://www.foodgawker.com/',
# Why: #8801 in Alexa global
'http://www.miitbeian.gov.cn/',
# Why: #8802 in Alexa global
'http://www.eb80.com/',
# Why: #8803 in Alexa global
'http://www.dudamobile.com/',
# Why: #8804 in Alexa global
'http://www.sahafah.net/',
# Why: #8805 in Alexa global
'http://www.ait-themes.com/',
# Why: #8806 in Alexa global
'http://www.house.gov/',
# Why: #8807 in Alexa global
'http://www.ffffound.com/',
# Why: #8808 in Alexa global
'http://sssc.cn/',
# Why: #8809 in Alexa global
'http://www.khanwars.ir/',
# Why: #8810 in Alexa global
'http://www.wowslider.com/',
# Why: #8811 in Alexa global
'http://www.fashionara.com/',
# Why: #8812 in Alexa global
'http://www.pornxxxhub.com/',
# Why: #8813 in Alexa global
'http://www.minhavida.com.br/',
# Why: #8814 in Alexa global
'http://www.senzapudore.it/',
# Why: #8815 in Alexa global
'http://www.extra.cz/',
# Why: #8816 in Alexa global
'http://www.cinemark.com/',
# Why: #8817 in Alexa global
'http://www.career.ru/',
# Why: #8818 in Alexa global
'http://www.realself.com/',
# Why: #8819 in Alexa global
'http://www.i4455.com/',
# Why: #8820 in Alexa global
'http://www.ntlworld.com/',
# Why: #8821 in Alexa global
'http://chinaw3.com/',
# Why: #8822 in Alexa global
'http://www.berliner-sparkasse.de/',
# Why: #8823 in Alexa global
'http://www.autoscout24.be/',
# Why: #8824 in Alexa global
'http://www.heureka.sk/',
# Why: #8825 in Alexa global
'http://tienphong.vn/',
# Why: #8826 in Alexa global
'http://www.1001freefonts.com/',
# Why: #8827 in Alexa global
'http://www.bluestacks.com/',
# Why: #8828 in Alexa global
'http://www.livesports.pl/',
# Why: #8829 in Alexa global
'http://www.bd-pratidin.com/',
# Why: #8831 in Alexa global
'http://www.es.tl/',
# Why: #8832 in Alexa global
'http://www.backcountry.com/',
# Why: #8833 in Alexa global
'http://www.fourhourworkweek.com/',
# Why: #8834 in Alexa global
'http://ebay.cn/',
# Why: #8835 in Alexa global
'http://www.pointclicktrack.com/',
# Why: #8836 in Alexa global
'http://www.joomlacode.org/',
# Why: #8837 in Alexa global
'http://www.fantage.com/',
# Why: #8838 in Alexa global
'http://www.seowizard.ru/',
# Why: #8839 in Alexa global
'http://military38.com/',
# Why: #8840 in Alexa global
'http://www.wenkang.cn/',
# Why: #8842 in Alexa global
'http://www.swedbank.lt/',
# Why: #8843 in Alexa global
'http://www.govoyages.com/',
# Why: #8844 in Alexa global
'http://www.fgov.be/',
# Why: #8845 in Alexa global
'http://www.dengeki.com/',
# Why: #8846 in Alexa global
'http://www.3773.com.cn/',
# Why: #8847 in Alexa global
'http://www.ed4.net/',
# Why: #8848 in Alexa global
'http://www.mql5.com/',
# Why: #8849 in Alexa global
'http://www.gottabemobile.com/',
# Why: #8850 in Alexa global
'http://www.kdslife.com/',
# Why: #8851 in Alexa global
'http://5yi.com/',
# Why: #8852 in Alexa global
'http://www.bforex.com/',
# Why: #8853 in Alexa global
'http://www.eurogamer.net/',
# Why: #8854 in Alexa global
'http://www.az.pl/',
# Why: #8855 in Alexa global
'http://www.partypoker.com/',
# Why: #8856 in Alexa global
'http://www.cinapalace.com/',
# Why: #8857 in Alexa global
'http://www.sbt.com.br/',
# Why: #8858 in Alexa global
'http://www.nanos.jp/',
# Why: #8859 in Alexa global
'http://www.phpcms.cn/',
# Why: #8860 in Alexa global
'http://www.weatherzone.com.au/',
# Why: #8861 in Alexa global
'http://www.cutv.com/',
# Why: #8862 in Alexa global
'http://www.sweetwater.com/',
# Why: #8863 in Alexa global
'http://www.vodacom.co.za/',
# Why: #8864 in Alexa global
'http://www.hostgator.in/',
# Why: #8865 in Alexa global
'http://www.mojim.com/',
# Why: #8866 in Alexa global
'http://www.getnews.jp/',
# Why: #8868 in Alexa global
'http://www.eklablog.com/',
# Why: #8869 in Alexa global
'http://www.divaina.com/',
# Why: #8870 in Alexa global
'http://www.acces-charme.com/',
# Why: #8871 in Alexa global
'http://www.airfrance.fr/',
# Why: #8872 in Alexa global
'http://www.widgeo.net/',
# Why: #8873 in Alexa global
'http://www.whosdatedwho.com/',
# Why: #8874 in Alexa global
'http://www.funtrivia.com/',
# Why: #8875 in Alexa global
'http://www.servis24.cz/',
# Why: #8876 in Alexa global
'http://www.emagister.com/',
# Why: #8877 in Alexa global
'http://www.torrentkitty.com/',
# Why: #8878 in Alexa global
'http://www.abc.com.py/',
# Why: #8879 in Alexa global
'http://www.farfetch.com/',
# Why: #8880 in Alexa global
'http://www.gamestar.de/',
# Why: #8881 in Alexa global
'http://www.careers24.com/',
# Why: #8882 in Alexa global
'http://www.styleblazer.com/',
# Why: #8883 in Alexa global
'http://www.ibtesama.com/',
# Why: #8884 in Alexa global
'http://ifunny.mobi/',
# Why: #8885 in Alexa global
'http://www.antpedia.com/',
# Why: #8886 in Alexa global
'http://www.fivb.org/',
# Why: #8887 in Alexa global
'http://www.littleone.ru/',
# Why: #8888 in Alexa global
'http://www.rainbowdressup.com/',
# Why: #8889 in Alexa global
'http://www.zerozero.pt/',
# Why: #8890 in Alexa global
'http://www.edreams.com/',
# Why: #8891 in Alexa global
'http://www.whoishostingthis.com/',
# Why: #8892 in Alexa global
'http://www.gucci.com/',
# Why: #8893 in Alexa global
'http://www.animeplus.tv/',
# Why: #8894 in Alexa global
'http://www.five.tv/',
# Why: #8895 in Alexa global
'http://www.vacationstogo.com/',
# Why: #8896 in Alexa global
'http://www.dikaiologitika.gr/',
# Why: #8897 in Alexa global
'http://www.mmorpg.com/',
# Why: #8898 in Alexa global
'http://www.jcwhitney.com/',
# Why: #8899 in Alexa global
'http://www.russiandatingbeauties.com/',
# Why: #8900 in Alexa global
'http://www.xrstats.com/',
# Why: #8901 in Alexa global
'http://www.gm99.com/',
# Why: #8902 in Alexa global
'http://www.megashares.com/',
# Why: #8903 in Alexa global
'http://www.oscaro.com/',
# Why: #8904 in Alexa global
'http://www.yezizhu.com/',
# Why: #8905 in Alexa global
'http://www.get2ch.net/',
# Why: #8906 in Alexa global
'http://www.cheaperthandirt.com/',
# Why: #8907 in Alexa global
'http://www.telcel.com/',
# Why: #8908 in Alexa global
'http://www.themefuse.com/',
# Why: #8909 in Alexa global
'http://www.addictivetips.com/',
# Why: #8910 in Alexa global
'http://www.designshack.net/',
# Why: #8911 in Alexa global
'http://www.eurobank.gr/',
# Why: #8912 in Alexa global
'http://www.nexon.net/',
# Why: #8913 in Alexa global
'http://www.rakuya.com.tw/',
# Why: #8914 in Alexa global
'http://www.fulltiltpoker.eu/',
# Why: #8915 in Alexa global
'http://www.pimei.com/',
# Why: #8916 in Alexa global
'http://www.photoshop.com/',
# Why: #8917 in Alexa global
'http://www.domainnamesales.com/',
# Why: #8918 in Alexa global
'http://www.sky.fm/',
# Why: #8919 in Alexa global
'http://www.yasni.de/',
# Why: #8920 in Alexa global
'http://www.travian.ru/',
# Why: #8921 in Alexa global
'http://www.stickpage.com/',
# Why: #8922 in Alexa global
'http://www.joomla-master.org/',
# Why: #8923 in Alexa global
'http://www.sarkari-naukri.in/',
# Why: #8924 in Alexa global
'http://www.iphones.ru/',
# Why: #8925 in Alexa global
'http://www.foto.ru/',
# Why: #8927 in Alexa global
'http://www.smude.edu.in/',
# Why: #8928 in Alexa global
'http://www.gothamist.com/',
# Why: #8929 in Alexa global
'http://www.teslamotors.com/',
# Why: #8930 in Alexa global
'http://www.seobudget.ru/',
# Why: #8931 in Alexa global
'http://www.tiantian.com/',
# Why: #8932 in Alexa global
'http://www.smarter.co.jp/',
# Why: #8933 in Alexa global
'http://www.videohelp.com/',
# Why: #8934 in Alexa global
'http://www.textbroker.com/',
# Why: #8935 in Alexa global
'http://www.garena.com/',
# Why: #8936 in Alexa global
'http://www.patient.co.uk/',
# Why: #8937 in Alexa global
'http://www.20minutepayday.com/',
# Why: #8938 in Alexa global
'http://www.bgames.com/',
# Why: #8939 in Alexa global
'http://www.superherohype.com/',
# Why: #8940 in Alexa global
'http://www.sephora.com.br/',
# Why: #8941 in Alexa global
'http://www.interest.me/',
# Why: #8942 in Alexa global
'http://www.inhabitat.com/',
# Why: #8943 in Alexa global
'http://www.downloads.nl/',
# Why: #8944 in Alexa global
'http://www.rusnovosti.ru/',
# Why: #8945 in Alexa global
'http://www.mr-guangdong.com/',
# Why: #8946 in Alexa global
'http://www.greyhound.com/',
# Why: #8947 in Alexa global
'http://www.qd8.com.cn/',
# Why: #8948 in Alexa global
'http://www.okpay.com/',
# Why: #8949 in Alexa global
'http://www.amateurcommunity.com/',
# Why: #8950 in Alexa global
'http://www.jeunesseglobal.com/',
# Why: #8951 in Alexa global
'http://www.nigma.ru/',
# Why: #8952 in Alexa global
'http://www.brightcove.com/',
# Why: #8953 in Alexa global
'http://www.wabei.cn/',
# Why: #8954 in Alexa global
'http://www.safesearch.net/',
# Why: #8955 in Alexa global
'http://www.teluguone.com/',
# Why: #8956 in Alexa global
'http://www.custojusto.pt/',
# Why: #8957 in Alexa global
'http://www.telebank.ru/',
# Why: #8958 in Alexa global
'http://www.kuwait.tt/',
# Why: #8959 in Alexa global
'http://www.acs.org/',
# Why: #8960 in Alexa global
'http://www.sverigesradio.se/',
# Why: #8961 in Alexa global
'http://www.mps.it/',
# Why: #8963 in Alexa global
'http://www.utanbaby.com/',
# Why: #8964 in Alexa global
'http://www.junocloud.me/',
# Why: #8965 in Alexa global
'http://www.expedia.co.in/',
# Why: #8966 in Alexa global
'http://www.rosnet.ru/',
# Why: #8967 in Alexa global
'http://www.kanoon.ir/',
# Why: #8968 in Alexa global
'http://www.website.ws/',
# Why: #8969 in Alexa global
'http://www.bagittoday.com/',
# Why: #8970 in Alexa global
'http://www.gooya.com/',
# Why: #8971 in Alexa global
'http://www.travelchannel.com/',
# Why: #8972 in Alexa global
'http://www.chooseauto.com.cn/',
# Why: #8973 in Alexa global
'http://www.flix247.com/',
# Why: #8974 in Alexa global
'http://www.momsbangteens.com/',
# Why: #8975 in Alexa global
'http://www.photofacefun.com/',
# Why: #8976 in Alexa global
'http://www.vistaprint.fr/',
# Why: #8977 in Alexa global
'http://www.vidbux.com/',
# Why: #8978 in Alexa global
'http://www.edu.ro/',
# Why: #8979 in Alexa global
'http://www.hd-xvideos.com/',
# Why: #8980 in Alexa global
'http://www.woodworking4home.com/',
# Why: #8981 in Alexa global
'http://www.reformal.ru/',
# Why: #8982 in Alexa global
'http://www.morodora.com/',
# Why: #8983 in Alexa global
'http://www.gelbooru.com/',
# Why: #8984 in Alexa global
'http://www.porntalk.com/',
# Why: #8985 in Alexa global
'http://www.assurland.com/',
# Why: #8986 in Alexa global
'http://www.amalgama-lab.com/',
# Why: #8987 in Alexa global
'http://www.showtime.jp/',
# Why: #8988 in Alexa global
'http://www.9to5mac.com/',
# Why: #8989 in Alexa global
'http://www.linux.org.ru/',
# Why: #8990 in Alexa global
'http://www.dolartoday.com/',
# Why: #8991 in Alexa global
'http://www.theme-junkie.com/',
# Why: #8992 in Alexa global
'http://www.seolib.ru/',
# Why: #8993 in Alexa global
'http://www.unesco.org/',
# Why: #8994 in Alexa global
'http://www.porncontrol.com/',
# Why: #8995 in Alexa global
'http://www.topdocumentaryfilms.com/',
# Why: #8996 in Alexa global
'http://www.tvmovie.de/',
# Why: #8997 in Alexa global
'http://adsl.free.fr/',
# Why: #8998 in Alexa global
'http://www.sprinthost.ru/',
# Why: #8999 in Alexa global
'http://www.reason.com/',
# Why: #9000 in Alexa global
'http://www.morazzia.com/',
# Why: #9001 in Alexa global
'http://www.yellowmoxie.com/',
# Why: #9002 in Alexa global
'http://www.banggood.com/',
# Why: #9003 in Alexa global
'http://www.pex.jp/',
# Why: #9004 in Alexa global
'http://www.espn.com.br/',
# Why: #9005 in Alexa global
'http://www.memedad.com/',
# Why: #9006 in Alexa global
'http://www.lovebuddyhookup.com/',
# Why: #9007 in Alexa global
'http://www.scmp.com/',
# Why: #9008 in Alexa global
'http://www.kjendis.no/',
# Why: #9010 in Alexa global
'http://www.metro-cc.ru/',
# Why: #9011 in Alexa global
'http://www.disdus.com/',
# Why: #9012 in Alexa global
'http://www.nola.com/',
# Why: #9013 in Alexa global
'http://www.tubesplash.com/',
# Why: #9014 in Alexa global
'http://crx7601.com/',
# Why: #9015 in Alexa global
'http://www.iana.org/',
# Why: #9016 in Alexa global
'http://www.howrse.com/',
# Why: #9017 in Alexa global
'http://www.anime-sharing.com/',
# Why: #9018 in Alexa global
'http://www.geny.com/',
# Why: #9019 in Alexa global
'http://www.carrefour.es/',
# Why: #9020 in Alexa global
'http://www.kemalistgazete.net/',
# Why: #9021 in Alexa global
'http://www.freedirectory-list.com/',
# Why: #9022 in Alexa global
'http://www.girlgamey.com/',
# Why: #9023 in Alexa global
'http://www.blogbus.com/',
# Why: #9024 in Alexa global
'http://www.funlolx.com/',
# Why: #9025 in Alexa global
'http://www.zyue.com/',
# Why: #9026 in Alexa global
'http://www.freepeople.com/',
# Why: #9027 in Alexa global
'http://www.tgareed.com/',
# Why: #9028 in Alexa global
'http://www.lifestreetmedia.com/',
# Why: #9029 in Alexa global
'http://www.fybersearch.com/',
# Why: #9030 in Alexa global
'http://www.livefreefun.org/',
# Why: #9031 in Alexa global
'http://www.cairodar.com/',
# Why: #9032 in Alexa global
'http://www.suite101.com/',
# Why: #9033 in Alexa global
'http://www.elcinema.com/',
# Why: #9034 in Alexa global
'http://leiting001.com/',
# Why: #9035 in Alexa global
'http://www.ifttt.com/',
# Why: #9036 in Alexa global
'http://www.google.com.mm/',
# Why: #9037 in Alexa global
'http://www.gizbot.com/',
# Why: #9038 in Alexa global
'http://www.games2win.com/',
# Why: #9040 in Alexa global
'http://www.stiforp.com/',
# Why: #9041 in Alexa global
'http://www.nrc.nl/',
# Why: #9042 in Alexa global
'http://www.slashgear.com/',
# Why: #9043 in Alexa global
'http://www.girlsgames123.com/',
# Why: #9044 in Alexa global
'http://www.mmajunkie.com/',
# Why: #9045 in Alexa global
'http://www.cadenaser.com/',
# Why: #9046 in Alexa global
'http://www.frombar.com/',
# Why: #9047 in Alexa global
'http://www.katmirror.com/',
# Why: #9048 in Alexa global
'http://www.cnsnews.com/',
# Why: #9049 in Alexa global
'http://www.duolingo.com/',
# Why: #9050 in Alexa global
'http://www.afterbuy.de/',
# Why: #9051 in Alexa global
'http://www.jpc.com/',
# Why: #9052 in Alexa global
'http://www.publix.com/',
# Why: #9053 in Alexa global
'http://www.ehealthforum.com/',
# Why: #9054 in Alexa global
'http://www.budget.com/',
# Why: #9055 in Alexa global
'http://www.ipma.pt/',
# Why: #9056 in Alexa global
'http://www.meetladies.me/',
# Why: #9057 in Alexa global
'http://www.adroll.com/',
# Why: #9058 in Alexa global
'http://www.renxo.com/',
# Why: #9059 in Alexa global
'http://www.empireonline.com/',
# Why: #9060 in Alexa global
'http://www.modareb.com/',
# Why: #9061 in Alexa global
'http://www.gamedesign.jp/',
# Why: #9062 in Alexa global
'http://www.topmoviesdirect.com/',
# Why: #9063 in Alexa global
'http://www.mforos.com/',
# Why: #9064 in Alexa global
'http://www.pubarticles.com/',
# Why: #9065 in Alexa global
'http://www.primeshare.tv/',
# Why: #9066 in Alexa global
'http://www.flycell.com.tr/',
# Why: #9067 in Alexa global
'http://www.rapidvidz.com/',
# Why: #9068 in Alexa global
'http://www.kouclo.com/',
# Why: #9069 in Alexa global
'http://www.photography-on-the.net/',
# Why: #9070 in Alexa global
'http://www.tsn.ua/',
# Why: #9071 in Alexa global
'http://www.dreamamateurs.com/',
# Why: #9072 in Alexa global
'http://www.avenues.info/',
# Why: #9073 in Alexa global
'http://www.coolmath.com/',
# Why: #9074 in Alexa global
'http://www.pegast.ru/',
# Why: #9075 in Alexa global
'http://www.myplayyard.com/',
# Why: #9076 in Alexa global
'http://www.myscore.ru/',
# Why: #9077 in Alexa global
'http://www.theync.com/',
# Why: #9078 in Alexa global
'http://www.ducktoursoftampabay.com/',
# Why: #9079 in Alexa global
'http://www.marunadanmalayali.com/',
# Why: #9080 in Alexa global
'http://www.tribune.com.ng/',
# Why: #9081 in Alexa global
'http://www.83suncity.com/',
# Why: #9082 in Alexa global
'http://www.nissanusa.com/',
# Why: #9083 in Alexa global
'http://www.radio.de/',
# Why: #9084 in Alexa global
'http://www.diapers.com/',
# Why: #9086 in Alexa global
'http://myherbalife.com/',
# Why: #9087 in Alexa global
'http://www.flibusta.net/',
# Why: #9088 in Alexa global
'http://www.daft.ie/',
# Why: #9089 in Alexa global
'http://www.buycheapr.com/',
# Why: #9090 in Alexa global
'http://www.sportmaster.ru/',
# Why: #9091 in Alexa global
'http://www.wordhippo.com/',
# Why: #9092 in Alexa global
'http://www.gva.es/',
# Why: #9093 in Alexa global
'http://www.sport24.co.za/',
# Why: #9094 in Alexa global
'http://www.putariabrasileira.com/',
# Why: #9095 in Alexa global
'http://www.suddenlink.net/',
# Why: #9096 in Alexa global
'http://www.bangbrosnetwork.com/',
# Why: #9097 in Alexa global
'http://www.creaders.net/',
# Why: #9098 in Alexa global
'http://www.dailysteals.com/',
# Why: #9099 in Alexa global
'http://www.karakartal.com/',
# Why: #9100 in Alexa global
'http://www.tv-series.me/',
# Why: #9101 in Alexa global
'http://www.bongdaplus.vn/',
# Why: #9102 in Alexa global
'http://www.one.co.il/',
# Why: #9103 in Alexa global
'http://www.giga.de/',
# Why: #9104 in Alexa global
'http://www.contactmusic.com/',
# Why: #9105 in Alexa global
'http://www.informationweek.com/',
# Why: #9106 in Alexa global
'http://www.iqbank.ru/',
# Why: #9107 in Alexa global
'http://www.duapp.com/',
# Why: #9108 in Alexa global
'http://www.cgd.pt/',
# Why: #9109 in Alexa global
'http://www.yepporn.com/',
# Why: #9110 in Alexa global
'http://www.sharekhan.com/',
# Why: #9111 in Alexa global
'http://www.365online.com/',
# Why: #9112 in Alexa global
'http://www.thedailymeal.com/',
# Why: #9113 in Alexa global
'http://www.ag.ru/',
# Why: #9114 in Alexa global
'http://www.claro.com.ar/',
# Why: #9115 in Alexa global
'http://www.mediaworld.it/',
# Why: #9116 in Alexa global
'http://www.bestgore.com/',
# Why: #9117 in Alexa global
'http://www.mohajerist.com/',
# Why: #9118 in Alexa global
'http://www.passion-hd.com/',
# Why: #9119 in Alexa global
'http://www.smallbiztrends.com/',
# Why: #9120 in Alexa global
'http://www.vitals.com/',
# Why: #9121 in Alexa global
'http://www.rocketlawyer.com/',
# Why: #9122 in Alexa global
'http://www.vr-zone.com/',
# Why: #9123 in Alexa global
'http://www.doridro.com/',
# Why: #9124 in Alexa global
'http://www.expedia.it/',
# Why: #9125 in Alexa global
'http://www.aflam4you.tv/',
# Why: #9126 in Alexa global
'http://www.wisconsin.gov/',
# Why: #9127 in Alexa global
'http://www.chinavasion.com/',
# Why: #9128 in Alexa global
'http://www.bigpara.com/',
# Why: #9129 in Alexa global
'http://www.hightrafficacademy.com/',
# Why: #9130 in Alexa global
'http://www.novaposhta.ua/',
# Why: #9131 in Alexa global
'http://www.pearl.de/',
# Why: #9133 in Alexa global
'http://www.boobpedia.com/',
# Why: #9134 in Alexa global
'http://www.mycmapp.com/',
# Why: #9135 in Alexa global
'http://www.89.com/',
# Why: #9136 in Alexa global
'http://www.foxsportsla.com/',
# Why: #9137 in Alexa global
'http://www.annauniv.edu/',
# Why: #9138 in Alexa global
'http://www.tri.co.id/',
# Why: #9139 in Alexa global
'http://www.browsershots.org/',
# Why: #9140 in Alexa global
'http://www.newindianexpress.com/',
# Why: #9141 in Alexa global
'http://www.washingtonexaminer.com/',
# Why: #9142 in Alexa global
'http://www.mozillazine.org/',
# Why: #9143 in Alexa global
'http://www.mg.co.za/',
# Why: #9144 in Alexa global
'http://www.newalbumreleases.net/',
# Why: #9145 in Alexa global
'http://www.trombi.com/',
# Why: #9146 in Alexa global
'http://www.pimsleurapproach.com/',
# Why: #9147 in Alexa global
'http://www.decathlon.es/',
# Why: #9149 in Alexa global
'http://www.shopmania.ro/',
# Why: #9150 in Alexa global
'http://www.brokenlinkcheck.com/',
# Why: #9151 in Alexa global
'http://www.forumeiros.com/',
# Why: #9152 in Alexa global
'http://www.moreniche.com/',
# Why: #9153 in Alexa global
'http://www.falabella.com/',
# Why: #9154 in Alexa global
'http://www.turner.com/',
# Why: #9155 in Alexa global
'http://vogue.com.cn/',
# Why: #9156 in Alexa global
'http://www.reachlocal.net/',
# Why: #9157 in Alexa global
'http://www.upsc.gov.in/',
# Why: #9158 in Alexa global
'http://www.allday2.com/',
# Why: #9159 in Alexa global
'http://www.dtiserv.com/',
# Why: #9160 in Alexa global
'http://www.singaporeair.com/',
# Why: #9161 in Alexa global
'http://www.patoghu.com/',
# Why: #9162 in Alexa global
'http://www.intercambiosvirtuales.org/',
# Why: #9163 in Alexa global
'http://www.bored.com/',
# Why: #9164 in Alexa global
'http://www.nn.ru/',
# Why: #9165 in Alexa global
'http://www.24smi.org/',
# Why: #9166 in Alexa global
'http://www.mobile-review.com/',
# Why: #9167 in Alexa global
'http://www.rbs.co.uk/',
# Why: #9168 in Alexa global
'http://www.westeros.org/',
# Why: #9169 in Alexa global
'http://www.dragonfable.com/',
# Why: #9170 in Alexa global
'http://www.wg-gesucht.de/',
# Why: #9171 in Alexa global
'http://www.ebaypartnernetwork.com/',
# Why: #9172 in Alexa global
'http://www.smartsheet.com/',
# Why: #9173 in Alexa global
'http://www.askul.co.jp/',
# Why: #9174 in Alexa global
'http://www.filmai.in/',
# Why: #9175 in Alexa global
'http://www.iranianuk.com/',
# Why: #9176 in Alexa global
'http://www.zhulang.com/',
# Why: #9177 in Alexa global
'http://www.game-game.com.ua/',
# Why: #9178 in Alexa global
'http://www.jigzone.com/',
# Why: #9179 in Alexa global
'http://www.vidbull.com/',
# Why: #9180 in Alexa global
'http://www.trustpilot.com/',
# Why: #9181 in Alexa global
'http://www.baodatviet.vn/',
# Why: #9182 in Alexa global
'http://www.haaretz.com/',
# Why: #9183 in Alexa global
'http://careerbuilder.co.in/',
# Why: #9184 in Alexa global
'http://www.veikkaus.fi/',
# Why: #9185 in Alexa global
'http://www.bmw.com.cn/',
# Why: #9186 in Alexa global
'http://www.potterybarnkids.com/',
# Why: #9187 in Alexa global
'http://www.freegamelot.com/',
# Why: #9188 in Alexa global
'http://www.worldtimeserver.com/',
# Why: #9189 in Alexa global
'http://www.jigsy.com/',
# Why: #9190 in Alexa global
'http://www.widgetbox.com/',
# Why: #9191 in Alexa global
'http://www.lasexta.com/',
# Why: #9192 in Alexa global
'http://www.mediav.com/',
# Why: #9193 in Alexa global
'http://www.aintitcool.com/',
# Why: #9194 in Alexa global
'http://www.youwillfind.info/',
# Why: #9195 in Alexa global
'http://www.bharatmatrimony.com/',
# Why: #9196 in Alexa global
'http://www.translated.net/',
# Why: #9197 in Alexa global
'http://www.virginia.edu/',
# Why: #9198 in Alexa global
'http://www.5566.net/',
# Why: #9199 in Alexa global
'http://www.questionmarket.com/',
# Why: #9200 in Alexa global
'http://www.587766.com/',
# Why: #9201 in Alexa global
'http://newspickup.com/',
# Why: #9202 in Alexa global
'http://www.womansday.com/',
# Why: #9203 in Alexa global
'http://www.segodnya.ua/',
# Why: #9204 in Alexa global
'http://www.reagancoalition.com/',
# Why: #9206 in Alexa global
'http://www.trafficswarm.com/',
# Why: #9207 in Alexa global
'http://www.orbitdownloader.com/',
# Why: #9208 in Alexa global
'http://www.filmehd.net/',
# Why: #9209 in Alexa global
'http://www.porn-star.com/',
# Why: #9210 in Alexa global
'http://www.lawyers.com/',
# Why: #9211 in Alexa global
'http://www.life.hu/',
# Why: #9212 in Alexa global
'http://www.listenonrepeat.com/',
# Why: #9213 in Alexa global
'http://www.phpfox.com/',
# Why: #9214 in Alexa global
'http://www.campusexplorer.com/',
# Why: #9215 in Alexa global
'http://www.eprothomalo.com/',
# Why: #9216 in Alexa global
'http://www.linekong.com/',
# Why: #9217 in Alexa global
'http://www.blogjava.net/',
# Why: #9218 in Alexa global
'http://www.qzone.cc/',
# Why: #9219 in Alexa global
'http://www.gamespassport.com/',
# Why: #9220 in Alexa global
'http://www.bet365.es/',
# Why: #9221 in Alexa global
'http://www.bikeradar.com/',
# Why: #9222 in Alexa global
'http://www.allmonitors.net/',
# Why: #9223 in Alexa global
'http://xwh.cn/',
# Why: #9224 in Alexa global
'http://www.naijaloaded.com/',
# Why: #9225 in Alexa global
'http://www.chazidian.com/',
# Why: #9226 in Alexa global
'http://www.channeladvisor.com/',
# Why: #9227 in Alexa global
'http://www.arenabg.com/',
# Why: #9228 in Alexa global
'http://www.briian.com/',
# Why: #9230 in Alexa global
'http://www.cucirca.eu/',
# Why: #9231 in Alexa global
'http://www.mamsy.ru/',
# Why: #9232 in Alexa global
'http://www.dl4all.com/',
# Why: #9233 in Alexa global
'http://www.wethreegreens.com/',
# Why: #9234 in Alexa global
'http://www.hsbc.co.in/',
# Why: #9235 in Alexa global
'http://www.squirt.org/',
# Why: #9236 in Alexa global
'http://www.sisal.it/',
# Why: #9237 in Alexa global
'http://www.bonprix.ru/',
# Why: #9238 in Alexa global
'http://www.odn.ne.jp/',
# Why: #9239 in Alexa global
'http://www.awd.ru/',
# Why: #9240 in Alexa global
'http://www.a-q-f.com/',
# Why: #9241 in Alexa global
'http://www.4game.com/',
# Why: #9242 in Alexa global
'http://www.24timezones.com/',
# Why: #9243 in Alexa global
'http://www.fgv.br/',
# Why: #9244 in Alexa global
'http://www.topnews.in/',
# Why: #9245 in Alexa global
'http://www.roku.com/',
# Why: #9246 in Alexa global
'http://www.ulub.pl/',
# Why: #9247 in Alexa global
'http://www.launchpad.net/',
# Why: #9248 in Alexa global
'http://www.simplyhired.co.in/',
# Why: #9249 in Alexa global
'http://www.response.jp/',
# Why: #9250 in Alexa global
'http://click.ro/',
# Why: #9251 in Alexa global
'http://www.thisis50.com/',
# Why: #9252 in Alexa global
'http://www.horoscopofree.com/',
# Why: #9253 in Alexa global
'http://www.comoeumesintoquando.tumblr.com/',
# Why: #9254 in Alexa global
'http://www.dlvr.it/',
# Why: #9255 in Alexa global
'http://www.4umf.com/',
# Why: #9256 in Alexa global
'http://www.picresize.com/',
# Why: #9257 in Alexa global
'http://www.aleqt.com/',
# Why: #9258 in Alexa global
'http://www.correos.es/',
# Why: #9259 in Alexa global
'http://www.pog.com/',
# Why: #9260 in Alexa global
'http://www.dlsoftware.org/',
# Why: #9261 in Alexa global
'http://www.primekhobor.com/',
# Why: #9262 in Alexa global
'http://www.dicionarioinformal.com.br/',
# Why: #9263 in Alexa global
'http://www.flixxy.com/',
# Why: #9264 in Alexa global
'http://www.hotklix.com/',
# Why: #9265 in Alexa global
'http://www.mglclub.com/',
# Why: #9266 in Alexa global
'http://www.airdroid.com/',
# Why: #9267 in Alexa global
'http://www.9281.net/',
# Why: #9268 in Alexa global
'http://faxingw.cn/',
# Why: #9269 in Alexa global
'http://www.satu.kz/',
# Why: #9270 in Alexa global
'http://www.carambatv.ru/',
# Why: #9271 in Alexa global
'http://www.autonews.ru/',
# Why: #9272 in Alexa global
'http://www.playerinstaller.com/',
# Why: #9273 in Alexa global
'http://www.swedbank.lv/',
# Why: #9274 in Alexa global
'http://www.enladisco.com/',
# Why: #9275 in Alexa global
'http://www.lib.ru/',
# Why: #9276 in Alexa global
'http://www.revolveclothing.com/',
# Why: #9277 in Alexa global
'http://www.aftermarket.pl/',
# Why: #9278 in Alexa global
'http://www.copy.com/',
# Why: #9279 in Alexa global
'http://www.muchgames.com/',
# Why: #9280 in Alexa global
'http://www.brigitte.de/',
# Why: #9281 in Alexa global
'http://www.ticketmaster.co.uk/',
# Why: #9282 in Alexa global
'http://www.cultofmac.com/',
# Why: #9283 in Alexa global
'http://www.bankontraffic.com/',
# Why: #9284 in Alexa global
'http://www.cnnamador.com/',
# Why: #9285 in Alexa global
'http://www.dwayir.com/',
# Why: #9286 in Alexa global
'http://www.davidicke.com/',
# Why: #9287 in Alexa global
'http://www.autosport.com/',
# Why: #9288 in Alexa global
'http://www.file.org/',
# Why: #9289 in Alexa global
'http://www.subtlepatterns.com/',
# Why: #9290 in Alexa global
'http://www.playmillion.com/',
# Why: #9291 in Alexa global
'http://www.gexing.com/',
# Why: #9292 in Alexa global
'http://www.thinkphp.cn/',
# Why: #9293 in Alexa global
'http://www.zum.com/',
# Why: #9294 in Alexa global
'http://www.eskimotube.com/',
# Why: #9295 in Alexa global
'http://www.guenstiger.de/',
# Why: #9296 in Alexa global
'http://www.diesiedleronline.de/',
# Why: #9297 in Alexa global
'http://www.nelly.com/',
# Why: #9298 in Alexa global
'http://www.press24.mk/',
# Why: #9299 in Alexa global
'http://www.psdgraphics.com/',
# Why: #9300 in Alexa global
'http://www.makeupalley.com/',
# Why: #9301 in Alexa global
'http://www.cloudify.cc/',
# Why: #9302 in Alexa global
'http://www.3a6aayer.com/',
# Why: #9303 in Alexa global
'http://www.apspsc.gov.in/',
# Why: #9304 in Alexa global
'http://www.dxy.cn/',
# Why: #9305 in Alexa global
'http://www.hotnews25.com/',
# Why: #9306 in Alexa global
'http://www.symbaloo.com/',
# Why: #9307 in Alexa global
'http://www.hiroimono.org/',
# Why: #9308 in Alexa global
'http://www.enbac.com/',
# Why: #9309 in Alexa global
'http://www.pornravage.com/',
# Why: #9310 in Alexa global
'http://abcfamily.go.com/',
# Why: #9311 in Alexa global
'http://www.fewo-direkt.de/',
# Why: #9312 in Alexa global
'http://www.elog-ch.net/',
# Why: #9313 in Alexa global
'http://www.n24.de/',
# Why: #9314 in Alexa global
'http://www.englishclub.com/',
# Why: #9315 in Alexa global
'http://www.ibicn.com/',
# Why: #9316 in Alexa global
'http://www.anibis.ch/',
# Why: #9317 in Alexa global
'http://www.tehran.ir/',
# Why: #9318 in Alexa global
'http://www.streamsex.com/',
# Why: #9319 in Alexa global
'http://www.drjays.com/',
# Why: #9320 in Alexa global
'http://www.islamqa.info/',
# Why: #9321 in Alexa global
'http://www.techandgaming247.com/',
# Why: #9322 in Alexa global
'http://www.apunkachoice.com/',
# Why: #9323 in Alexa global
'http://16888.com/',
# Why: #9324 in Alexa global
'http://www.morguefile.com/',
# Why: #9325 in Alexa global
'http://www.dalealplay.com/',
# Why: #9326 in Alexa global
'http://www.spinrewriter.com/',
# Why: #9327 in Alexa global
'http://www.newsmaxhealth.com/',
# Why: #9328 in Alexa global
'http://www.myvi.ru/',
# Why: #9329 in Alexa global
'http://www.moneysavingmom.com/',
# Why: #9331 in Alexa global
'http://www.jeux-fille-gratuit.com/',
# Why: #9332 in Alexa global
'http://www.swiki.jp/',
# Why: #9333 in Alexa global
'http://nowec.com/',
# Why: #9334 in Alexa global
'http://www.opn.com/',
# Why: #9335 in Alexa global
'http://www.idiva.com/',
# Why: #9336 in Alexa global
'http://www.bnc.ca/',
# Why: #9337 in Alexa global
'http://www.eater.com/',
# Why: #9338 in Alexa global
'http://www.designcrowd.com/',
# Why: #9339 in Alexa global
'http://www.jkforum.net/',
# Why: #9340 in Alexa global
'http://www.netkeiba.com/',
# Why: #9341 in Alexa global
'http://www.practicalecommerce.com/',
# Why: #9342 in Alexa global
'http://www.genuineptr.com/',
# Why: #9343 in Alexa global
'http://www.bloog.pl/',
# Why: #9344 in Alexa global
'http://www.ladunliadi.blogspot.com/',
# Why: #9345 in Alexa global
'http://www.stclick.ir/',
# Why: #9346 in Alexa global
'http://www.anwb.nl/',
# Why: #9347 in Alexa global
'http://www.mkyong.com/',
# Why: #9348 in Alexa global
'http://www.lavoixdunord.fr/',
# Why: #9349 in Alexa global
'http://www.top-inspector.ru/',
# Why: #9350 in Alexa global
'http://www.pornicom.com/',
# Why: #9351 in Alexa global
'http://www.yithemes.com/',
# Why: #9352 in Alexa global
'http://www.canada411.ca/',
# Why: #9353 in Alexa global
'http://www.mos.ru/',
# Why: #9354 in Alexa global
'http://www.somuch.com/',
# Why: #9355 in Alexa global
'http://www.nen.com.cn/',
# Why: #9356 in Alexa global
'http://www.runtastic.com/',
# Why: #9357 in Alexa global
'http://www.cadoinpiedi.it/',
# Why: #9358 in Alexa global
'http://www.google.co.bw/',
# Why: #9359 in Alexa global
'http://www.shkolazhizni.ru/',
# Why: #9360 in Alexa global
'http://www.heroku.com/',
# Why: #9361 in Alexa global
'http://www.net114.com/',
# Why: #9362 in Alexa global
'http://www.proprofs.com/',
# Why: #9363 in Alexa global
'http://www.banathi.com/',
# Why: #9364 in Alexa global
'http://www.bunte.de/',
# Why: #9365 in Alexa global
'http://pso2.jp/',
# Why: #9366 in Alexa global
'http://www.ncsecu.org/',
# Why: #9367 in Alexa global
'http://www.globalpost.com/',
# Why: #9368 in Alexa global
'http://www.comscore.com/',
# Why: #9370 in Alexa global
'http://www.wrapbootstrap.com/',
# Why: #9371 in Alexa global
'http://www.directupload.net/',
# Why: #9372 in Alexa global
'http://www.gpotato.eu/',
# Why: #9373 in Alexa global
'http://vipsister23.com/',
# Why: #9374 in Alexa global
'http://www.shopatron.com/',
# Why: #9375 in Alexa global
'http://www.aeroflot.ru/',
# Why: #9376 in Alexa global
'http://www.asiandatingbeauties.com/',
# Why: #9377 in Alexa global
'http://www.egooad.com/',
# Why: #9378 in Alexa global
'http://www.annunci69.it/',
# Why: #9379 in Alexa global
'http://www.yext.com/',
# Why: #9380 in Alexa global
'http://www.gruenderszene.de/',
# Why: #9382 in Alexa global
'http://www.veengle.com/',
# Why: #9383 in Alexa global
'http://www.reelzhot.com/',
# Why: #9384 in Alexa global
'http://www.enstage.com/',
# Why: #9385 in Alexa global
'http://www.icnetwork.co.uk/',
# Why: #9386 in Alexa global
'http://www.scarlet-clicks.info/',
# Why: #9388 in Alexa global
'http://www.brands4friends.de/',
# Why: #9389 in Alexa global
'http://www.watchersweb.com/',
# Why: #9390 in Alexa global
'http://www.music-clips.net/',
# Why: #9391 in Alexa global
'http://www.pornyeah.com/',
# Why: #9392 in Alexa global
'http://www.thehollywoodgossip.com/',
# Why: #9393 in Alexa global
'http://www.e5.ru/',
# Why: #9394 in Alexa global
'http://www.boldchat.com/',
# Why: #9395 in Alexa global
'http://www.maskolis.com/',
# Why: #9396 in Alexa global
'http://www.ba-k.com/',
# Why: #9397 in Alexa global
'http://www.monoprice.com/',
# Why: #9398 in Alexa global
'http://www.lacoste.com/',
# Why: #9399 in Alexa global
'http://www.byu.edu/',
# Why: #9400 in Alexa global
'http://www.zqgame.com/',
# Why: #9401 in Alexa global
'http://www.mofosex.com/',
# Why: #9402 in Alexa global
'http://www.roboxchange.com/',
# Why: #9403 in Alexa global
'http://www.elnuevoherald.com/',
# Why: #9404 in Alexa global
'http://www.joblo.com/',
# Why: #9405 in Alexa global
'http://www.songtexte.com/',
# Why: #9406 in Alexa global
'http://www.goodsearch.com/',
# Why: #9407 in Alexa global
'http://www.dnevnik.bg/',
# Why: #9408 in Alexa global
'http://www.tv.nu/',
# Why: #9409 in Alexa global
'http://www.movies.com/',
# Why: #9410 in Alexa global
'http://www.ganeshaspeaks.com/',
# Why: #9411 in Alexa global
'http://www.vonage.com/',
# Why: #9412 in Alexa global
'http://www.dawhois.com/',
# Why: #9413 in Alexa global
'http://www.companieshouse.gov.uk/',
# Why: #9414 in Alexa global
'http://www.ofertix.com/',
# Why: #9415 in Alexa global
'http://www.amaderforum.com/',
# Why: #9416 in Alexa global
'http://www.directorycritic.com/',
# Why: #9417 in Alexa global
'http://www.quickfilmz.com/',
# Why: #9418 in Alexa global
'http://www.youpornos.info/',
# Why: #9419 in Alexa global
'http://www.animeultima.tv/',
# Why: #9420 in Alexa global
'http://www.php.su/',
# Why: #9421 in Alexa global
'http://www.inciswf.com/',
# Why: #9422 in Alexa global
'http://www.bayern.de/',
# Why: #9423 in Alexa global
'http://www.hotarabchat.com/',
# Why: #9424 in Alexa global
'http://www.goodlayers.com/',
# Why: #9425 in Alexa global
'http://www.billiger.de/',
# Why: #9426 in Alexa global
'http://www.ponparemall.com/',
# Why: #9427 in Alexa global
'http://www.portaltvto.com/',
# Why: #9428 in Alexa global
'http://www.filesend.to/',
# Why: #9429 in Alexa global
'http://www.isimtescil.net/',
# Why: #9430 in Alexa global
'http://www.animeid.tv/',
# Why: #9431 in Alexa global
'http://www.trivago.es/',
# Why: #9433 in Alexa global
'http://www.17u.net/',
# Why: #9434 in Alexa global
'http://www.enekas.info/',
# Why: #9435 in Alexa global
'http://www.trendsonline.mobi/',
# Why: #9436 in Alexa global
'http://www.hostinger.ru/',
# Why: #9437 in Alexa global
'http://www.navad.net/',
# Why: #9438 in Alexa global
'http://www.mysupermarket.co.uk/',
# Why: #9440 in Alexa global
'http://www.webkinz.com/',
# Why: #9441 in Alexa global
'http://askfrank.net/',
# Why: #9442 in Alexa global
'http://www.pokernews.com/',
# Why: #9443 in Alexa global
'http://www.lyricsmania.com/',
# Why: #9444 in Alexa global
'http://www.chronicle.com/',
# Why: #9446 in Alexa global
'http://www.ns.nl/',
# Why: #9447 in Alexa global
'http://www.gaopeng.com/',
# Why: #9449 in Alexa global
'http://www.lifehacker.jp/',
# Why: #9450 in Alexa global
'http://www.96down.com/',
# Why: #9451 in Alexa global
'http://www.2500sz.com/',
# Why: #9453 in Alexa global
'http://www.paginasamarillas.com/',
# Why: #9454 in Alexa global
'http://www.kproxy.com/',
# Why: #9455 in Alexa global
'http://www.irantvto.ir/',
# Why: #9456 in Alexa global
'http://www.stuffgate.com/',
# Why: #9457 in Alexa global
'http://www.exler.ru/',
# Why: #9458 in Alexa global
'http://www.disney.es/',
# Why: #9459 in Alexa global
'http://www.turbocashsurfin.com/',
# Why: #9460 in Alexa global
'http://www.xmbs.jp/',
# Why: #9461 in Alexa global
'http://www.steadyhealth.com/',
# Why: #9462 in Alexa global
'http://www.thebotnet.com/',
# Why: #9463 in Alexa global
'http://www.newscientist.com/',
# Why: #9464 in Alexa global
'http://www.ampnetzwerk.de/',
# Why: #9465 in Alexa global
'http://www.htcmania.com/',
# Why: #9466 in Alexa global
'http://www.proceso.com.mx/',
# Why: #9468 in Alexa global
'http://www.teenport.com/',
# Why: #9469 in Alexa global
'http://www.tfilm.tv/',
# Why: #9470 in Alexa global
'http://www.trck.me/',
# Why: #9471 in Alexa global
'http://www.lifestartsat21.com/',
# Why: #9472 in Alexa global
'http://www.9show.com/',
# Why: #9473 in Alexa global
'http://www.expert.ru/',
# Why: #9474 in Alexa global
'http://www.mangalam.com/',
# Why: #9475 in Alexa global
'http://beyebe.com/',
# Why: #9476 in Alexa global
'http://www.ctrls.in/',
# Why: #9477 in Alexa global
'http://www.despegar.com.mx/',
# Why: #9478 in Alexa global
'http://www.bazingamob.com/',
# Why: #9479 in Alexa global
'http://www.netmagazine.com/',
# Why: #9480 in Alexa global
'http://www.sportssnip.com/',
# Why: #9481 in Alexa global
'http://www.lik.cl/',
# Why: #9483 in Alexa global
'http://www.targobank.de/',
# Why: #9484 in Alexa global
'http://www.hamsterporn.tv/',
# Why: #9485 in Alexa global
'http://www.lastfm.ru/',
# Why: #9486 in Alexa global
'http://www.wallinside.com/',
# Why: #9487 in Alexa global
'http://www.alawar.ru/',
# Why: #9488 in Alexa global
'http://www.ogame.org/',
# Why: #9489 in Alexa global
'http://www.guardiannews.com/',
# Why: #9490 in Alexa global
'http://www.intensedebate.com/',
# Why: #9491 in Alexa global
'http://www.citrix.com/',
# Why: #9492 in Alexa global
'http://www.ppt.cc/',
# Why: #9493 in Alexa global
'http://www.kavanga.ru/',
# Why: #9494 in Alexa global
'http://www.wotif.com/',
# Why: #9495 in Alexa global
'http://www.terapeak.com/',
# Why: #9496 in Alexa global
'http://www.swalif.com/',
# Why: #9497 in Alexa global
'http://www.demotivation.me/',
# Why: #9498 in Alexa global
'http://www.liquidweb.com/',
# Why: #9499 in Alexa global
'http://www.whydontyoutrythis.com/',
# Why: #9500 in Alexa global
'http://www.techhive.com/',
# Why: #9501 in Alexa global
'http://www.stylelist.com/',
# Why: #9502 in Alexa global
'http://www.shoppersstop.com/',
# Why: #9503 in Alexa global
'http://www.muare.vn/',
# Why: #9504 in Alexa global
'http://www.filezilla-project.org/',
# Why: #9505 in Alexa global
'http://www.wowwiki.com/',
# Why: #9506 in Alexa global
'http://www.ucm.es/',
# Why: #9507 in Alexa global
'http://www.plus.pl/',
# Why: #9509 in Alexa global
'http://www.goclips.tv/',
# Why: #9510 in Alexa global
'http://www.jeddahbikers.com/',
# Why: #9511 in Alexa global
'http://www.themalaysianinsider.com/',
# Why: #9512 in Alexa global
'http://www.buzznet.com/',
# Why: #9513 in Alexa global
'http://www.moonfruit.com/',
# Why: #9514 in Alexa global
'http://www.zivame.com/',
# Why: #9515 in Alexa global
'http://www.sproutsocial.com/',
# Why: #9516 in Alexa global
'http://www.evony.com/',
# Why: #9517 in Alexa global
'http://www.valuecommerce.com/',
# Why: #9518 in Alexa global
'http://www.cecile.co.jp/',
# Why: #9519 in Alexa global
'http://www.onlineconversion.com/',
# Why: #9520 in Alexa global
'http://www.adbooth.com/',
# Why: #9521 in Alexa global
'http://www.clubpartners.ru/',
# Why: #9522 in Alexa global
'http://www.rumah123.com/',
# Why: #9523 in Alexa global
'http://www.searspartsdirect.com/',
# Why: #9524 in Alexa global
'http://www.hollywood.com/',
# Why: #9525 in Alexa global
'http://www.divx.com/',
# Why: #9526 in Alexa global
'http://www.adverts.ie/',
# Why: #9527 in Alexa global
'http://www.filfan.com/',
# Why: #9528 in Alexa global
'http://www.t3.com/',
# Why: #9529 in Alexa global
'http://www.123vidz.com/',
# Why: #9530 in Alexa global
'http://www.technicpack.net/',
# Why: #9531 in Alexa global
'http://www.mightydeals.com/',
# Why: #9532 in Alexa global
'http://www.techgig.com/',
# Why: #9533 in Alexa global
'http://www.business.gov.au/',
# Why: #9534 in Alexa global
'http://www.phys.org/',
# Why: #9535 in Alexa global
'http://www.tweepi.com/',
# Why: #9536 in Alexa global
'http://www.bobfilm.net/',
# Why: #9537 in Alexa global
'http://www.phandroid.com/',
# Why: #9538 in Alexa global
'http://www.obozrevatel.com/',
# Why: #9539 in Alexa global
'http://www.elitedaily.com/',
# Why: #9540 in Alexa global
'http://www.tcfexpress.com/',
# Why: #9541 in Alexa global
'http://www.softaculous.com/',
# Why: #9542 in Alexa global
'http://www.xo.gr/',
# Why: #9543 in Alexa global
'http://www.cargocollective.com/',
# Why: #9544 in Alexa global
'http://www.airchina.com.cn/',
# Why: #9545 in Alexa global
'http://www.epicgameads.com/',
# Why: #9546 in Alexa global
'http://www.billigfluege.de/',
# Why: #9547 in Alexa global
'http://www.google.co.zm/',
# Why: #9548 in Alexa global
'http://www.flamingtext.com/',
# Why: #9549 in Alexa global
'http://www.mediatraffic.com/',
# Why: #9550 in Alexa global
'http://www.redboxinstant.com/',
# Why: #9551 in Alexa global
'http://www.tvquran.com/',
# Why: #9552 in Alexa global
'http://www.mstaml.com/',
# Why: #9553 in Alexa global
'http://www.polskieradio.pl/',
# Why: #9554 in Alexa global
'http://www.ipower.com/',
# Why: #9555 in Alexa global
'http://www.magicjack.com/',
# Why: #9556 in Alexa global
'http://www.linuxidc.com/',
# Why: #9557 in Alexa global
'http://www.audiojungle.net/',
# Why: #9558 in Alexa global
'http://www.zoomit.ir/',
# Why: #9559 in Alexa global
'http://www.celebritygossiplive.com/',
# Why: #9560 in Alexa global
'http://www.entheosweb.com/',
# Why: #9561 in Alexa global
'http://www.duke.edu/',
# Why: #9562 in Alexa global
'http://www.lamchame.com/',
# Why: #9563 in Alexa global
'http://www.trinixy.ru/',
# Why: #9564 in Alexa global
'http://www.heroeswm.ru/',
# Why: #9565 in Alexa global
'http://www.leovegas.com/',
# Why: #9566 in Alexa global
'http://www.redvak.com/',
# Why: #9567 in Alexa global
'http://www.wpexplorer.com/',
# Why: #9568 in Alexa global
'http://www.pornosexxxtits.com/',
# Why: #9569 in Alexa global
'http://www.thatrendsystem.com/',
# Why: #9570 in Alexa global
'http://www.minutouno.com/',
# Why: #9571 in Alexa global
'http://www.dnes.bg/',
# Why: #9572 in Alexa global
'http://www.raqq.com/',
# Why: #9573 in Alexa global
'http://www.misr5.com/',
# Why: #9574 in Alexa global
'http://www.m6replay.fr/',
# Why: #9575 in Alexa global
'http://www.ciao.es/',
# Why: #9576 in Alexa global
'http://www.indiatvnews.com/',
# Why: #9577 in Alexa global
'http://www.transunion.com/',
# Why: #9578 in Alexa global
'http://www.mha.nic.in/',
# Why: #9579 in Alexa global
'http://www.listia.com/',
# Why: #9580 in Alexa global
'http://www.duba.net/',
# Why: #9581 in Alexa global
'http://www.apec.fr/',
# Why: #9582 in Alexa global
'http://www.dexknows.com/',
# Why: #9583 in Alexa global
'http://www.americangirl.com/',
# Why: #9584 in Alexa global
'http://www.seekbang.com/',
# Why: #9585 in Alexa global
'http://www.greenmangaming.com/',
# Why: #9586 in Alexa global
'http://www.ptfish.com/',
# Why: #9587 in Alexa global
'http://www.myjob.com.cn/',
# Why: #9588 in Alexa global
'http://www.mistrzowie.org/',
# Why: #9589 in Alexa global
'http://www.chinatrust.com.tw/',
# Why: #9590 in Alexa global
'http://kongfz.com/',
# Why: #9591 in Alexa global
'http://www.finam.ru/',
# Why: #9592 in Alexa global
'http://www.tapiture.com/',
# Why: #9593 in Alexa global
'http://www.beon.ru/',
# Why: #9594 in Alexa global
'http://www.redsurf.ru/',
# Why: #9595 in Alexa global
'http://www.jamiiforums.com/',
# Why: #9596 in Alexa global
'http://www.grannysextubez.com/',
# Why: #9597 in Alexa global
'http://www.adlux.com/',
# Why: #9598 in Alexa global
'http://www.just-eat.co.uk/',
# Why: #9599 in Alexa global
'http://www.live24.gr/',
# Why: #9600 in Alexa global
'http://www.moip.com.br/',
# Why: #9601 in Alexa global
'http://www.chanel.com/',
# Why: #9602 in Alexa global
'http://www.sbs.co.kr/',
# Why: #9603 in Alexa global
'http://www.screwfix.com/',
# Why: #9604 in Alexa global
'http://www.trivago.it/',
# Why: #9605 in Alexa global
'http://airw.net/',
# Why: #9606 in Alexa global
'http://www.dietnavi.com/',
# Why: #9607 in Alexa global
'http://www.spartoo.es/',
# Why: #9608 in Alexa global
'http://www.game-debate.com/',
# Why: #9609 in Alexa global
'http://www.rotahaber.com/',
# Why: #9611 in Alexa global
'http://www.google.md/',
# Why: #9612 in Alexa global
'http://www.pornsex69.com/',
# Why: #9613 in Alexa global
'http://tmgonlinemedia.nl/',
# Why: #9614 in Alexa global
'http://www.myvoffice.com/',
# Why: #9615 in Alexa global
'http://www.wroclaw.pl/',
# Why: #9616 in Alexa global
'http://www.finansbank.com.tr/',
# Why: #9617 in Alexa global
'http://www.govdelivery.com/',
# Why: #9618 in Alexa global
'http://www.gamesbox.com/',
# Why: #9619 in Alexa global
'http://37wan.com/',
# Why: #9620 in Alexa global
'http://www.portableapps.com/',
# Why: #9621 in Alexa global
'http://www.dateinasia.com/',
# Why: #9623 in Alexa global
'http://www.northerntool.com/',
# Why: #9624 in Alexa global
'http://www.51pinwei.com/',
# Why: #9625 in Alexa global
'http://www.ocregister.com/',
# Why: #9626 in Alexa global
'http://www.noelshack.com/',
# Why: #9627 in Alexa global
'http://www.ipanelonline.com/',
# Why: #9628 in Alexa global
'http://www.klart.se/',
# Why: #9629 in Alexa global
'http://www.ismedia.jp/',
# Why: #9630 in Alexa global
'http://hqew.com/',
# Why: #9631 in Alexa global
'http://www.moodle.org/',
# Why: #9632 in Alexa global
'http://www.westernunion.fr/',
# Why: #9633 in Alexa global
'http://www.medindia.net/',
# Why: #9634 in Alexa global
'http://www.sencha.com/',
# Why: #9635 in Alexa global
'http://www.moveon.org/',
# Why: #9636 in Alexa global
'http://www.sipeliculas.com/',
# Why: #9637 in Alexa global
'http://www.beachbody.com/',
# Why: #9639 in Alexa global
'http://www.experts-exchange.com/',
# Why: #9640 in Alexa global
'http://www.davidsbridal.com/',
# Why: #9641 in Alexa global
'http://www.apotheken-umschau.de/',
# Why: #9642 in Alexa global
'http://www.melaleuca.com/',
# Why: #9643 in Alexa global
'http://www.cdbaby.com/',
# Why: #9644 in Alexa global
'http://www.humblebundle.com/',
# Why: #9645 in Alexa global
'http://www.telenet.be/',
# Why: #9646 in Alexa global
'http://www.labaq.com/',
# Why: #9647 in Alexa global
'http://www.smartaddons.com/',
# Why: #9648 in Alexa global
'http://www.vukajlija.com/',
# Why: #9649 in Alexa global
'http://www.zalando.es/',
# Why: #9650 in Alexa global
'http://www.articlerich.com/',
# Why: #9651 in Alexa global
'http://www.dm456.com/',
# Why: #9652 in Alexa global
'http://www.global-adsopt.com/',
# Why: #9653 in Alexa global
'http://www.forumophilia.com/',
# Why: #9654 in Alexa global
'http://www.dafiti.com.mx/',
# Why: #9655 in Alexa global
'http://www.funnystuff247.org/',
# Why: #9656 in Alexa global
'http://www.300mbfilms.com/',
# Why: #9657 in Alexa global
'http://www.xvideospornogratis.com/',
# Why: #9658 in Alexa global
'http://www.readnovel.com/',
# Why: #9659 in Alexa global
'http://www.khmer-news.org/',
# Why: #9660 in Alexa global
'http://www.media970.com/',
# Why: #9661 in Alexa global
'http://www.zwinky.com/',
# Why: #9662 in Alexa global
'http://www.newsbullet.in/',
# Why: #9663 in Alexa global
'http://www.pingfarm.com/',
# Why: #9664 in Alexa global
'http://www.lovetoknow.com/',
# Why: #9665 in Alexa global
'http://www.dntx.com/',
# Why: #9666 in Alexa global
'http://www.dip.jp/',
# Why: #9667 in Alexa global
'http://www.pap.fr/',
# Why: #9668 in Alexa global
'http://www.dizzcloud.com/',
# Why: #9669 in Alexa global
'http://www.nav.no/',
# Why: #9670 in Alexa global
'http://www.lotto.pl/',
# Why: #9671 in Alexa global
'http://www.freemp3whale.com/',
# Why: #9672 in Alexa global
'http://www.smartadserver.com/',
# Why: #9673 in Alexa global
'http://www.westpac.co.nz/',
# Why: #9674 in Alexa global
'http://www.kenrockwell.com/',
# Why: #9675 in Alexa global
'http://www.hongkongpost.com/',
# Why: #9676 in Alexa global
'http://www.delish.com/',
# Why: #9677 in Alexa global
'http://www.islam-lovers.com/',
# Why: #9678 in Alexa global
'http://www.edis.at/',
# Why: #9679 in Alexa global
'http://www.avery.com/',
# Why: #9680 in Alexa global
'http://www.giaitri.com/',
# Why: #9681 in Alexa global
'http://www.linksmanagement.com/',
# Why: #9682 in Alexa global
'http://www.beruby.com/',
# Why: #9683 in Alexa global
'http://www.1stwebgame.com/',
# Why: #9684 in Alexa global
'http://www.whocallsme.com/',
# Why: #9685 in Alexa global
'http://www.westwood.com/',
# Why: #9686 in Alexa global
'http://www.lmaohub.com/',
# Why: #9687 in Alexa global
'http://www.theresumator.com/',
# Why: #9688 in Alexa global
'http://www.nude.tv/',
# Why: #9689 in Alexa global
'http://www.nvrcp.com/',
# Why: #9690 in Alexa global
'http://www.bebinin.com/',
# Why: #9691 in Alexa global
'http://www.buddypress.org/',
# Why: #9693 in Alexa global
'http://www.uitzendinggemist.nl/',
# Why: #9694 in Alexa global
'http://www.majorleaguegaming.com/',
# Why: #9695 in Alexa global
'http://www.phpclasses.org/',
# Why: #9696 in Alexa global
'http://www.inteligo.pl/',
# Why: #9697 in Alexa global
'http://www.pinkbike.com/',
# Why: #9698 in Alexa global
'http://www.songlyrics.com/',
# Why: #9699 in Alexa global
'http://www.ct.gov/',
# Why: #9700 in Alexa global
'http://www.timeslive.co.za/',
# Why: #9701 in Alexa global
'http://www.snapwidget.com/',
# Why: #9702 in Alexa global
'http://www.watchkart.com/',
# Why: #9703 in Alexa global
'http://www.col3negoriginalcom.com/',
# Why: #9704 in Alexa global
'http://www.bronto.com/',
# Why: #9705 in Alexa global
'http://www.coasttocoastam.com/',
# Why: #9706 in Alexa global
'http://www.theladbible.com/',
# Why: #9707 in Alexa global
'http://narkive.com/',
# Why: #9708 in Alexa global
'http://www.the-village.ru/',
# Why: #9709 in Alexa global
'http://www.roem.ru/',
# Why: #9710 in Alexa global
'http://www.hi-pda.com/',
# Why: #9711 in Alexa global
'http://www.411.info/',
# Why: #9712 in Alexa global
'http://www.likesasap.com/',
# Why: #9713 in Alexa global
'http://www.blitz.bg/',
# Why: #9714 in Alexa global
'http://www.goodfon.ru/',
# Why: #9715 in Alexa global
'http://www.desktopnexus.com/',
# Why: #9716 in Alexa global
'http://www.demis.ru/',
# Why: #9717 in Alexa global
'http://www.begun.ru/',
# Why: #9718 in Alexa global
'http://www.ekikara.jp/',
# Why: #9719 in Alexa global
'http://www.linktech.cn/',
# Why: #9720 in Alexa global
'http://www.tezaktrafficpower.com/',
# Why: #9721 in Alexa global
'http://www.videos.com/',
# Why: #9722 in Alexa global
'http://www.pnet.co.za/',
# Why: #9723 in Alexa global
'http://www.rds.ca/',
# Why: #9724 in Alexa global
'http://www.dlink.com/',
# Why: #9725 in Alexa global
'http://www.ispajuegos.com/',
# Why: #9726 in Alexa global
'http://www.foxsportsasia.com/',
# Why: #9727 in Alexa global
'http://www.lexisnexis.com/',
# Why: #9728 in Alexa global
'http://www.ddproperty.com/',
# Why: #9729 in Alexa global
'http://www.1channelmovie.com/',
# Why: #9731 in Alexa global
'http://www.postimage.org/',
# Why: #9732 in Alexa global
'http://www.rahedaneshjou.ir/',
# Why: #9733 in Alexa global
'http://www.modern.az/',
# Why: #9734 in Alexa global
'http://www.givemegay.com/',
# Why: #9735 in Alexa global
'http://www.tejaratbank.net/',
# Why: #9736 in Alexa global
'http://www.rockpapershotgun.com/',
# Why: #9737 in Alexa global
'http://www.infogue.com/',
# Why: #9738 in Alexa global
'http://www.sfora.pl/',
# Why: #9739 in Alexa global
'http://www.liberoquotidiano.it/',
# Why: #9740 in Alexa global
'http://www.forumok.com/',
# Why: #9741 in Alexa global
'http://www.infonavit.org.mx/',
# Why: #9742 in Alexa global
'http://www.bankwest.com.au/',
# Why: #9743 in Alexa global
'http://www.al-mashhad.com/',
# Why: #9744 in Alexa global
'http://www.ogame.de/',
# Why: #9745 in Alexa global
'http://www.triviatoday.com/',
# Why: #9746 in Alexa global
'http://www.topspeed.com/',
# Why: #9747 in Alexa global
'http://www.kuku123.com/',
# Why: #9748 in Alexa global
'http://www.gayforit.eu/',
# Why: #9749 in Alexa global
'http://www.alahlionline.com/',
# Why: #9750 in Alexa global
'http://www.phonegap.com/',
# Why: #9752 in Alexa global
'http://www.superhry.cz/',
# Why: #9753 in Alexa global
'http://www.sweepstakes.com/',
# Why: #9754 in Alexa global
'http://www.australianbusinessgroup.net/',
# Why: #9755 in Alexa global
'http://www.nacion.com/',
# Why: #9756 in Alexa global
'http://www.futura-sciences.com/',
# Why: #9757 in Alexa global
'http://www.education.gouv.fr/',
# Why: #9758 in Alexa global
'http://www.haott.com/',
# Why: #9759 in Alexa global
'http://www.ey.com/',
# Why: #9760 in Alexa global
'http://www.roksa.pl/',
# Why: #9761 in Alexa global
'http://www.manoramanews.com/',
# Why: #9762 in Alexa global
'http://www.secretsearchenginelabs.com/',
# Why: #9763 in Alexa global
'http://www.alitui.com/',
# Why: #9764 in Alexa global
'http://www.depor.pe/',
# Why: #9765 in Alexa global
'http://www.rbc.com/',
# Why: #9766 in Alexa global
'http://www.tvaguuco.blogspot.se/',
# Why: #9767 in Alexa global
'http://www.mediaturf.net/',
# Why: #9768 in Alexa global
'http://www.mobilemoneycode.com/',
# Why: #9769 in Alexa global
'http://www.radio-canada.ca/',
# Why: #9770 in Alexa global
'http://www.shijue.me/',
# Why: #9771 in Alexa global
'http://www.upyim.com/',
# Why: #9772 in Alexa global
'http://www.indeed.com.br/',
# Why: #9773 in Alexa global
'http://www.indianrailways.gov.in/',
# Why: #9774 in Alexa global
'http://www.myfreepaysite.com/',
# Why: #9775 in Alexa global
'http://www.adchiever.com/',
# Why: #9776 in Alexa global
'http://www.xonei.com/',
# Why: #9777 in Alexa global
'http://www.kingworldnews.com/',
# Why: #9779 in Alexa global
'http://www.twenga.fr/',
# Why: #9780 in Alexa global
'http://www.oknation.net/',
# Why: #9782 in Alexa global
'http://www.zj4v.info/',
# Why: #9783 in Alexa global
'http://www.usanetwork.com/',
# Why: #9784 in Alexa global
'http://www.carphonewarehouse.com/',
# Why: #9785 in Alexa global
'http://www.impactradius.com/',
# Why: #9786 in Alexa global
'http://www.cinepolis.com/',
# Why: #9787 in Alexa global
'http://www.tvfun.ma/',
# Why: #9788 in Alexa global
'http://www.secureupload.eu/',
# Why: #9789 in Alexa global
'http://www.sarsefiling.co.za/',
# Why: #9790 in Alexa global
'http://www.flvmplayer.com/',
# Why: #9791 in Alexa global
'http://www.gemius.com.tr/',
# Why: #9792 in Alexa global
'http://www.alibris.com/',
# Why: #9793 in Alexa global
'http://www.insomniagamer.com/',
# Why: #9795 in Alexa global
'http://www.osxdaily.com/',
# Why: #9796 in Alexa global
'http://www.novasdodia.com/',
# Why: #9797 in Alexa global
'http://www.ayuwage.com/',
# Why: #9798 in Alexa global
'http://www.c-date.it/',
# Why: #9799 in Alexa global
'http://www.meetic.es/',
# Why: #9800 in Alexa global
'http://www.cineplex.com/',
# Why: #9801 in Alexa global
'http://www.mugshots.com/',
# Why: #9802 in Alexa global
'http://www.allabolag.se/',
# Why: #9803 in Alexa global
'http://www.parentsconnect.com/',
# Why: #9804 in Alexa global
'http://www.sina.cn/',
# Why: #9805 in Alexa global
'http://www.ibis.com/',
# Why: #9806 in Alexa global
'http://find.blog.co.uk/',
# Why: #9807 in Alexa global
'http://www.findcheaters.com/',
# Why: #9808 in Alexa global
'http://www.telly.com/',
# Why: #9809 in Alexa global
'http://www.alphacoders.com/',
# Why: #9810 in Alexa global
'http://www.sciencenet.cn/',
# Why: #9811 in Alexa global
'http://www.sreality.cz/',
# Why: #9812 in Alexa global
'http://www.wall-street-exposed.com/',
# Why: #9813 in Alexa global
'http://www.mizhe.com/',
# Why: #9814 in Alexa global
'http://www.telugumatrimony.com/',
# Why: #9815 in Alexa global
'http://www.220tube.com/',
# Why: #9816 in Alexa global
'http://www.gboxapp.com/',
# Why: #9817 in Alexa global
'http://www.activeden.net/',
# Why: #9818 in Alexa global
'http://www.worldsex.com/',
# Why: #9819 in Alexa global
'http://www.tdscpc.gov.in/',
# Why: #9821 in Alexa global
'http://www.mlbtraderumors.com/',
# Why: #9822 in Alexa global
'http://www.top-channel.tv/',
# Why: #9823 in Alexa global
'http://www.publiekeomroep.nl/',
# Why: #9824 in Alexa global
'http://www.flvs.net/',
# Why: #9825 in Alexa global
'http://www.inwi.ma/',
# Why: #9826 in Alexa global
'http://www.web-ip.ru/',
# Why: #9827 in Alexa global
'http://www.er7mne.com/',
# Why: #9828 in Alexa global
'http://www.valueclickmedia.com/',
# Why: #9829 in Alexa global
'http://www.1pondo.tv/',
# Why: #9830 in Alexa global
'http://www.capcom.co.jp/',
# Why: #9831 in Alexa global
'http://www.covers.com/',
# Why: #9832 in Alexa global
'http://www.be2.it/',
# Why: #9833 in Alexa global
'http://www.e-cigarette-forum.com/',
# Why: #9834 in Alexa global
'http://www.himarin.net/',
# Why: #9835 in Alexa global
'http://www.indiainfoline.com/',
# Why: #9836 in Alexa global
'http://www.51gxqm.com/',
# Why: #9837 in Alexa global
'http://www.sebank.se/',
# Why: #9838 in Alexa global
'http://www.18inhd.com/',
# Why: #9839 in Alexa global
'http://www.unionbankonline.co.in/',
# Why: #9840 in Alexa global
'http://www.filetram.com/',
# Why: #9841 in Alexa global
'http://www.santasporngirls.com/',
# Why: #9842 in Alexa global
'http://www.drupal.ru/',
# Why: #9843 in Alexa global
'http://www.tokfm.pl/',
# Why: #9844 in Alexa global
'http://www.steamgifts.com/',
# Why: #9845 in Alexa global
'http://www.residentadvisor.net/',
# Why: #9846 in Alexa global
'http://www.magento.com/',
# Why: #9847 in Alexa global
'http://www.28.com/',
# Why: #9848 in Alexa global
'http://www.style.com/',
# Why: #9849 in Alexa global
'http://www.nikkei.co.jp/',
# Why: #9850 in Alexa global
'http://www.alitalia.com/',
# Why: #9851 in Alexa global
'http://www.vudu.com/',
# Why: #9852 in Alexa global
'http://www.underarmour.com/',
# Why: #9853 in Alexa global
'http://www.wine-searcher.com/',
# Why: #9854 in Alexa global
'http://www.indiaproperty.com/',
# Why: #9855 in Alexa global
'http://www.bet365affiliates.com/',
# Why: #9856 in Alexa global
'http://www.cnnewmusic.com/',
# Why: #9857 in Alexa global
'http://www.longdo.com/',
# Why: #9858 in Alexa global
'http://www.destructoid.com/',
# Why: #9859 in Alexa global
'http://diyifanwen.com/',
# Why: #9860 in Alexa global
'http://www.logic-immo.com/',
# Why: #9861 in Alexa global
'http://www.mate1.com/',
# Why: #9862 in Alexa global
'http://www.pissedconsumer.com/',
# Why: #9863 in Alexa global
'http://www.blocked-website.com/',
# Why: #9864 in Alexa global
'http://www.cremonamostre.it/',
# Why: #9865 in Alexa global
'http://www.sayidaty.net/',
# Why: #9866 in Alexa global
'http://www.globalewallet.com/',
# Why: #9867 in Alexa global
'http://www.maxgames.com/',
# Why: #9868 in Alexa global
'http://www.auctionzip.com/',
# Why: #9870 in Alexa global
'http://www.aldaniti.net/',
# Why: #9871 in Alexa global
'http://www.workle.ru/',
# Why: #9872 in Alexa global
'http://www.arduino.cc/',
# Why: #9873 in Alexa global
'http://www.buenosaires.gob.ar/',
# Why: #9874 in Alexa global
'http://www.overtenreps.com/',
# Why: #9876 in Alexa global
'http://www.enalquiler.com/',
# Why: #9877 in Alexa global
'http://www.gazetadopovo.com.br/',
# Why: #9878 in Alexa global
'http://www.hftogo.com/',
# Why: #9879 in Alexa global
'http://www.usana.com/',
# Why: #9880 in Alexa global
'http://www.bancochile.cl/',
# Why: #9881 in Alexa global
'http://www.on24.com/',
# Why: #9882 in Alexa global
'http://www.samenblog.com/',
# Why: #9883 in Alexa global
'http://www.goindigo.in/',
# Why: #9884 in Alexa global
'http://www.iranvij.ir/',
# Why: #9885 in Alexa global
'http://www.postfinance.ch/',
# Why: #9886 in Alexa global
'http://www.grupobancolombia.com/',
# Why: #9887 in Alexa global
'http://www.flycell.pe/',
# Why: #9888 in Alexa global
'http://www.sobesednik.ru/',
# Why: #9889 in Alexa global
'http://www.banglalionwimax.com/',
# Why: #9890 in Alexa global
'http://www.yasni.com/',
# Why: #9891 in Alexa global
'http://www.diziizle.net/',
# Why: #9892 in Alexa global
'http://www.publichd.se/',
# Why: #9893 in Alexa global
'http://www.socialsurveycenter.com/',
# Why: #9894 in Alexa global
'http://www.blockbuster.com/',
# Why: #9895 in Alexa global
'http://www.el-ahly.com/',
# Why: #9896 in Alexa global
'http://www.1gb.ru/',
# Why: #9897 in Alexa global
'http://www.utah.edu/',
# Why: #9898 in Alexa global
'http://www.dziennik.pl/',
# Why: #9899 in Alexa global
'http://www.tizerads.com/',
# Why: #9901 in Alexa global
'http://www.global-free-classified-ads.com/',
# Why: #9902 in Alexa global
'http://www.afp.com/',
# Why: #9903 in Alexa global
'http://www.tiberiumalliances.com/',
# Why: #9904 in Alexa global
'http://www.worldstaruncut.com/',
# Why: #9905 in Alexa global
'http://www.watchfreeinhd.com/',
# Why: #9906 in Alexa global
'http://www.5278.cc/',
# Why: #9907 in Alexa global
'http://www.azdrama.info/',
# Why: #9908 in Alexa global
'http://fjsen.com/',
# Why: #9909 in Alexa global
'http://www.fandongxi.com/',
# Why: #9910 in Alexa global
'http://www.spicytranny.com/',
# Why: #9911 in Alexa global
'http://www.parsonline.net/',
# Why: #9912 in Alexa global
'http://www.libreoffice.org/',
# Why: #9913 in Alexa global
'http://www.atlassian.com/',
# Why: #9914 in Alexa global
'http://www.europeantour.com/',
# Why: #9915 in Alexa global
'http://www.smartsource.com/',
# Why: #9916 in Alexa global
'http://www.ashford.edu/',
# Why: #9917 in Alexa global
'http://www.moo.com/',
# Why: #9918 in Alexa global
'http://www.bplaced.net/',
# Why: #9919 in Alexa global
'http://www.themify.me/',
# Why: #9920 in Alexa global
'http://www.holidaypromo.info/',
# Why: #9921 in Alexa global
'http://www.nta.go.jp/',
# Why: #9922 in Alexa global
'http://www.kanglu.com/',
# Why: #9923 in Alexa global
'http://www.yicai.com/',
# Why: #9924 in Alexa global
'http://www.classesusa.com/',
# Why: #9925 in Alexa global
'http://www.huoche.net/',
# Why: #9926 in Alexa global
'http://www.linkomanija.net/',
# Why: #9927 in Alexa global
'http://www.blog.de/',
# Why: #9928 in Alexa global
'http://www.vw.com.tr/',
# Why: #9929 in Alexa global
'http://www.worldgmn.com/',
# Why: #9930 in Alexa global
'http://www.tommy.com/',
# Why: #9931 in Alexa global
'http://www.100bt.com/',
# Why: #9932 in Alexa global
'http://www.springsource.org/',
# Why: #9933 in Alexa global
'http://www.betfairinvest.com/',
# Why: #9934 in Alexa global
'http://www.broker.to/',
# Why: #9935 in Alexa global
'http://www.islamstory.com/',
# Why: #9937 in Alexa global
'http://www.sparebank1.no/',
# Why: #9938 in Alexa global
'http://www.towleroad.com/',
# Why: #9939 in Alexa global
'http://www.jetcost.com/',
# Why: #9940 in Alexa global
'http://www.pinping.com/',
# Why: #9941 in Alexa global
'http://www.millenniumbcp.pt/',
# Why: #9942 in Alexa global
'http://www.vikatan.com/',
# Why: #9943 in Alexa global
'http://www.dorkly.com/',
# Why: #9944 in Alexa global
'http://www.clubedohardware.com.br/',
# Why: #9945 in Alexa global
'http://www.fclub.cn/',
# Why: #9946 in Alexa global
'http://www.any.gs/',
# Why: #9947 in Alexa global
'http://www.danskebank.dk/',
# Why: #9948 in Alexa global
'http://www.tvmongol.com/',
# Why: #9949 in Alexa global
'http://www.ahnegao.com.br/',
# Why: #9950 in Alexa global
'http://www.filipinocupid.com/',
# Why: #9951 in Alexa global
'http://www.casacinemas.com/',
# Why: #9952 in Alexa global
'http://www.standvirtual.com/',
# Why: #9953 in Alexa global
'http://www.nbg.gr/',
# Why: #9954 in Alexa global
'http://www.onlywire.com/',
# Why: #9955 in Alexa global
'http://www.megacurioso.com.br/',
# Why: #9956 in Alexa global
'http://www.elaph.com/',
# Why: #9957 in Alexa global
'http://www.xvideos-field5.com/',
# Why: #9958 in Alexa global
'http://www.base.de/',
# Why: #9959 in Alexa global
'http://www.zzstream.li/',
# Why: #9960 in Alexa global
'http://www.qype.co.uk/',
# Why: #9961 in Alexa global
'http://www.ubergizmo.com/',
# Why: #9963 in Alexa global
'http://www.habervaktim.com/',
# Why: #9965 in Alexa global
'http://www.nationaljournal.com/',
# Why: #9966 in Alexa global
'http://www.fanslave.com/',
# Why: #9967 in Alexa global
'http://www.agreementfind.com/',
# Why: #9968 in Alexa global
'http://www.unionbankph.com/',
# Why: #9969 in Alexa global
'http://www.hometalk.com/',
# Why: #9970 in Alexa global
'http://www.hotnigerianjobs.com/',
# Why: #9971 in Alexa global
'http://www.infoq.com/',
# Why: #9972 in Alexa global
'http://www.matalan.co.uk/',
# Why: #9973 in Alexa global
'http://www.hottopic.com/',
# Why: #9974 in Alexa global
'http://www.hammihan.com/',
# Why: #9976 in Alexa global
'http://www.stsoftware.biz/',
# Why: #9977 in Alexa global
'http://www.elimparcial.com/',
# Why: #9978 in Alexa global
'http://www.lingualeo.ru/',
# Why: #9979 in Alexa global
'http://www.firstdirect.com/',
# Why: #9980 in Alexa global
'http://www.linkprosperity.com/',
# Why: #9982 in Alexa global
'http://www.ele.me/',
# Why: #9983 in Alexa global
'http://www.beep.com/',
# Why: #9984 in Alexa global
'http://w-t-f.jp/',
# Why: #9985 in Alexa global
'http://www.netcombo.com.br/',
# Why: #9986 in Alexa global
'http://www.meme.li/',
# Why: #9987 in Alexa global
'http://www.privateproperty.co.za/',
# Why: #9988 in Alexa global
'http://www.wunderlist.com/',
# Why: #9989 in Alexa global
'http://www.designyoutrust.com/',
# Why: #9990 in Alexa global
'http://century21.com/',
# Why: #9991 in Alexa global
'http://www.huuto.net/',
# Why: #9992 in Alexa global
'http://www.adsoftheworld.com/',
# Why: #9993 in Alexa global
'http://www.kabu.co.jp/',
# Why: #9994 in Alexa global
'http://www.vouchercodes.co.uk/',
# Why: #9995 in Alexa global
'http://www.allyou.com/',
# Why: #9996 in Alexa global
'http://www.mastemplate.com/',
# Why: #9997 in Alexa global
'http://www.bolha.com/',
# Why: #9998 in Alexa global
'http://www.tastyplay.com/',
# Why: #9999 in Alexa global
'http://www.busuk.org/']
for url in urls_list:
self.AddPage(Alexa1To10000Page(url, self))
| chromium2014/src | tools/perf/page_sets/alexa1-10000.py | Python | bsd-3-clause | 666,348 | [
"ADF",
"CASINO",
"MOE"
] | 5539744288798ad940b017d7b8065616e43a3a6174ee310070d12953614c2786 |
# coding=utf-8
"""Base class for Ladybug Data Collections."""
from __future__ import division
from .header import Header
from .datatype.base import DataTypeBase
from .datatype import TYPESDICT, BASETYPES
try:
    from collections.abc import Iterable  # modern location, python >= 3.3
except ImportError:
    from collections import Iterable  # legacy location, python < 3.3
from string import ascii_lowercase
import math
try:
from itertools import izip as zip # python 2
except ImportError:
xrange = range # python 3
class BaseCollection(object):
"""Base class for all Data Collections.
Args:
header: A Ladybug Header object.
values: A list of values.
datetimes: A list of Ladybug DateTime objects that aligns with
the list of values.
"""
__slots__ = ('_header', '_values', '_datetimes', '_validated_a_period')
_collection_type = None
_mutable = True
_enumeration = None
def __init__(self, header, values, datetimes):
    """Initialize a base collection from a header plus aligned values/datetimes.

    Args:
        header: A Ladybug Header object.
        values: A list of values.
        datetimes: A list of Ladybug DateTime objects that aligns with
            the list of values.
    """
    assert isinstance(header, Header), \
        'header must be a Ladybug Header object. Got {}'.format(type(header))
    # datetimes must be a real sequence; strings/dicts/bytes iterate but are invalid
    is_sequence = isinstance(datetimes, Iterable) \
        and not isinstance(datetimes, (str, dict, bytes, bytearray))
    assert is_sequence, \
        'datetimes should be a list or tuple. Got {}'.format(type(datetimes))
    self._header = header
    self._datetimes = tuple(datetimes)
    self.values = values  # property setter validates against the data type
    self._validated_a_period = False
@classmethod
def from_dict(cls, data):
"""Create a Data Collection from a dictionary.
Args:
data: A python dictionary in the following format
.. code-block:: python
{
"header": {}, # A Ladybug Header
"values": [], # An array of values
"datetimes": [], # An array of datetimes
"validated_a_period": True # Boolean for whether header analysis_period
# is valid
}
"""
assert 'header' in data, 'Required keyword "header" is missing!'
assert 'values' in data, 'Required keyword "values" is missing!'
assert 'datetimes' in data, 'Required keyword "datetimes" is missing!'
coll = cls(Header.from_dict(data['header']), data['values'], data['datetimes'])
if 'validated_a_period' in data:
coll._validated_a_period = data['validated_a_period']
return coll
@property
def header(self):
"""Get the header for this collection."""
return self._header
@property
def datetimes(self):
"""Get a tuple of datetimes for this collection, which align with the values."""
return self._datetimes
@property
def values(self):
"""Get a tuple of numerical values for this collection."""
return tuple(self._values)
@values.setter
def values(self, values):
    # validate first; only store once the values pass the collection's checks
    self._check_values(values)
    self._values = list(values)
@property
def validated_a_period(self):
"""Boolean for whether the header analysis_period is validated against datetimes.
This will always be True when a collection is derived from a continuous one.
"""
return self._validated_a_period
@property
def bounds(self):
"""Get a tuple of two value as (min, max) of the data."""
return (min(self._values), max(self._values))
@property
def min(self):
"""Get the min of the Data Collection values."""
return min(self._values)
@property
def max(self):
"""Get the max of the Data Collection values."""
return max(self._values)
@property
def average(self):
"""Get the average of the Data Collection values."""
return sum(self._values) / len(self._values)
@property
def median(self):
    """Get the median (50th percentile) of the Data Collection values."""
    return self._percentile(self._values, 50)
@property
def total(self):
"""Get the total of the Data Collection values."""
return sum(self._values)
def convert_to_unit(self, unit):
    """Convert the Data Collection to the input unit, mutating this collection.

    Use to_unit to get a converted copy of the collection without mutating
    this one.

    Args:
        unit: Text for a unit supported by the collection's data type.
    """
    converted = self._header.data_type.to_unit(
        self._values, unit, self._header.unit)
    self._values = converted
    self._header._unit = unit
def convert_to_ip(self):
    """Convert the Data Collection to IP units, mutating this collection.

    Use to_ip to get a converted copy of the collection without mutating
    this one.
    """
    new_vals, new_unit = self._header.data_type.to_ip(
        self._values, self._header.unit)
    self._values = new_vals
    self._header._unit = new_unit
def convert_to_si(self):
    """Convert the Data Collection to SI units, mutating this collection.

    Use to_si to get a converted copy of the collection without mutating
    this one.
    """
    new_vals, new_unit = self._header.data_type.to_si(
        self._values, self._header.unit)
    self._values = new_vals
    self._header._unit = new_unit
def to_unit(self, unit):
    """Get a new Data Collection converted to the input unit.

    Args:
        unit: Text for the unit to convert the data to (eg. 'C' or 'kWh'). This
            unit must appear under the data collection's header.data_type.units.
    """
    dup = self.duplicate()
    dup.convert_to_unit(unit)
    return dup
def to_ip(self):
    """Get a new Data Collection converted to IP units."""
    dup = self.duplicate()
    dup.convert_to_ip()
    return dup
def to_si(self):
    """Get a new Data Collection converted to SI units."""
    dup = self.duplicate()
    dup.convert_to_si()
    return dup
def is_in_data_type_range(self, raise_exception=True):
    """Check that all values lie within the physically possible range of the data_type.

    A False result means at least one value is physically or mathematically
    impossible for the data_type (eg. temperature below absolute zero).

    Args:
        raise_exception: Boolean to note whether an exception should be raised
            if an impossible value is found. (Default: True).
    """
    return self._header.data_type.is_in_range(
        self._values, self._header.unit, raise_exception)
def to_mutable(self):
    """Get a mutable version of this collection (a plain duplicate)."""
    return self.duplicate()
def to_immutable(self):
    """Get an immutable version of this collection."""
    # lazily build the mutable/immutable class lookup the first time it is needed
    if self._enumeration is None:
        self._get_mutable_enumeration()
    immutable_cls = self._enumeration['immutable'][self._collection_type]
    result = immutable_cls(self.header, self.values, self.datetimes)
    result._validated_a_period = self._validated_a_period
    return result
def normalize_by_area(self, area, area_unit):
    """Get a Data Collection that is normalized by an area value.

    Note that this method will raise a ValueError if the data type in the header
    of the data collection does not have a normalized_type. Also note that a
    ZeroDivisionError will be raised if the input area is equal to 0.

    Args:
        area: Number representing area by which all of the data is normalized.
        area_unit: Text for the units that the area value is in. Acceptable
            inputs include 'm2', 'ft2' and any other unit that is supported
            in the normalized_type of this datacollection's data type.

    Returns:
        A new Data Collection with values divided by the area and a header
        carrying the normalized data type and compound unit (eg. kWh/m2).
    """
    # get an instance of the normalized data type
    head = self.header
    norm_type_class = head.data_type.normalized_type
    assert norm_type_class is not None, \
        'Data type "{}" cannot be normalized by area to yield a useful '\
        'metric.'.format(head.data_type)
    # create the new data collection and assign normalized values
    new_data_c = self.duplicate()
    new_data_c._values = [val / area for val in self.values]
    # normalize the data type and unit in the header
    new_data_c._header._unit = '{}/{}'.format(head.unit, area_unit)
    new_data_c._header._data_type = norm_type_class()
    # verify the compound unit is actually supported by the normalized type
    new_data_c._header._data_type.is_unit_acceptable(new_data_c._header._unit)
    if 'type' in head.metadata:  # key used to identify sophisticated data types
        new_data_c._header.metadata['type'] = \
            '{} {}'.format(new_data_c._header.metadata['type'], 'Intensity')
    return new_data_c
def highest_values(self, count):
"""Get a list of the the x highest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the largest values of a data collection occur. For example,
there is a European daylight code that requires an analysis for the hours
of the year with the greatest exterior illuminance level. This method
can be used to help build a schedule for such a study.
Args:
count: Integer representing the number of highest values to account for.
Returns:
A tuple with two elements.
- highest_values:
The n highest values in data list, ordered from
highest to lowest.
- highest_values_index:
Indices of the n highest values in data
list, ordered from highest to lowest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be smaller than or equal to values length. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
highest_values = sorted(self._values, reverse=True)[0:count]
highest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k],
reverse=True)[0:count]
return highest_values, highest_values_index
def lowest_values(self, count):
"""Get a list of the the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
A tuple with two elements.
- highest_values:
The n lowest values in data list, ordered from
lowest to lowest.
- lowest_values_index:
Indices of the n lowest values in data
list, ordered from lowest to lowest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be <= to Data Collection len. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
lowest_values = sorted(self._values)[0:count]
lowest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k])[0:count]
return lowest_values, lowest_values_index
def percentile(self, percentile):
"""Get a value representing a the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Returns:
The Data Collection value at the input percentile
"""
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile)
def filter_by_conditional_statement(self, statement):
    """Filter the Data Collection based on a conditional statement.

    Args:
        statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
            The variable should always be named as 'a' (without quotations).

    Returns:
        A new Data Collection containing only the filtered data
    """
    _filt_values, _filt_datetimes = self._filter_by_statement(statement)
    # lazily build the mutable/immutable class lookup the first time it is needed
    if self._enumeration is None:
        self._get_mutable_enumeration()
    mutable_cls = self._enumeration['mutable'][self._collection_type]
    try:
        new_coll = mutable_cls(
            self.header.duplicate(), _filt_values, _filt_datetimes)
    except AssertionError as e:
        raise AssertionError('No value meets the conditional statement.'
                             '\n{}'.format(e))
    new_coll._validated_a_period = self._validated_a_period
    return new_coll
def filter_by_pattern(self, pattern):
    """Filter the Data Collection based on a list of booleans.

    Args:
        pattern: A list of True/False values. Typically, this is a list
            with a length matching the length of the Data Collections values
            but it can also be a pattern to be repeated over the Data Collection.

    Returns:
        A new Data Collection with filtered data
    """
    _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
    # lazily build the mutable/immutable class lookup the first time it is needed
    if self._enumeration is None:
        self._get_mutable_enumeration()
    mutable_cls = self._enumeration['mutable'][self._collection_type]
    new_coll = mutable_cls(self.header.duplicate(), _filt_values, _filt_datetimes)
    new_coll._validated_a_period = self._validated_a_period
    return new_coll
def is_collection_aligned(self, data_collection):
"""Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Returns:
True if collections are aligned, False if not aligned
"""
if self._collection_type != data_collection._collection_type:
return False
elif len(self.values) != len(data_collection.values):
return False
elif self.datetimes != data_collection.datetimes:
return False
else:
return True
def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):
    """Get a collection aligned with this one composed of one repeated value.

    Aligned Data Collections are of the same Data Collection class, have the same
    number of values and have matching datetimes.

    Args:
        value: A value to be repeated in the aliged collection values or
            A list of values that has the same length as this collection.
            Default: 0.
        data_type: The data type of the aligned collection. Default is to
            use the data type of this collection.
        unit: The unit of the aligned collection. Default is to
            use the unit of this collection or the base unit of the
            input data_type (if it exists).
        mutable: An optional Boolean to set whether the returned aligned
            collection is mutable (True) or immutable (False). The default is
            None, which will simply set the aligned collection to have the
            same mutability as the starting collection.
    """
    # set up the header of the new collection
    header = self._check_aligned_header(data_type, unit)
    # set up the values of the new collection
    values = self._check_aligned_value(value)
    # get the correct base class for the aligned collection (mutable or immutable)
    if mutable is None:
        # same mutability as this collection: reuse this collection's class
        collection = self.__class__(header, values, self.datetimes)
    else:
        # explicit mutability requested: look up the matching class
        if self._enumeration is None:
            self._get_mutable_enumeration()
        if not mutable:
            col_obj = self._enumeration['immutable'][self._collection_type]
        else:
            col_obj = self._enumeration['mutable'][self._collection_type]
        collection = col_obj(header, values, self.datetimes)
    collection._validated_a_period = self._validated_a_period
    return collection
def duplicate(self):
    """Get a copy of this Data Collection (delegates to __copy__)."""
    return self.__copy__()
def to_dict(self):
"""Convert Data Collection to a dictionary."""
return {
'header': self.header.to_dict(),
'values': self._values,
'datetimes': self.datetimes,
'validated_a_period': self._validated_a_period,
'type': self.__class__.__name__
}
@staticmethod
def filter_collections_by_statement(data_collections, statement):
    """Generate a filtered data collections according to a conditional statement.

    Args:
        data_collections: A list of aligned Data Collections to be evaluated
            against the statement.
        statement: A conditional statement as a string (e.g. a>25 and a%5==0).
            The variable should always be named as 'a' (without quotations).

    Returns:
        collections -- A list of Data Collections that have been filtered based
        on the statement.
    """
    pattern = BaseCollection.pattern_from_collections_and_statement(
        data_collections, statement)
    try:
        return [coll.filter_by_pattern(pattern) for coll in data_collections]
    except AssertionError as e:
        raise AssertionError('No value meets the conditional statement.'
                             '\n{}'.format(e))
@staticmethod
def pattern_from_collections_and_statement(data_collections, statement):
    """Generate a list of booleans from data collections and a conditional statement.

    Args:
        data_collections: A list of aligned Data Collections to be evaluated
            against the statement.
        statement: A conditional statement as a string (e.g. a>25 and a%5==0).
            The variable should always be named as 'a' (without quotations).

    Returns:
        pattern -- A list of True/False booleans with the length of the
        Data Collections where True meets the conditional statement
        and False does not.
    """
    BaseCollection.are_collections_aligned(data_collections)
    correct_var = BaseCollection._check_conditional_statement(
        statement, len(data_collections))
    # replace the operators of the statement with non-alphanumeric characters
    # necessary to avoid replacing the characters of the operators
    num_statement_clean = BaseCollection._replace_operators(statement)
    pattern = []
    for i in xrange(len(data_collections[0])):
        num_statement = num_statement_clean
        # replace the variable names with their numerical values
        for j, coll in enumerate(data_collections):
            var = correct_var[j]
            num_statement = num_statement.replace(var, str(coll[i]))
        # put back the operators
        num_statement = BaseCollection._restore_operators(num_statement)
        # SECURITY: eval of a user-supplied statement can execute arbitrary
        # code; only call this with trusted statements
        pattern.append(eval(num_statement, {}))
    return pattern
@staticmethod
def are_collections_aligned(data_collections, raise_exception=True):
"""Test if a series of Data Collections are aligned with one another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collections: A list of Data Collections for which you want to
test if they are al aligned with one another.
Returns:
True if collections are aligned, False if not aligned
"""
if len(data_collections) > 1:
first_coll = data_collections[0]
for coll in data_collections[1:]:
if not first_coll.is_collection_aligned(coll):
if raise_exception:
error_msg = '{} Data Collection is not aligned with '\
'{} Data Collection.'.format(
first_coll.header.data_type, coll.header.data_type)
raise ValueError(error_msg)
return False
return True
@staticmethod
def compute_function_aligned(funct, data_collections, data_type, unit):
"""Compute a function with a list of aligned data collections or individual values.
Args:
funct: A function with a single numerical value as output and one or
more numerical values as input.
data_collections: A list with a length equal to the number of arguments
for the function. Items of the list can be either Data Collections
or individual values to be used at each datetime of other collections.
data_type: An instance of a Ladybug data type that describes the results
of the funct.
unit: The units of the funct results.
Returns:
A Data Collection with the results function. If all items in this list of
data_collections are individual values, only a single value will be returned.
Usage:
.. code-block:: python
from ladybug.datacollection import HourlyContinuousCollection
from ladybug.epw import EPW
from ladybug.psychrometrics import humid_ratio_from_db_rh
from ladybug.datatype.percentage import HumidityRatio
epw_file_path = './epws/denver.epw'
denver_epw = EPW(epw_file_path)
pressure_at_denver = 85000
hr_inputs = [denver_epw.dry_bulb_temperature,
denver_epw.relative_humidity,
pressure_at_denver]
humid_ratio = HourlyContinuousCollection.compute_function_aligned(
humid_ratio_from_db_rh, hr_inputs, HumidityRatio(), 'fraction')
# humid_ratio will be a Data Collection of humidity ratios at Denver
"""
# check that all inputs are either data collections or floats
data_colls = []
for i, func_input in enumerate(data_collections):
if isinstance(func_input, BaseCollection):
data_colls.append(func_input)
else:
try:
data_collections[i] = float(func_input)
except ValueError:
raise TypeError('Expected a number or a Data Collection. '
'Got {}'.format(type(func_input)))
# run the function and return the result
if len(data_colls) == 0:
return funct(*data_collections)
else:
BaseCollection.are_collections_aligned(data_colls)
val_len = len(data_colls[0].values)
for i, col in enumerate(data_collections):
data_collections[i] = [col] * val_len if isinstance(col, float) else col
result = data_colls[0].get_aligned_collection(data_type=data_type, unit=unit)
for i in xrange(val_len):
result[i] = funct(*[col[i] for col in data_collections])
return result
def _time_aggregated_collection(self, timestep):
    """Get a time-aggregated version of this collection.

    Args:
        timestep: Divisor applied to the data type's time_aggregated_factor
            (presumably the number of values per hour — confirm with callers).
    """
    # get an instance of the time aggregated data type
    head = self.header
    time_class = head.data_type.time_aggregated_type
    assert time_class is not None, \
        'Data type "{}" cannot be time aggregated to yield a useful '\
        'metric.'.format(head.data_type)
    # convert to the data type's base unit before scaling the values
    new_data_c = self.to_unit(head.data_type.units[0])
    factor = head.data_type.time_aggregated_factor / timestep
    new_data_c._values = [val * factor for val in new_data_c._values]
    # swap the header's data type and unit for the aggregated equivalents
    new_data_c._header._data_type = time_class()
    new_data_c._header._unit = new_data_c._header._data_type.units[0]
    return new_data_c
def _time_rate_of_change_collection(self, timestep):
    """Get a time-rate-of-change version of this collection.

    Args:
        timestep: Divisor applied to the matched data type's
            time_aggregated_factor (presumably the number of values per
            hour — confirm with callers).

    Raises:
        ValueError: If no data type with a time-rate-of-change relationship
            to this collection's data type can be found.
    """
    head = self.header
    dat_type, time_class = head.data_type, None
    # first see if there's a specific data type for the current one
    for typ_clss in TYPESDICT.values():
        if typ_clss._time_aggregated_type is None:
            continue
        elif dat_type.__class__ == typ_clss._time_aggregated_type:
            time_class = typ_clss
            break
    # then, check to see if there's any base type
    if time_class is None:
        for typ_clss_name in BASETYPES:
            typ_clss = TYPESDICT[typ_clss_name]
            if typ_clss._time_aggregated_type is None:
                continue
            elif isinstance(dat_type, typ_clss._time_aggregated_type):
                time_class = typ_clss
                break
    # if nothing was found, throw an error
    if time_class is None:
        raise ValueError('Data type "{}" does not have a time-rate-of-'
                         'change metric.'.format(head.data_type))
    # create the new data collection and assign normalized values.
    # NOTE: use time_class here, not the leaked loop variable typ_clss --
    # relying on the loop variable after the loop is fragile and would raise
    # NameError if TYPESDICT were empty
    new_data_c = self.to_unit(head.data_type.units[0])
    factor = time_class._time_aggregated_factor / timestep
    new_data_c._values = [val / factor for val in new_data_c._values]
    new_data_c._header._data_type = time_class()
    new_data_c._header._unit = new_data_c._header._data_type.units[0]
    return new_data_c
@staticmethod
def _check_conditional_statement(statement, num_collections):
    """Validate a conditional statement and return the expected variable names.

    Args:
        statement: A conditional statement as a string (e.g. a>25 and a%5==0).
            The variable should always be named as 'a' (without quotations).
        num_collections: An integer representing the number of data collections
            that the statement will be evaluating.

    Returns:
        correct_var -- A list of the correct variable names that should be
        used within the statement (eg. ['a', 'b', 'c'])
    """
    # the first num_collections letters of the alphabet are the legal variables
    correct_var = list(ascii_lowercase)[:num_collections]
    # strip out the logical operator keywords so only variable letters remain
    cleaned = BaseCollection._remove_operators(statement)
    found_vars = [char for char in cleaned if char.isalpha()]
    # every remaining letter must be one of the legal variable names
    for var in found_vars:
        if var not in correct_var:
            raise ValueError(
                'Invalid conditional statement: {}\n '
                'Statement should be a valid Python statement'
                ' and the variables should be named as follows: {}'.format(
                    statement, ', '.join(correct_var))
            )
    return correct_var
@staticmethod
def _remove_operators(statement):
    """Strip Python logical operator keywords out of a lowercased statement."""
    cleaned = statement.lower()
    for keyword in ('and', 'or', 'not', 'in', 'is'):
        cleaned = cleaned.replace(keyword, '')
    return cleaned
@staticmethod
def _replace_operators(statement):
    """Swap logical operator keywords for non-alphanumeric placeholder symbols."""
    substitutions = (('and', '&&'), ('or', '||'), ('not', '~'),
                     ('in', '<<'), ('is', '$'))
    result = statement.lower()
    for keyword, symbol in substitutions:
        result = result.replace(keyword, symbol)
    return result
@staticmethod
def _restore_operators(statement):
    """Convert placeholder symbols back into Python logical operator keywords."""
    restorations = (('&&', 'and'), ('||', 'or'), ('~', 'not'),
                    ('<<', 'in'), ('$', 'is'))
    result = statement
    for symbol, keyword in restorations:
        result = result.replace(symbol, keyword)
    return result
@staticmethod
def linspace(start, stop, num):
    """Get evenly spaced numbers calculated over the interval start, stop.

    This method is similar to native Python range except that it takes a number of
    divisions instead of a step. It is also equivalent to numpy's linspace method.

    Args:
        start: Start interval index as integer or float.
        stop: Stop interval index as integer or float.
        num: Number of divisions as integer.

    Returns:
        A list of numbers.

    Usage:

    .. code-block:: python

        from BaseCollection import linspace

        linspace(0, 5, 6)
        # >> [0., 1., 2., 3., 4., 5.]
    """
    try:
        # a single step size from which every point is derived
        step = (stop - start) / (num - 1)
    except ZeroDivisionError:
        # num == 1 means the interval collapses to the start point
        return [start]
    return [i * step + start for i in range(num)]
@staticmethod
def arange(start, stop, step):
    """Return evenly spaced fractional or whole values within a given interval.

    This function acts like the Python range method, but can also account for
    fractional values. It is equivalent to the numpy.arange function.

    Args:
        start: Number for inclusive start of interval.
        stop: Number for exclusive end of interval.
        step: Number for step size of interval.

    Returns:
        Generator of evenly spaced values.

    Usage:

    .. code-block:: python

        from BaseCollection import arange

        arange(1, 351, 50)
        # >> [1, 51, 101, 151, 201, 251, 301]
    """
    # pick the comparison matching the direction of iteration
    if start <= stop:
        def _before_stop(v):
            return v < stop
    else:
        def _before_stop(v):
            return v > stop
    current = start
    # the 1e-10 tolerance guards against floating-point creep at the endpoint
    while _before_stop(current) and abs(current - stop) > 1e-10:
        yield current
        current += step
@staticmethod
def histogram(values, bins, key=None):
    """Compute the frequency histogram from a list of values.

    The data is binned inclusive of the lower bound but exclusive of the upper bound
    for intervals. See usage for example of losing the last number in the following
    dataset because of exclusive upper bound.

    Args:
        values: Set of numerical data as a list.
        bins: A monotonically increasing array of uniform-width bin edges, excluding
            the rightmost edge.
        key: Optional parameter to define key to bin values by, as a function. If not
            provided, the histogram will be binned by the value.

    Returns:
        A list of lists representing the ordered values binned by frequency.

    Usage:

    .. code-block:: python

        from BaseCollection import histogram

        # Simple example
        histogram([0, 0, 0.9, 1, 1.5, 1.99, 2, 3], (0, 1, 2, 3))
        # >> [[0, 0, 0.9], [1, 1.5, 1.99], [2]]

        # With key parameter
        histogram(
            zip([0, 0, 0.9, 1, 1.5, 1.99],
                ['a', 'b', 'c', 'd', 'e', 'f']),
            (0, 1, 2), key=lambda k: k[0])
        # >> [[(0, 'a'), (0, 'b'), (0.9, 'c')], [(1, 'd'), (1.5, 'e'), (1.99, 'f')]]
    """
    key_func = key if key is not None else (lambda v: v)
    ordered = sorted(values, key=key_func)
    low_edge, high_edge = min(bins), max(bins)
    edge_count = len(bins)
    # one bucket per pair of adjacent bin edges
    buckets = [[] for _ in range(edge_count - 1)]
    resume_at = 0
    for item in ordered:
        k = key_func(item)
        # ignore values outside the overall range of the bins
        if k < low_edge or k > high_edge:
            continue
        # values are sorted, so the scan can resume from the last bin used;
        # the first upper edge exceeding k identifies the bucket
        for b in range(resume_at, edge_count - 1):
            if k < bins[b + 1]:
                buckets[b].append(item)
                resume_at = b
                break
    return buckets
@staticmethod
def histogram_circular(values, bins, hist_range=None, key=None):
    """Compute the frequency histogram from a list of circular values.

    Circular values refers to a set of values where there is no distinction between
    values at the lower or upper end of the range, for example angles in a circle, or
    time. The data is binned inclusive of the lower bound but exclusive of the upper
    bound for intervals.

    Args:
        values: Set of numerical data as a list.
        bins: An array of uniform-width bin edges, excluding the rightmost edge.
            These values do not have to be monotonically increasing.
        hist_range: Optional parameter to define the lower and upper range of the
            histogram as a tuple of numbers. If not provided the range is
            ``(min(key(values)), max(key(values))+1)``.
        key: Optional parameter to define key to bin values by, as a function. If not
            provided, the histogram will be binned by the value.

    Returns:
        A list of lists representing the ordered values binned by frequency.

    Usage:

    .. code-block:: python

        from BaseCollection import histogram_circular

        histogram_circular([358, 359, 0, 1, 2, 3], (358, 0, 3))
        # >> [[358, 359], [0, 1, 2]]
    """
    if key is None:
        def key(v):
            return v
    vals = sorted(values, key=key)
    if hist_range is None:
        # derive the range from the sorted data; +1 keeps the max value binnable
        hist_range = (key(vals[0]), key(vals[-1]) + 1)
    bin_bound_num = len(bins) - 1
    # Init histogram bins
    hist = [[] for i in range(bin_bound_num)]
    for val in vals:
        k = key(val)
        # Ignore values out of range
        if k < hist_range[0] or k >= hist_range[1]:
            continue
        # This loop will iterate through the bin upper bounds.
        # If the value is within the bounds, the loop is broken.
        # Since values at the end of the list can still be binned
        # into the earlier histogram bars for circular
        # data, we don't update the bin_index.
        for i in range(bin_bound_num):
            if bins[i] < bins[i + 1]:
                # ordinary (non-wrapping) interval: [bins[i], bins[i+1])
                if k >= bins[i] and k < bins[i + 1]:
                    hist[i].append(val)
                    break
            else:
                # If the interval starts data from the end of the list,
                # split the conditional checks into two to check two
                # intervals: [bins[i], hist_range[1]] and [hist_range[0], bins[i+1])
                interval1 = (k <= hist_range[1] and k >= bins[i])
                interval2 = (k < bins[i + 1] and k >= hist_range[0])
                if interval1 or interval2:
                    hist[i].append(val)
                    break
    return hist
def _filter_by_statement(self, statement):
    """Filter the data collection based on a conditional statement.

    Args:
        statement: A string statement using the variable 'a' for each value
            (e.g. 'a > 25').

    Returns:
        A tuple of (filtered values, filtered datetimes).
    """
    # validate that the statement only uses the variable 'a'
    self.__class__._check_conditional_statement(statement, 1)
    _filt_values, _filt_datetimes = [], []
    for i, a in enumerate(self._values):
        # NOTE(review): eval() executes the statement as arbitrary Python
        # code; only trusted statements should ever reach this method.
        if eval(statement, {'a': a}):
            _filt_values.append(a)
            _filt_datetimes.append(self.datetimes[i])
    return _filt_values, _filt_datetimes
def _filter_by_pattern(self, pattern):
    """Filter the Data Collection based on a list of booleans.

    Args:
        pattern: A list of True/False values. The pattern is cycled over
            the values when it is shorter than the collection.

    Returns:
        A tuple of (filtered values, filtered datetimes).

    Raises:
        TypeError: If the pattern has no length (is not a sequence).
    """
    # the original docstring repeated "Filter the Filter the"; fixed here
    try:
        _len = len(pattern)
    except TypeError:
        raise TypeError("pattern is not a list of Booleans. Got {}".format(
            type(pattern)))
    _filt_values = [d for i, d in enumerate(self._values) if pattern[i % _len]]
    _filt_datetimes = [d for i, d in enumerate(self.datetimes) if pattern[i % _len]]
    return _filt_values, _filt_datetimes
def _check_values(self, values):
    """Validate values whenever they come through the values setter."""
    # must be a non-string, non-mapping iterable
    is_sequence = isinstance(values, Iterable) and not \
        isinstance(values, (str, dict, bytes, bytearray))
    assert is_sequence, \
        'values should be a list or tuple. Got {}'.format(type(values))
    val_count, time_count = len(values), len(self.datetimes)
    assert val_count == time_count, \
        'Length of values list must match length of datetimes list. ' \
        '{} != {}'.format(val_count, time_count)
    assert val_count > 0, 'Data Collection must include at least one value'
def _check_aligned_header(self, data_type, unit):
    """Check the header inputs whenever get_aligned_collection is called.

    Args:
        data_type: A Ladybug DataType or None to reuse this collection's type.
        unit: A unit string or None. When a data_type is given without a
            unit, the data type's first (default) unit is used.

    Returns:
        A Header built from the resolved type/unit plus this collection's
        analysis_period and metadata.
    """
    if data_type is not None:
        assert isinstance(data_type, DataTypeBase), \
            'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
        if unit is None:
            unit = data_type.units[0]
    else:
        # fall back on this collection's own data type and unit
        data_type = self.header.data_type
        unit = unit or self.header.unit
    return Header(data_type, unit, self.header.analysis_period, self.header.metadata)
def _check_aligned_value(self, value):
    """Turn the value input of get_aligned_collection into a full value list."""
    target_len = len(self._values)
    is_sequence = isinstance(value, Iterable) and not isinstance(
        value, (str, dict, bytes, bytearray))
    if not is_sequence:
        # a single value is repeated across the whole collection
        return [value] * target_len
    assert len(value) == target_len, "Length of value ({}) must match "\
        "the length of this collection's values ({})".format(
            len(value), target_len)
    return value
def _percentile(self, values, percent, key=lambda x: x):
    """Find the percentile of a list of values.

    Args:
        values: A list of values for which percentiles are desired
        percent: A float value from 0 to 100 representing the requested percentile.
        key: optional key function to compute value from each element of N.
            NOTE(review): the sort below does not use this key; with a
            non-identity key the result may not be a true percentile --
            confirm intended behavior with callers.

    Returns:
        The percentile of the values
    """
    ordered = sorted(values)
    rank = (len(ordered) - 1) * (percent / 100)
    lower = math.floor(rank)
    upper = math.ceil(rank)
    if lower == upper:
        # the rank lands exactly on an element
        return key(ordered[int(rank)])
    # otherwise linearly interpolate between the two bracketing elements
    part_low = key(ordered[int(lower)]) * (upper - rank)
    part_high = key(ordered[int(upper)]) * (rank - lower)
    return part_low + part_high
def _average(self, vals):
    """Arithmetic mean of a list of values."""
    count = len(vals)
    return sum(vals) / count
def _total(self, vals):
    """Sum of a list of values."""
    return sum(vals)
def _get_percentile_function(self, percentile):
    """Build a single-argument aggregation function for a fixed percentile."""
    def _percentile_of(vals):
        # delegate to _percentile with the captured percentile
        return self._percentile(vals, percentile)
    return _percentile_of
def _get_mutable_enumeration(self):
    """Build a lookup of all BaseCollection subclasses split by mutability.

    Populates self._enumeration with two dicts ('mutable' and 'immutable'),
    each mapping a subclass's _collection_type to the subclass itself.
    """
    self._enumeration = {'mutable': {}, 'immutable': {}}
    for clss in self._all_subclasses(BaseCollection):
        if clss._mutable:
            self._enumeration['mutable'][clss._collection_type] = clss
        else:
            self._enumeration['immutable'][clss._collection_type] = clss
def _all_subclasses(self, clss):
    """Recursively gather every (direct and indirect) subclass into a set."""
    direct = clss.__subclasses__()
    nested = [deep for sub in direct for deep in self._all_subclasses(sub)]
    return set(direct).union(nested)
def __len__(self):
    """Number of values in the collection."""
    return len(self._values)

def __getitem__(self, key):
    """Get a value (or slice of values) by index."""
    return self._values[key]

def __setitem__(self, key, value):
    """Set the value at a given index."""
    self._values[key] = value

def __iter__(self):
    """Iterate over the collection's values."""
    return iter(self._values)

def __contains__(self, item):
    """Whether a given value appears in the collection."""
    return item in self._values
def __add__(self, other):
    """Add a number or another matching data collection to this one."""
    new_vals = self._add_values(other)
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj

def __sub__(self, other):
    """Subtract a number or another matching data collection from this one."""
    new_vals = self._sub_values(other)
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj

def __mul__(self, other):
    """Multiply this collection by a number or another matching collection."""
    new_vals = self._mul_values(other)
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj

def __div__(self, other):
    """Python 2 division operator; mirrors __truediv__."""
    new_vals = self._div_values(other)
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj

def __truediv__(self, other):
    """Divide this collection by a number or another matching collection."""
    new_vals = self._div_values(other)
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj

def __neg__(self):
    """Return a new collection with all values negated."""
    new_vals = [-v_1 for v_1 in self._values]
    new_obj = self.__class__(self.header.duplicate(), new_vals, self.datetimes)
    new_obj._validated_a_period = self._validated_a_period
    return new_obj
def _add_values(self, other):
    """Values of this collection plus a scalar or another aligned collection."""
    if isinstance(other, (int, float)):
        return [val + other for val in self._values]
    # element-wise addition requires matching collection types and lengths
    assert self._collection_type == other._collection_type, \
        '{} cannot be added to {}'.format(self.__class__, other.__class__)
    assert len(self) == len(other), 'Length of DataCollections must match in ' \
        'order to add them together. {} != {}'.format(len(self), len(other))
    return [val + other_val
            for val, other_val in zip(self._values, other._values)]
def _sub_values(self, other):
    """Values of this collection minus a scalar or another aligned collection."""
    if isinstance(other, (int, float)):
        return [val - other for val in self._values]
    # element-wise subtraction requires matching collection types and lengths
    assert self._collection_type == other._collection_type, \
        '{} cannot be subtracted from {}'.format(other.__class__, self.__class__)
    assert len(self) == len(other), 'Length of DataCollections must match ' \
        'to subtract one from the other. {} != {}'.format(len(self), len(other))
    return [val - other_val
            for val, other_val in zip(self._values, other._values)]
def _mul_values(self, other):
    """Values of this collection times a scalar or another aligned collection."""
    if isinstance(other, (int, float)):
        return [val * other for val in self._values]
    # element-wise multiplication requires matching collection types and lengths
    assert self._collection_type == other._collection_type, \
        '{} cannot be multiplied by {}'.format(other.__class__, self.__class__)
    assert len(self) == len(other), 'Length of DataCollections must match ' \
        'to multiply them together. {} != {}'.format(len(self), len(other))
    return [val * other_val
            for val, other_val in zip(self._values, other._values)]
def _div_values(self, other):
    """Values of this collection divided by a scalar or another aligned collection."""
    if isinstance(other, (int, float)):
        return [val / other for val in self._values]
    # element-wise division requires matching collection types and lengths
    assert self._collection_type == other._collection_type, \
        '{} cannot be divided by {}'.format(other.__class__, self.__class__)
    assert len(self) == len(other), 'Length of DataCollections must match ' \
        'to divide them. {} != {}'.format(len(self), len(other))
    return [val / other_val
            for val, other_val in zip(self._values, other._values)]
@property
def is_continuous(self):
    """Boolean denoting whether the data collection is continuous."""
    # the discontinuous base always reports False; presumably overridden
    # by continuous subclasses -- confirm against the rest of the module
    return False

@property
def is_mutable(self):
    """Boolean denoting whether the data collection is mutable."""
    return self._mutable
def __key(self):
    # tuple of the properties that define hashing and equality
    return self.header, self.values, self.datetimes, self.validated_a_period

def __hash__(self):
    """Hash based on header, values, datetimes and validated period."""
    return hash(self.__key())

def __eq__(self, other):
    """Equal when the other object is the same class with the same key tuple."""
    return isinstance(other, self.__class__) and self.__key() == other.__key()

def __ne__(self, value):
    """Inverse of __eq__."""
    return not self.__eq__(value)
def __copy__(self):
    """Shallow copy with a duplicated header and a fresh values list."""
    collection = self.__class__(
        self.header.duplicate(), list(self._values), self.datetimes)
    collection._validated_a_period = self._validated_a_period
    return collection
def ToString(self):
    """Overwrite .NET ToString method."""
    return self.__repr__()

def __repr__(self):
    """Discontinuous Collection representation."""
    return "Discontinuous Data Collection\n{} ({})\n...{} values...".format(
        self.header.data_type, self.header.unit, len(self._values))
| ladybug-analysis-tools/ladybug-core | ladybug/_datacollectionbase.py | Python | gpl-3.0 | 45,622 | [
"EPW"
] | b0c4dfc01a6fa9d7cda198a1eb8558b370c52db9237f2a893678aa7bc86f684f |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2012 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functionality for reading from and writing to the
adjacency list format used by Reaction Mechanism Generator (RMG).
"""
import logging
import re
from .molecule import Atom, Bond
from .group import GroupAtom, GroupBond
#import chempy.molecule.atomtype as atomtypes
bond_orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
class PeriodicSystem(object):
    """Lookup tables of electronic properties for the supported elements."""

    # valence electrons per element, grouped by period of the periodic table
    valence_electrons_first_period_elements = {'H': 1, 'He': 2}
    valence_electrons_second_period_elements = {'C': 4, 'N': 5, 'O': 6, 'Ne': 8}
    valence_electrons_third_period_elements = {'Si': 4, 'S': 6, 'Cl': 7, 'Ar': 8}

    valence_electrons = {}
    valence_electrons.update(valence_electrons_first_period_elements)
    valence_electrons.update(valence_electrons_second_period_elements)
    valence_electrons.update(valence_electrons_third_period_elements)

    # default number of lone pairs for a neutral atom of each element;
    # He/Ne/Ar added for consistency with the standardLonePairs table
    # used by the adjacency-list readers elsewhere in this module
    lone_pairs = {'H': 0, 'C': 0, 'N': 1, 'O': 2, 'Si': 0, 'S': 2, 'Cl': 3,
                  'He': 1, 'Ne': 4, 'Ar': 4}
class Saturator(object):
    """Namespace for saturating the valence of heavy atoms with hydrogens."""

    @staticmethod
    def saturate(atoms):
        '''
        Extend the given list of atoms in place (and wire up bond attributes)
        by saturating the valency of the non-hydrogen atoms with an
        appropriate number of hydrogen atoms.

        The required number of hydrogen atoms per heavy atom is determined as follows:
        H's = max number of valence electrons - atom.radicalElectrons
            - 2 * atom.lonePairs - order - atom.charge

        Raises InvalidAdjacencyListError for unknown elements or when the
        computed hydrogen count is negative.
        '''
        global bond_orders  # NOTE(review): not needed for read-only access
        newAtoms = []
        for atom in atoms:
            try:
                max_number_of_valence_electrons = PeriodicSystem.valence_electrons[atom.symbol]
            except KeyError:
                raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown orbital for atom "{0}".'.format(atom.symbol))

            # sum the (possibly fractional) orders of all existing bonds
            order = 0
            for _, bond in atom.bonds.items():
                order += bond_orders[bond.order]

            number_of_H_to_be_added = max_number_of_valence_electrons - atom.radicalElectrons - 2* atom.lonePairs - int(order) - atom.charge

            if number_of_H_to_be_added < 0:
                raise InvalidAdjacencyListError('Incorrect electron configuration on atom.')

            # create the H atoms and register the new bond on both endpoints
            for _ in range(number_of_H_to_be_added):
                a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0)
                b = Bond(atom, a, 'S')
                newAtoms.append(a)
                atom.bonds[a] = b
                a.bonds[atom] = b
        # mutates the caller's list: all new hydrogens are appended at the end
        atoms.extend(newAtoms)
class ConsistencyChecker(object):
    """Namespace of static sanity checks for parsed adjacency-list atoms."""

    @staticmethod
    def check_partial_charge(atom):
        '''
        Checks whether the partial charge attribute of the atom checks out with
        the theoretical one:

        charge = valence - bond order sum - radicals - 2 * lone pairs

        Raises InvalidAdjacencyListError on a mismatch.
        '''
        global bond_orders  # NOTE(review): not needed for read-only access
        valence = PeriodicSystem.valence_electrons[atom.symbol]
        # sum the (possibly fractional) orders of all bonds on this atom
        order = 0
        for _, bond in atom.bonds.items():
            order += bond_orders[bond.order]

        theoretical = valence - order - atom.radicalElectrons - 2*atom.lonePairs

        if atom.charge != theoretical:
            raise InvalidAdjacencyListError('Invalid valency for atom {symbol} with {radicals} unpaired electrons, {lonePairs} pairs of electrons, and {charge} charge.'
                                            .format(symbol=atom.symbol, radicals=atom.radicalElectrons, lonePairs=atom.lonePairs, charge=atom.charge))

    @staticmethod
    def check_multiplicity(nRad, multiplicity):
        '''
        Check if the parameter multiplicity is consistent with the number of
        unpaired electrons, per the formula m = 2s + 1, with s the sum of the
        spin [+/- 1/2] of the unpaired electrons.

        For a simple radical (nRad = 1):
        s = +1/2 , m = 2 (doublet)

        For a biradical, s can be either 0 [+0.5 + (-0.5)] or 1 [+0.5 + (+0.5)]
        and m = 1 (singlet) or m = 3 (triplet).
        '''
        if nRad in [0,1]:
            # NOTE(review): 2*nRad/2 is just nRad under Python 2 integer
            # division; under Python 3 it yields a float -- the != still
            # compares equal-valued int/float correctly.
            if multiplicity != (2*nRad/2 + 1):
                raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
        elif nRad == 2:
            if not int(multiplicity) in [1,3]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
        else: logging.info("Consistency checking of multiplicity of molecules with more than 2 unpaired electrons is not implemented yet!")

    @staticmethod
    def check_hund_rule(atom, multiplicity):
        '''
        It is checked whether atoms with 2 unpaired electrons on the same atom
        result in a multiplicity of 3, and not 1.

        Unpaired electrons in 2 different orbitals belonging to the same atom
        should have the same spin, and hence, should result in a multiplicity of 3.
        '''
        if atom.radicalElectrons == 2 and multiplicity == 1:
            raise InvalidAdjacencyListError("Violation of hund's rule. Invalid multiplicity of {0} because there is an atom with {1} unpaired electrons"
                                            .format(multiplicity, atom.radicalElectrons))
################################################################################
class InvalidAdjacencyListError(Exception):
    """Raised when an RMG-style adjacency list is malformed.

    Instantiate with a string describing why the adjacency list is invalid.
    """
    pass
################################################################################
def fromOldAdjacencyList(adjlist, group=False, saturateH=False):
    """
    Convert a pre-June-2014 string adjacency list `adjlist` into a set of :class:`Atom` and
    :class:`Bond` objects.

    It can read both the "old style" that existed for years, and the "intermediate style" that
    existed for a few months in 2014, with the extra column of integers for lone pairs.

    Args:
        adjlist: The adjacency list as a string.
        group: True to parse as a functional group (GroupAtom/GroupBond).
        saturateH: True to add explicit hydrogens to complete each structure
            (molecule mode only).

    Returns:
        A tuple of (list of atoms, multiplicity). The multiplicity is None
        when parsing a group.

    Raises:
        InvalidAdjacencyListError: For any malformed adjacency list.
    """
    atoms = []
    atomdict = {}
    bonds = {}

    try:
        adjlist = adjlist.strip()
        lines = adjlist.splitlines()
        if adjlist == '' or len(lines) == 0:
            raise InvalidAdjacencyListError('Empty adjacency list.')

        # Skip the first line if it contains a label
        if len(lines[0].split()) == 1:
            label = lines.pop(0)
            if len(lines) == 0:
                raise InvalidAdjacencyListError('Error in {0} adjacency list: No atoms specified.'.format(adjlist.splitlines()[0]))

        # raw string avoids invalid escape sequences ('\{', '\s') that warn
        # on modern Python
        mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
        atomicMultiplicities = {}  # these are no longer stored on atoms, so we make a separate dictionary (never populated here)
        # Iterate over the remaining lines, generating Atom or GroupAtom objects
        for line in lines:

            # Sometimes people put spaces after commas, which messes up the
            # parse-by-whitespace. Examples include '{Cd, Ct}'.
            if mistake1.search(line):
                raise InvalidAdjacencyListError(
                    "Error in {1} adjacency list: species shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group(), adjlist.splitlines()[0])
                    )

            # Sometimes commas are used to delimit bonds in the bond list,
            # so replace them just in case
            line = line.replace('},{', '} {')

            data = line.split()

            # Skip if blank line
            if len(data) == 0: continue

            # First item is index for atom
            # Sometimes these have a trailing period (as if in a numbered list),
            # so remove it just in case
            aid = int(data[0].strip('.'))

            # If second item starts with '*', then atom is labeled
            label = ''; index = 1
            if data[1][0] == '*':
                label = data[1]
                index += 1

            # Next is the element or functional group element
            # A list can be specified with the {,} syntax
            atomType = data[index]
            if atomType[0] == '{':
                atomType = atomType[1:-1].split(',')
            else:
                atomType = [atomType]
            index += 1

            # Next is the electron state
            radicalElectrons = [];
            additionalLonePairs = []
            elecState = data[index].upper()
            if elecState[0] == '{':
                elecState = elecState[1:-1].split(',')
            else:
                elecState = [elecState]
            if len(elecState) == 0:
                raise InvalidAdjacencyListError("Error in {0} adjacency list: There must be some electronic state defined for an old adjlist".format(adjlist.splitlines()[0]))
            # decode each electron-state token into (radicals, extra lone pairs);
            # ambiguous tokens ('2', '3', '4') are only legal for groups, where
            # every compatible combination is enumerated
            for e in elecState:
                if e == '0':
                    radicalElectrons.append(0); additionalLonePairs.append(0)
                elif e == '1':
                    radicalElectrons.append(1); additionalLonePairs.append(0)
                elif e == '2':
                    if not group:
                        raise InvalidAdjacencyListError("Error in {0} adjacency list: Number of radical electrons = 2 is not specific enough. Please use 2S or 2T.".format(adjlist.splitlines()[0]))
                    # includes 2S and 2T
                    radicalElectrons.append(0); additionalLonePairs.append(1)
                    radicalElectrons.append(2); additionalLonePairs.append(0)
                elif e == '2S':
                    radicalElectrons.append(0); additionalLonePairs.append(1)
                elif e == '2T':
                    radicalElectrons.append(2); additionalLonePairs.append(0)
                elif e == '3':
                    if not group:
                        raise InvalidAdjacencyListError("Error in {0} adjacency list: Number of radical electrons = 3 is not specific enough. Please use 3D or 3Q.".format(adjlist.splitlines()[0]))
                    # includes 3D and 3Q
                    radicalElectrons.append(1); additionalLonePairs.append(1)
                    radicalElectrons.append(3); additionalLonePairs.append(0)
                elif e == '3D':
                    radicalElectrons.append(1); additionalLonePairs.append(1)
                elif e == '3Q':
                    radicalElectrons.append(3); additionalLonePairs.append(0)
                elif e == '4':
                    if not group:
                        raise InvalidAdjacencyListError("Error in {0} adjacency list: Number of radical electrons = 4 is not specific enough. Please use 4S, 4T, or 4V.".format(adjlist.splitlines()[0]))
                    # includes 4S, 4T, and 4V
                    radicalElectrons.append(0); additionalLonePairs.append(2)
                    radicalElectrons.append(2); additionalLonePairs.append(1)
                    radicalElectrons.append(4); additionalLonePairs.append(0)
                elif e == '4S':
                    radicalElectrons.append(0); additionalLonePairs.append(2)
                elif e == '4T':
                    radicalElectrons.append(2); additionalLonePairs.append(1)
                elif e == '4V':
                    radicalElectrons.append(4); additionalLonePairs.append(0)
                elif e == 'X':
                    if not group:
                        raise InvalidAdjacencyListError("Error in {0} adjacency list: Number of radical electrons = X is not specific enough. Wildcards should only be used for groups.".format(adjlist.splitlines()[0]))
                    radicalElectrons = []
            index += 1

            # Next number defines the number of lone electron pairs (if provided)
            lonePairsOfElectrons = None
            if len(data) > index:
                lpState = data[index]
                if lpState[0] == '{':
                    # this is the start of the chemical bonds - no lone pair info was provided
                    lonePairsOfElectrons = None
                else:
                    if lpState == '0':
                        lonePairsOfElectrons = 0
                    if lpState == '1':
                        lonePairsOfElectrons = 1
                    if lpState == '2':
                        lonePairsOfElectrons = 2
                    if lpState == '3':
                        lonePairsOfElectrons = 3
                    if lpState == '4':
                        lonePairsOfElectrons = 4
                    index += 1
            else: # no bonds or lone pair info provided.
                lonePairsOfElectrons = None

            # Create a new atom based on the above information
            if group:
                if lonePairsOfElectrons is not None:
                    lonePairsOfElectrons = [additional + lonePairsOfElectrons for additional in additionalLonePairs]
                atom = GroupAtom(atomType=atomType,
                                 radicalElectrons=sorted(set(radicalElectrons)),
                                 charge=None,
                                 label=label,
                                 lonePairs=lonePairsOfElectrons,  # Assign lonePairsOfElectrons as None if it is not explicitly provided
                                 )
            else:
                standardLonePairs = {'H': 0, 'C': 0, 'O': 2, 'S': 2, 'Si': 0, 'Cl': 3, 'He': 1, 'Ne': 4, 'Ar': 4}
                if lonePairsOfElectrons is not None:
                    # Intermediate adjlist representation
                    lonePairsOfElectrons = lonePairsOfElectrons + additionalLonePairs[0]
                else:
                    # Add the standard number of lone pairs with the additional lone pairs
                    lonePairsOfElectrons = standardLonePairs[atomType[0]] + additionalLonePairs[0]
                atom = Atom(element=atomType[0],
                            radicalElectrons=radicalElectrons[0],
                            charge=0,
                            label=label,
                            lonePairs=lonePairsOfElectrons,
                            )
            # Add the atom to the list
            atoms.append(atom)
            atomdict[aid] = atom

            # Process list of bonds
            bonds[aid] = {}
            for datum in data[index:]:

                # Sometimes commas are used to delimit bonds in the bond list,
                # so strip them just in case
                datum = datum.strip(',')

                aid2, comma, order = datum[1:-1].partition(',')
                aid2 = int(aid2)
                if aid == aid2:
                    raise InvalidAdjacencyListError('Error in {1} adjacency list: Attempted to create a bond between atom {0:d} and itself.'.format(aid,adjlist.splitlines()[0]))

                if order[0] == '{':
                    order = order[1:-1].split(',')
                else:
                    order = [order]

                bonds[aid][aid2] = order

        # Check consistency using bonddict
        for atom1 in bonds:
            for atom2 in bonds[atom1]:
                if atom2 not in bonds:
                    raise InvalidAdjacencyListError('Error in {1} adjacency list: Atom {0:d} not in bond dictionary.'.format(atom2,adjlist.splitlines()[0]))
                elif atom1 not in bonds[atom2]:
                    raise InvalidAdjacencyListError('Error in {2} adjacency list: Found bond between {0:d} and {1:d}, but not the reverse'.format(atom1, atom2, adjlist.splitlines()[0]))
                elif bonds[atom1][atom2] != bonds[atom2][atom1]:
                    raise InvalidAdjacencyListError('Error in {4} adjacency list: Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist.splitlines()[0]))

        # Convert bonddict to use Atom[group] and Bond[group] objects
        # sorted() is used instead of list.sort() so this also works with
        # Python 3 dict views (the original dict.keys().sort() was Python 2 only)
        atomkeys = sorted(atomdict.keys())
        for aid1 in atomkeys:
            atomkeys2 = sorted(bonds[aid1].keys())
            for aid2 in atomkeys2:
                if aid1 < aid2:
                    atom1 = atomdict[aid1]
                    atom2 = atomdict[aid2]
                    order = bonds[aid1][aid2]
                    if group:
                        bond = GroupBond(atom1, atom2, order)
                    elif len(order) == 1:
                        bond = Bond(atom1, atom2, order[0])
                    else:
                        raise InvalidAdjacencyListError('Error in {0} adjacency list: Multiple bond orders specified for an atom.'.format(adjlist.splitlines()[0]))
                    atom1.edges[atom2] = bond
                    atom2.edges[atom1] = bond

        if not group:
            if saturateH:
                # Add explicit hydrogen atoms to complete structure if desired
                orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
                standardLonePairs = {'H': 0, 'C': 0, 'O': 2, 'S': 2, 'Si': 0, 'Cl': 3, 'He': 1, 'Ne': 4, 'Ar': 4}
                valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'Cl': 1, 'He': 0, 'Ne': 0, 'Ar': 0}
                newAtoms = []
                for atom in atoms:
                    try:
                        valence = valences[atom.symbol]
                    except KeyError:
                        raise InvalidAdjacencyListError('Error in {1} adjacency list: Cannot add hydrogens: Unknown valence for atom "{0}".'.format(atom.symbol, adjlist.splitlines()[0]))
                    radical = atom.radicalElectrons
                    # total (possibly fractional) bond order already on the atom
                    order = 0
                    for atom2, bond in atom.bonds.items():
                        order += orders[bond.order]
                    count = valence - radical - int(order) - 2*(atom.lonePairs-standardLonePairs[atom.symbol])
                    for i in range(count):
                        a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0)
                        b = Bond(atom, a, 'S')
                        newAtoms.append(a)
                        atom.bonds[a] = b
                        a.bonds[atom] = b
                atoms.extend(newAtoms)

            # Calculate the multiplicity for the molecule and update the charges on each atom
            nRad = 0   # total number of radical electrons
            for atom in atoms:
                atom.updateCharge()
                nRad += atom.radicalElectrons
            multiplicity = nRad + 1     # 2 s + 1, where s is the combined spin of unpaired electrons (s = 1/2 per unpaired electron)

        else:
            # Don't set a multiplicity for groups when converting from an old adjlist
            multiplicity = None

    except InvalidAdjacencyListError:
        logging.error("Troublesome adjacency list:\n" + adjlist)
        raise

    return atoms, multiplicity
###############################
# Raw strings are used throughout so that regex escapes like \s and \{ are
# not treated as (invalid) Python string escapes, which warn on modern Python.

# Intermediate (2014) adjacency-list line, e.g. "1 *2 C 1 0 {2,S}"
re_IntermediateAdjList = re.compile(r'^\s*(\d*)\s+' +  # atom number digit
                                    r'(?P<label>\*\d*\s+)?' +  # optional label eg * or *2
                                    r'(?P<atomtype>\{?[A-Z]\S*)\s+' +  # atomtype eg R!H or {Cb,Cd}
                                    r'(?P<radicals>X|\d[STDQV]?|\{?\d[^}]*\})\s+' +  # radicals eg. X or 2T or {1,2,2T}
                                    r'(?P<lonepairs>\d)' +  # lone pairs eg. 0
                                    r'(?P<bonds>(\s+\{\d+\,(?:[SDTB]|\{.+?\})\},?)*)' +  # bonds, eg {2,S} {4,{S,D}}
                                    r'\s*$')  # the end!

# Old-style adjacency-list line, e.g. "1 *2 C 1 {2,S}" (no lone-pair column)
re_OldAdjList = re.compile(r'^\s*(\d*)\s+' +  # atom number digit
                           r'(?P<label>\*\d*\s+)?' +  # optional label eg * or *2
                           r'(?P<atomtype>\{?[A-Z]\S*)\s+' +  # atomtype eg R!H or {Cb,Cd}
                           r'(?P<radicals>X|\d[STDQV]?|\{?\d[^}]*\})' +  # radicals eg. X or 2T or {1,2,2T}
                           r'(?P<bonds>(\s+\{\d+\,(?:[SDTB]|\{.+?\})\},?)*)' +  # bonds, eg {2,S} {4,{S,D}}
                           r'\s*$')  # the end!
def fromAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
"""
atoms = []
atomdict = {}
bonds = {}
multiplicity = None
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Detect old-style adjacency lists by looking at the last line's syntax
lastLine = lines[-1].strip()
while not lastLine: # Remove any empty lines from the end
lines.pop()
lastLine = lines[-1].strip()
if re_IntermediateAdjList.match(lastLine):
logging.debug("{1} adjacency list line '{0}' looks like an intermediate style adjacency list".format(lastLine, adjlist.splitlines()[0]))
return fromOldAdjacencyList(adjlist, group=group, saturateH=saturateH)
if re_OldAdjList.match(lastLine):
logging.debug("{1} adjacency list line '{0}' looks like an old style adjacency list".format(lastLine, adjlist.splitlines()[0]))
if not group:
logging.debug("Will assume implicit H atoms")
return fromOldAdjacencyList(adjlist, group=group, saturateH=(not group))
# Interpret the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
# Interpret the second line if it contains a multiplicity
if lines[0].split()[0] == 'multiplicity':
line = lines.pop(0)
if group:
match = re.match('\s*multiplicity\s+\[\s*(\d(?:,\s*\d)*)\s*\]\s*$', line)
if not match:
rematch = re.match('\s*multiplicity\s+x\s*$', line)
assert rematch, "Invalid multiplicity line '{0}'. Should be a list like 'multiplicity [1,2,3]' or a wildcard 'multiplicity x'".format(line)
else:
# should match "multiplicity [1]" or " multiplicity [ 1, 2, 3 ]" or " multiplicity [1,2,3]"
# and whatever's inside the [] (excluding leading and trailing spaces) should be captured as group 1.
# If a wildcard is desired, this line can be omitted or replaced with 'multiplicity x'
# Multiplicities must be only one digit (i.e. less than 10)
# The (?:,\s*\d)* matches patters like ", 2" 0 or more times, but doesn't capture them (because of the leading ?:)
multiplicities = match.group(1).split(',')
multiplicity = [int(i) for i in multiplicities]
else:
match = re.match('\s*multiplicity\s+\d+\s*$', line)
assert match, "Invalid multiplicity line '{0}'. Should be an integer like 'multiplicity 2'".format(line)
multiplicity = int(line.split()[1])
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in {0} adjacency list.'.format(adjlist.splitlines()[0]))
mistake1 = re.compile('\{[^}]*\s+[^}]*\}')
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '[Cd, Ct]'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"{1} Shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group(), adjlist.splitlines()[0])
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '[':
if not group:
raise InvalidAdjacencyListError("Error on {0}: A molecule should not assign more than one atomtype per atom.".format(adjlist.splitlines()[0]))
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next the number of unpaired electrons
unpairedElectrons = []
uState = data[index]
if uState[0] == 'u':
if uState[1] == '[':
uState = uState[2:-1].split(',')
else:
uState = [uState[1]]
for u in uState:
if u == '0':
unpairedElectrons.append(0)
elif u == '1':
unpairedElectrons.append(1)
elif u == '2':
unpairedElectrons.append(2)
elif u == '3':
unpairedElectrons.append(3)
elif u == '4':
unpairedElectrons.append(4)
elif u == 'x':
if not group:
raise InvalidAdjacencyListError("Error on {0}: A molecule should not assign a wildcard to number of unpaired electrons.".format(adjlist.splitlines()[0]))
else:
raise InvalidAdjacencyListError('Number of unpaired electrons not recognized on {0}.'.format(*adjlist.splitlines()[0]))
index += 1
else:
raise InvalidAdjacencyListError('Number of unpaired electrons not defined on {0}.'.format(adjlist.splitlines()[0]))
# Next the number of lone electron pairs (if provided)
lonePairs = []
if len(data) > index:
lpState = data[index]
if lpState[0] == 'p':
if lpState[1] == '[':
lpState = lpState[2:-1].split(',')
else:
lpState = [lpState[1]]
for l in lpState:
if l == '0':
lonePairs.append(0)
elif l == '1':
lonePairs.append(1)
elif l == '2':
lonePairs.append(2)
elif l == '3':
lonePairs.append(3)
elif l == '4':
lonePairs.append(4)
elif l == 'x':
if not group:
raise InvalidAdjacencyListError("Error in {0} adjacency list: A molecule should not have a wildcard assigned to number of lone pairs.".format(adjlist.splitlines()[0]))
else:
raise InvalidAdjacencyListError('Error in {0} adjacency list: Number of lone electron pairs not recognized.'.format(adjlist.splitlines()[0]))
index += 1
else:
if not group:
lonePairs.append(0)
else:
if not group:
lonePairs.append(0)
# Next the number of partial charges (if provided)
partialCharges = []
if len(data) > index:
eState = data[index]
if eState[0] == 'c':
if eState[1] == '[':
eState = eState[2:-1].split(',')
else:
eState = [eState[1:]]
for e in eState:
if e == '0':
partialCharges.append(0)
elif e == '+1':
partialCharges.append(1)
elif e == '+2':
partialCharges.append(2)
elif e == '+3':
partialCharges.append(3)
elif e == '+4':
partialCharges.append(4)
elif e == '-1':
partialCharges.append(-1)
elif e == '-2':
partialCharges.append(-2)
elif e == '-3':
partialCharges.append(-3)
elif e == '-4':
partialCharges.append(-4)
elif e == 'x':
if not group:
raise InvalidAdjacencyListError("Error on {0} adjacency list: A molecule should not have a wildcard assigned to number of charges.".format(adjlist.splitlines()[0]))
else:
raise InvalidAdjacencyListError('Error on {0} adjacency list: Number of partial charges not recognized.'.format(adjlist.splitlines()[0]))
index += 1
else:
if not group:
partialCharges.append(0)
else:
if not group:
partialCharges.append(0)
# Create a new atom based on the above information
if group:
atom = GroupAtom(atomType, unpairedElectrons, partialCharges, label, lonePairs)
else:
atom = Atom(atomType[0], unpairedElectrons[0], partialCharges[0], label, lonePairs[0])
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Error in {1} adjacency list: Attempted to create a bond between atom {0:d} and itself.'.format(aid, adjlist.splitlines()[0]))
if order[0] == '[':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Error in {1} adjacency list: Atom {0:d} not in bond dictionary.'.format(atom2, adjlist.splitlines()[0]))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Error in {2} adjacency list: Found bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2, adjlist.splitlines()[0]))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Error in {4} adjacency list: Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist.splitlines()[0]))
# Convert bonddict to use Atom[group] and Bond[group] objects
atomkeys = atomdict.keys()
atomkeys.sort()
for aid1 in atomkeys:
atomkeys2 = bonds[aid1].keys()
atomkeys2.sort()
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Error in {0} adjacency list: Multiple bond orders specified for an atom in a Molecule.'.format(adjlist.splitlines()[0]))
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if saturateH:
# Add explicit hydrogen atoms to complete structure if desired
if not group:
Saturator.saturate(atoms)
# Consistency checks
if not group:
# Molecule consistency check
# Electron and valency consistency check for each atom
for atom in atoms: ConsistencyChecker.check_partial_charge(atom)
nRad = sum([atom.radicalElectrons for atom in atoms])
absolute_spin_per_electron = 1/2.
if multiplicity == None: multiplicity = 2* (nRad * absolute_spin_per_electron) + 1
ConsistencyChecker.check_multiplicity(nRad, multiplicity)
for atom in atoms: ConsistencyChecker.check_hund_rule(atom, multiplicity)
return atoms, multiplicity
else:
# Currently no group consistency check
return atoms, multiplicity
def toAdjacencyList(atoms, multiplicity, label=None, group=False, removeH=False, removeLonePairs=False, oldStyle=False):
"""
Convert a chemical graph defined by a list of `atoms` into a string
adjacency list.
"""
if oldStyle:
return toOldAdjacencyList(atoms, multiplicity, label, group, removeH)
adjlist = ''
# Don't remove hydrogen atoms if the molecule consists only of hydrogen atoms
try:
if removeH and all([atom.element.symbol == 'H' for atom in atoms]): removeH = False
except AttributeError:
pass
if label: adjlist += label + '\n'
if group:
if multiplicity:
# Functional group should have a list of possible multiplicities.
# If the list is empty, then it does not need to be written
adjlist += 'multiplicity [{0!s}]\n'.format(','.join(str(i) for i in multiplicity))
else:
assert isinstance(multiplicity, int), "Molecule should have an integer multiplicity"
if multiplicity != 1 or any( atom.radicalElectrons for atom in atoms ):
adjlist += 'multiplicity {0!r}\n'.format(multiplicity)
# Determine the numbers to use for each atom
atomNumbers = {}
index = 0
for atom in atoms:
if removeH and atom.element.symbol == 'H' and atom.label == '': continue
atomNumbers[atom] = '{0:d}'.format(index + 1)
index += 1
atomLabels = dict([(atom, '{0}'.format(atom.label)) for atom in atomNumbers])
atomTypes = {}
atomUnpairedElectrons = {}
atomLonePairs = {}
atomCharge = {}
if group:
for atom in atomNumbers:
# Atom type(s)
if len(atom.atomType) == 1:
atomTypes[atom] = atom.atomType[0].label
else:
atomTypes[atom] = '[{0}]'.format(','.join([a.label for a in atom.atomType]))
# Unpaired Electron(s)
if len(atom.radicalElectrons) == 1:
atomUnpairedElectrons[atom] = str(atom.radicalElectrons[0])
elif len(atom.radicalElectrons) == 0:
atomUnpairedElectrons[atom] = 'x' # Empty list indicates wildcard
else:
atomUnpairedElectrons[atom] = '[{0}]'.format(','.join([str(radical) for radical in atom.radicalElectrons]))
# Lone Electron Pair(s)
if len(atom.lonePairs) == 1:
atomLonePairs[atom] = str(atom.lonePairs[0])
elif len(atom.lonePairs) == 0:
atomLonePairs[atom] = None # Empty list indicates wildcard
else:
atomLonePairs[atom] = '[{0}]'.format(','.join([str(pair) for pair in atom.lonePairs]))
# Charges
if len(atom.charge) == 1:
atomCharge[atom] = atom.charge[0]
elif len(atom.charge) == 0:
atomCharge[atom] = None # Empty list indicates wildcard
else:
atomCharge[atom] = '[{0}]'.format(','.join(['+'+str(charge) if charge > 0 else ''+str(charge) for charge in atom.charge]))
else:
for atom in atomNumbers:
# Atom type
atomTypes[atom] = '{0}'.format(atom.element.symbol)
# Unpaired Electron(s)
atomUnpairedElectrons[atom] = '{0}'.format(atom.radicalElectrons)
# Lone Electron Pair(s)
atomLonePairs[atom] = str(atom.lonePairs)
# Partial Charge(s)
atomCharge[atom] = '+'+str(atom.charge) if atom.charge > 0 else '' + str(atom.charge)
# Determine field widths
atomNumberWidth = max([len(s) for s in atomNumbers.values()]) + 1
atomLabelWidth = max([len(s) for s in atomLabels.values()])
if atomLabelWidth > 0: atomLabelWidth += 1
atomTypeWidth = max([len(s) for s in atomTypes.values()]) + 1
atomUnpairedElectronsWidth = max([len(s) for s in atomUnpairedElectrons.values()])
#atomLonePairWidth = max([len(s) for s in atomLonePairs.values()])
#atomChargeWidth = max([len(s) for s in atomCharge.values()])
# Assemble the adjacency list
for atom in atoms:
if atom not in atomNumbers: continue
# Atom number
adjlist += '{0:<{1:d}}'.format(atomNumbers[atom], atomNumberWidth)
# Atom label
adjlist += '{0:<{1:d}}'.format(atomLabels[atom], atomLabelWidth)
# Atom type(s)
adjlist += '{0:<{1:d}}'.format(atomTypes[atom], atomTypeWidth)
# Unpaired Electron(s)
adjlist += 'u{0:<{1:d}}'.format(atomUnpairedElectrons[atom], atomUnpairedElectronsWidth)
# Lone Electron Pair(s)
if atomLonePairs[atom] != None:
adjlist += ' p{0}'.format(atomLonePairs[atom])
# Partial charges
if atomCharge[atom] != None:
adjlist += ' c{0}'.format(atomCharge[atom])
# Bonds list
atoms2 = atom.bonds.keys()
# sort them the same way as the atoms
atoms2.sort(key=atoms.index)
for atom2 in atoms2:
if atom2 not in atomNumbers: continue
bond = atom.bonds[atom2]
adjlist += ' {{{0},'.format(atomNumbers[atom2])
# Bond type(s)
if group:
if len(bond.order) == 1:
adjlist += bond.order[0]
else:
adjlist += '[{0}]'.format(','.join(bond.order))
else:
adjlist += bond.order
adjlist += '}'
# Each atom begins on a new line
adjlist += '\n'
return adjlist
def getOldElectronState(atom):
"""
Get the old adjacency list format electronic state
"""
standardLonePairs = {'H': 0, 'C': 0, 'O': 2, 'S': 2, 'Si': 0, 'Cl': 3, 'He': 1, 'Ne': 4, 'Ar': 4}
additionalLonePairs = atom.lonePairs - standardLonePairs[atom.element.symbol]
electrons = atom.radicalElectrons + additionalLonePairs * 2
if electrons == 0:
electronState = '0'
elif electrons == 1:
electronState = '1'
elif electrons == 2:
if additionalLonePairs == 0:
electronState = '2T'
elif additionalLonePairs == 1:
electronState = '2S'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
elif electrons == 3:
if additionalLonePairs == 0:
electronState = '3Q'
elif additionalLonePairs == 1:
electronState = '3D'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
elif electrons == 4:
if additionalLonePairs == 0:
electronState = '4V'
elif additionalLonePairs == 1:
electronState = '4T'
elif additionalLonePairs == 2:
electronState = '4S'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
return electronState
def toOldAdjacencyList(atoms, multiplicity=None, label=None, group=False, removeH=False):
"""
Convert a chemical graph defined by a list of `atoms` into a string old-style
adjacency list that can be used in RMG-Java. Currently not working for groups.
"""
adjlist = ''
if group:
raise InvalidAdjacencyListError("Not yet implemented.")
# Filter out all non-valid atoms
if not group:
for atom in atoms:
if atom.element.symbol in ['He','Ne','Ar','N']:
raise InvalidAdjacencyListError("Old-style adjacency list does not accept He, Ne, Ar, N elements.")
# Don't remove hydrogen atoms if the molecule consists only of hydrogen atoms
try:
if removeH and all([atom.element.symbol == 'H' for atom in atoms]): removeH = False
except AttributeError:
pass
if label: adjlist += label + '\n'
# Determine the numbers to use for each atom
atomNumbers = {}
index = 0
for atom in atoms:
if removeH and atom.element.symbol == 'H' and atom.label == '': continue
atomNumbers[atom] = '{0:d}'.format(index + 1)
index += 1
atomLabels = dict([(atom, '{0}'.format(atom.label)) for atom in atomNumbers])
atomTypes = {}
atomElectronStates = {}
if group:
raise InvalidAdjacencyListError("Not yet implemented.")
else:
for atom in atomNumbers:
# Atom type
atomTypes[atom] = '{0}'.format(atom.element.symbol)
# Electron state(s)
atomElectronStates[atom] = '{0}'.format(getOldElectronState(atom))
# Determine field widths
atomNumberWidth = max([len(s) for s in atomNumbers.values()]) + 1
atomLabelWidth = max([len(s) for s in atomLabels.values()])
if atomLabelWidth > 0: atomLabelWidth += 1
atomTypeWidth = max([len(s) for s in atomTypes.values()]) + 1
atomElectronStateWidth = max([len(s) for s in atomElectronStates.values()])
# Assemble the adjacency list
for atom in atoms:
if atom not in atomNumbers: continue
# Atom number
adjlist += '{0:<{1:d}}'.format(atomNumbers[atom], atomNumberWidth)
# Atom label
adjlist += '{0:<{1:d}}'.format(atomLabels[atom], atomLabelWidth)
# Atom type(s)
adjlist += '{0:<{1:d}}'.format(atomTypes[atom], atomTypeWidth)
# Electron state(s)
adjlist += '{0:<{1:d}}'.format(atomElectronStates[atom], atomElectronStateWidth)
# Bonds list
atoms2 = atom.bonds.keys()
# sort them the same way as the atoms
atoms2.sort(key=atoms.index)
for atom2 in atoms2:
if atom2 not in atomNumbers: continue
bond = atom.bonds[atom2]
adjlist += ' {{{0},'.format(atomNumbers[atom2])
# Bond type(s)
if group:
if len(bond.order) == 1:
adjlist += bond.order[0]
else:
adjlist += '{{{0}}}'.format(','.join(bond.order))
else:
adjlist += bond.order
adjlist += '}'
# Each atom begins on a new line
adjlist += '\n'
return adjlist
| enochd/RMG-Py | rmgpy/molecule/adjlist.py | Python | mit | 44,800 | [
"ChemPy"
] | 9488885a65094fd8875e8d456766d75a77dd23fe5102949c1ec2d889a717dca4 |
# Copyright 2016 Mingbo Cai, Princeton Neuroscience Instititute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_can_instantiate():
import brainiak.reprsimil.brsa
s = brainiak.reprsimil.brsa.BRSA()
assert s, "Invalid BRSA instance!"
s = brainiak.reprsimil.brsa.BRSA(
n_iter=50, rank=5, auto_nuisance=False, n_nureg=2, nureg_method='ICA',
baseline_single=False, init_iter=5, GP_space=True, GP_inten=True,
tol=2e-3, eta=0.001, space_smooth_range=10.0, inten_smooth_range=100.0,
tau_range=2.0,
tau2_prior=brainiak.reprsimil.brsa.prior_GP_var_inv_gamma,
optimizer='CG', random_state=100, anneal_speed=20)
assert s, "Invalid BRSA instance!"
def test_fit():
from brainiak.reprsimil.brsa import BRSA
import brainiak.utils.utils as utils
import scipy.stats
import numpy as np
import os.path
np.random.seed(10)
file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
# Load an example design matrix
design = utils.ReadDesign(fname=file_path)
# concatenate it by 2 times, mimicking 2 runs of itenditcal timing
n_run = 2
design.design_task = np.tile(design.design_task[:, :-1], [n_run, 1])
design.n_TR = design.n_TR * n_run
# start simulating some data
n_V = 50
n_C = np.size(design.design_task, axis=1)
n_T = design.n_TR
noise_bot = 0.5
noise_top = 5.0
noise_level = np.random.rand(n_V) * (noise_top - noise_bot) + noise_bot
# noise level is random.
# AR(1) coefficient
rho1_top = 0.8
rho1_bot = -0.2
rho1 = np.random.rand(n_V) * (rho1_top - rho1_bot) + rho1_bot
# generating noise
noise = np.zeros([n_T, n_V])
noise[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
for i_t in range(1, n_T):
noise[i_t, :] = noise[i_t - 1, :] * rho1 + \
np.random.randn(n_V) * noise_level
# ideal covariance matrix
ideal_cov = np.zeros([n_C, n_C])
ideal_cov = np.eye(n_C) * 0.6
ideal_cov[0:4, 0:4] = 0.2
for cond in range(0, 4):
ideal_cov[cond, cond] = 2
ideal_cov[5:9, 5:9] = 0.9
for cond in range(5, 9):
ideal_cov[cond, cond] = 1
L_full = np.linalg.cholesky(ideal_cov)
# generating signal
snr_level = 5.0 # test with high SNR
# snr = np.random.rand(n_V)*(snr_top-snr_bot)+snr_bot
# Notice that accurately speaking this is not snr. the magnitude of signal
# depends
# not only on beta but also on x.
inten = np.random.rand(n_V) * 20.0
# parameters of Gaussian process to generate pseuso SNR
tau = 1.0
smooth_width = 5.0
inten_kernel = 1.0
coords = np.arange(0, n_V)[:, None]
dist2 = np.square(coords - coords.T)
inten_tile = np.tile(inten, [n_V, 1])
inten_diff2 = (inten_tile - inten_tile.T)**2
K = np.exp(-dist2 / smooth_width**2 / 2.0 - inten_diff2 /
inten_kernel**2 / 2.0) * tau**2 + np.eye(n_V) * tau**2 * 0.001
L = np.linalg.cholesky(K)
snr = np.exp(np.dot(L, np.random.randn(n_V))) * snr_level
sqrt_v = noise_level * snr
betas_simulated = np.dot(L_full, np.random.randn(n_C, n_V)) * sqrt_v
signal = np.dot(design.design_task, betas_simulated)
# Adding noise to signal as data
Y = signal + noise + inten
scan_onsets = np.linspace(0, design.n_TR, num=n_run + 1)
# Test fitting with GP prior.
brsa = BRSA(GP_space=True, GP_inten=True, n_iter=5,
init_iter=10, auto_nuisance=False, tol=2e-3)
# We also test that it can detect baseline regressor included in the
# design matrix for task conditions
wrong_design = np.insert(design.design_task, 0, 1, axis=1)
with pytest.raises(ValueError) as excinfo:
brsa.fit(X=Y, design=wrong_design, scan_onsets=scan_onsets,
coords=coords, inten=inten)
assert ('Your design matrix appears to have included baseline time series.'
in str(excinfo.value))
# Now we fit with the correct design matrix.
brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets,
coords=coords, inten=inten)
# Check that result is significantly correlated with the ideal covariance
# matrix
u_b = brsa.U_
u_i = ideal_cov
p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],
u_i[np.tril_indices_from(u_i)])[1]
assert p < 0.01, (
"Fitted covariance matrix does not correlate with ideal covariance "
"matrix!")
# check that the recovered SNRs makes sense
p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
assert p < 0.01, (
"Fitted noise level does not correlate with simulated noise level!")
p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
assert p < 0.01, (
"Fitted AR(1) coefficient does not correlate with simulated values!")
noise_new = np.zeros([n_T, n_V])
noise_new[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
for i_t in range(1, n_T):
noise_new[i_t, :] = noise_new[i_t - 1, :] * \
rho1 + np.random.randn(n_V) * noise_level
Y_new = signal + noise_new + inten
ts, ts0 = brsa.transform(Y_new, scan_onsets=scan_onsets)
p = scipy.stats.pearsonr(ts[:, 0], design.design_task[:, 0])[1]
assert p < 0.01, (
"Recovered time series does not correlate with true time series!")
assert np.shape(ts) == (n_T, n_C) and np.shape(ts0) == (n_T, 1), (
"Wrong shape in returned time series by transform function!")
[score, score_null] = brsa.score(
X=Y_new, design=design.design_task, scan_onsets=scan_onsets)
assert score > score_null, (
"Full model does not win over null model on data containing signal")
[score, score_null] = brsa.score(X=noise_new + inten,
design=design.design_task,
scan_onsets=scan_onsets)
assert score < score_null, (
"Null model does not win over full model on data without signal")
# Test fitting with lower rank, nuisance regressors and without GP prior
rank = n_C - 1
n_nureg = 1
brsa = BRSA(rank=rank, n_nureg=n_nureg, tol=2e-3,
n_iter=8, init_iter=4, auto_nuisance=True)
brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets)
# u_b = brsa.U_
u_i = ideal_cov
p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)], u_i[
np.tril_indices_from(u_i)])[1]
assert p < 0.01, (
"Fitted covariance matrix does not correlate with ideal covariance "
"matrix!")
# check that the recovered SNRs makes sense
p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
assert p < 0.01, (
"Fitted noise level does not correlate with simulated noise level!")
p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
assert p < 0.01, (
"Fitted AR(1) coefficient does not correlate with simulated values!")
assert (not hasattr(brsa, 'bGP_')
and not hasattr(brsa, 'lGPspace_')
and not hasattr(brsa, 'lGPinten_')
), ("the BRSA object should not have parameters of GP if GP is "
"not requested.")
# GP parameters are not set if not requested
assert brsa.beta0_.shape[0] == n_nureg + 1, 'Shape of beta0 incorrect'
p = scipy.stats.pearsonr(brsa.beta0_[0, :], inten)[1]
assert p < 0.01, (
'recovered beta0 does not correlate with the baseline of voxels.')
assert np.shape(brsa.L_) == (
n_C, rank), 'Cholesky factor should have shape of (n_C, rank)'
# Test fitting with GP over just spatial coordinates.
brsa = BRSA(GP_space=True, baseline_single=False,
tol=2e-3, n_iter=4, init_iter=4)
brsa.fit(X=Y, design=design.design_task,
scan_onsets=scan_onsets, coords=coords)
# Check that result is significantly correlated with the ideal covariance
# matrix
u_b = brsa.U_
u_i = ideal_cov
p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)], u_i[
np.tril_indices_from(u_i)])[1]
assert p < 0.01, (
"Fitted covariance matrix does not correlate with ideal covariance "
"matrix!")
# check that the recovered SNRs makes sense
p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
assert p < 0.01, (
"Fitted noise level does not correlate with simulated noise level!")
p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
assert p < 0.01, (
"Fitted AR(1) coefficient does not correlate with simulated values!")
assert not hasattr(brsa, 'lGPinten_'), (
"the BRSA object should not have parameters of lGPinten_ if only "
"smoothness in space is requested.")
# GP parameters are not set if not requested
def test_gradient():
    """Verify analytical gradients of BRSA's private log-likelihood functions.

    Simulates fMRI-like data (AR(1) noise, task signal whose pseudo-SNR is
    drawn from a Gaussian Process over spatial coordinates and intensity),
    then checks the analytical derivatives returned by _loglike_AR1_singpara,
    _loglike_AR1_diagV_fitU and _loglike_AR1_diagV_fitV against numerical
    directional derivatives computed with numdifftools, along individual
    parameter axes and along random directions. Also sanity-checks the
    shapes of the precomputed reusable terms.
    """
    from brainiak.reprsimil.brsa import BRSA
    import brainiak.utils.utils as utils
    import numpy as np
    import os.path
    import numdifftools as nd
    np.random.seed(100)
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)
    n_run = 4
    # concatenate it by 4 times, mimicking 4 runs of identical timing
    design.design_task = np.tile(design.design_task[:, :-1], [n_run, 1])
    design.n_TR = design.n_TR * n_run
    # start simulating some data
    n_V = 30
    n_C = np.size(design.design_task, axis=1)
    n_T = design.n_TR
    noise_bot = 0.5
    noise_top = 1.5
    noise_level = np.random.rand(n_V) * (noise_top - noise_bot) + noise_bot
    # noise level is random.
    # AR(1) coefficient
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = np.random.rand(n_V) * (rho1_top - rho1_bot) + rho1_bot
    # generating noise: the first sample is drawn from the stationary
    # distribution of the AR(1) process, the rest recursively.
    noise = np.zeros([n_T, n_V])
    noise[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
    for i_t in range(1, n_T):
        noise[i_t, :] = noise[i_t - 1, :] * rho1 + \
            np.random.randn(n_V) * noise_level
    # ideal covariance matrix
    ideal_cov = np.zeros([n_C, n_C])
    ideal_cov = np.eye(n_C) * 0.6
    ideal_cov[0, 0] = 0.2
    ideal_cov[5:9, 5:9] = 0.6
    for cond in range(5, 9):
        ideal_cov[cond, cond] = 1
    L_full = np.linalg.cholesky(ideal_cov)
    # generating signal
    snr_level = 5.0  # test with high SNR
    inten = np.random.randn(n_V) * 20.0
    # parameters of Gaussian process to generate pseudo SNR
    tau = 0.8
    smooth_width = 5.0
    inten_kernel = 1.0
    coords = np.arange(0, n_V)[:, None]
    dist2 = np.square(coords - coords.T)
    inten_tile = np.tile(inten, [n_V, 1])
    inten_diff2 = (inten_tile - inten_tile.T)**2
    # Squared-exponential kernel over space and intensity, with a small
    # diagonal jitter so the Cholesky factorization is stable.
    K = np.exp(-dist2 / smooth_width**2 / 2.0 - inten_diff2 /
               inten_kernel**2 / 2.0) * tau**2 + np.eye(n_V) * tau**2 * 0.001
    L = np.linalg.cholesky(K)
    snr = np.exp(np.dot(L, np.random.randn(n_V))) * snr_level
    # Notice that accurately speaking this is not snr. the magnitude of signal
    # depends not only on beta but also on x.
    sqrt_v = noise_level * snr
    betas_simulated = np.dot(L_full, np.random.randn(n_C, n_V)) * sqrt_v
    signal = np.dot(design.design_task, betas_simulated)
    # Adding noise to signal as data
    Y = signal + noise
    scan_onsets = np.linspace(0, design.n_TR, num=n_run + 1)
    # Test fitting with GP prior.
    brsa = BRSA(GP_space=True, GP_inten=True, rank=n_C)
    # Additionally, we test the generation of re-used terms.
    X0 = np.ones(n_T)[:, None]
    D, F, run_TRs, n_run_returned = brsa._prepare_DF(
        n_T, scan_onsets=scan_onsets)
    assert np.shape(D) == (n_T, n_T), 'D has wrong shape'
    assert np.shape(F) == (n_T, n_T), 'F has wrong shape'
    assert np.sum(D) == (n_T - n_run) * 2, 'D is initialized incorrectly.'
    assert np.sum(F) == n_T - n_run * 2, 'F is initialized incorrectly.'
    assert n_run_returned == n_run, (
        'There is mistake in counting number of runs')
    assert np.sum(run_TRs) == n_T, (
        'The segmentation of the total experiment duration is wrong')
    (XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, XTDX, XTFX
     ) = brsa._prepare_data_XY(design.design_task, Y, D, F)
    (X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY, X0, X_base,
     n_X0, idx_DC
     ) = brsa._prepare_data_XYX0(design.design_task, Y, X0,
                                 np.random.randn(n_T)[:, None], D, F, run_TRs,
                                 no_DC=False)
    assert (np.shape(XTY) == (n_C, n_V)
            and np.shape(XTDY) == (n_C, n_V)
            and np.shape(XTFY) == (n_C, n_V)
            ), 'Dimension of XTY etc. returned from _prepare_data is wrong'
    assert (np.ndim(YTY_diag) == 1
            and np.ndim(YTDY_diag) == 1
            and np.ndim(YTFY_diag) == 1
            ), ("Dimension of YTY_diag etc. returned from _prepare_data is "
                "wrong")
    assert (np.ndim(XTX) == 2
            and np.ndim(XTDX) == 2
            and np.ndim(XTFX) == 2
            ), 'Dimension of XTX etc. returned from _prepare_data is wrong'
    assert (np.ndim(X0TX0) == 2
            and np.ndim(X0TDX0) == 2
            and np.ndim(X0TFX0) == 2
            ), 'Dimension of X0TX0 etc. returned from _prepare_data is wrong'
    assert (np.ndim(XTX0) == 2
            and np.ndim(XTDX0) == 2
            and np.ndim(XTFX0) == 2
            ), 'Dimension of XTX0 etc. returned from _prepare_data is wrong'
    assert (np.ndim(X0TY) == 2
            and np.ndim(X0TDY) == 2
            and np.ndim(X0TFY) == 2
            ), 'Dimension of X0TY etc. returned from _prepare_data is wrong'
    assert (np.shape(X0) == (n_T, n_X0)
            and np.shape(X_base) == (n_T, np.size(idx_DC))
            and np.max(idx_DC) < n_X0
            and np.size(idx_DC) + 1 == n_X0
            ), ("Dimension of X0 or X_base, or n_X0 or indices of DC "
                "components are wrong.")
    l_idx = np.tril_indices(n_C)
    n_l = np.size(l_idx[0])
    # Make sure all the fields are in the indices.
    idx_param_sing, idx_param_fitU, idx_param_fitV = brsa._build_index_param(
        n_l, n_V, 2)
    assert 'Cholesky' in idx_param_sing and 'a1' in idx_param_sing, \
        'The dictionary for parameter indexing misses some keys'
    assert 'Cholesky' in idx_param_fitU and 'a1' in idx_param_fitU, \
        'The dictionary for parameter indexing misses some keys'
    assert 'log_SNR2' in idx_param_fitV and 'c_space' in idx_param_fitV \
        and 'c_inten' in idx_param_fitV and 'c_both' in idx_param_fitV, \
        'The dictionary for parameter indexing misses some keys'
    # Initial parameters are correct parameters with some perturbation
    param0_fitU = np.random.randn(n_l + n_V) * 0.1
    param0_fitV = np.random.randn(n_V + 1) * 0.1
    param0_sing = np.random.randn(n_l + 1) * 0.1
    # a1 is the AR(1) coefficient reparametrized as tan(rho * pi / 2).
    param0_sing[idx_param_sing['a1']] += np.mean(np.tan(rho1 * np.pi / 2))
    param0_fitV[idx_param_fitV['log_SNR2']] += np.log(snr[:n_V - 1]) * 2
    param0_fitV[idx_param_fitV['c_space']] += np.log(smooth_width) * 2
    param0_fitV[idx_param_fitV['c_inten']] += np.log(inten_kernel) * 2
    # test if the gradients are correct
    # log likelihood and derivative of the _singpara function
    ll0, deriv0 = brsa._loglike_AR1_singpara(param0_sing, XTX, XTDX, XTFX,
                                             YTY_diag, YTDY_diag, YTFY_diag,
                                             XTY, XTDY, XTFY, X0TX0, X0TDX0,
                                             X0TFX0, XTX0, XTDX0, XTFX0, X0TY,
                                             X0TDY, X0TFY, l_idx, n_C, n_T,
                                             n_V, n_run, n_X0, idx_param_sing)
    # We test the gradient to the Cholesky factor
    vec = np.zeros(np.size(param0_sing))
    vec[idx_param_sing['Cholesky'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_singpara(x, XTX, XTDX, XTFX, YTY_diag,
                                             YTDY_diag, YTFY_diag, XTY, XTDY,
                                             XTFY, X0TX0, X0TDX0, X0TFX0, XTX0,
                                             XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             l_idx, n_C, n_T, n_V, n_run, n_X0,
                                             idx_param_sing)[0],
        param0_sing,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of singpara wrt Cholesky is incorrect')
    # We test the gradient to a1
    vec = np.zeros(np.size(param0_sing))
    vec[idx_param_sing['a1']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_singpara(x, XTX, XTDX, XTFX, YTY_diag,
                                             YTDY_diag, YTFY_diag, XTY, XTDY,
                                             XTFY, X0TX0, X0TDX0, X0TFX0, XTX0,
                                             XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             l_idx, n_C, n_T, n_V, n_run, n_X0,
                                             idx_param_sing)[0],
        param0_sing,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec),
                      rtol=1e-5), 'gradient of singpara wrt a1 is incorrect'
    # log likelihood and derivative of the fitU function.
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitU(param0_fitU, XTX, XTDX, XTFX,
                                               YTY_diag, YTDY_diag, YTFY_diag,
                                               XTY, XTDY, XTFY, X0TX0, X0TDX0,
                                               X0TFX0, XTX0, XTDX0, XTFX0,
                                               X0TY, X0TDY, X0TFY, np.log(snr)
                                               * 2, l_idx, n_C, n_T, n_V,
                                               n_run, n_X0, idx_param_fitU,
                                               n_C)
    # We test the gradient wrt the reparametrization of AR(1) coefficient of
    # noise.
    vec = np.zeros(np.size(param0_fitU))
    vec[idx_param_fitU['a1'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitU wrt to AR(1) coefficient incorrect')
    # We test if the numerical and analytical gradient wrt to the first
    # element of Cholesky factor is correct
    vec = np.zeros(np.size(param0_fitU))
    vec[idx_param_fitU['Cholesky'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitU wrt Cholesky factor incorrect')
    # Test on a random direction
    vec = np.random.randn(np.size(param0_fitU))
    vec = vec / np.linalg.norm(vec)
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec),
                      rtol=1e-5), 'gradient of fitU incorrect'
    # We test the gradient of _fitV wrt to log(SNR^2) assuming no GP prior.
    X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
        XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
        brsa._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
                                        YTY_diag, YTDY_diag, YTFY_diag,
                                        XTX, XTDX, XTFX,
                                        X0TX0, X0TDX0, X0TFX0,
                                        XTX0, XTDX0, XTFX0,
                                        X0TY, X0TDY, X0TFY,
                                        L_full, rho1, n_V, n_X0)
    assert np.shape(XTAcorrX) == (n_V, n_C, n_C), (
        'Dimension of XTAcorrX is wrong by _precompute_ar1_quad_forms()')
    assert XTAcorrY.shape == XTY.shape, (
        'Shape of XTAcorrY is wrong by _precompute_ar1_quad_forms()')
    assert YTAcorrY.shape == YTY_diag.shape, (
        'Shape of YTAcorrY is wrong by _precompute_ar1_quad_forms()')
    assert np.shape(X0TAX0) == (n_V, n_X0, n_X0), (
        'Dimension of X0TAX0 is wrong by _precompute_ar1_quad_forms()')
    assert np.shape(XTAX0) == (n_V, n_C, n_X0), (
        'Dimension of XTAX0 is wrong by _precompute_ar1_quad_forms()')
    assert X0TAY.shape == X0TY.shape, (
        'Shape of X0TAX0 is wrong by _precompute_ar1_quad_forms()')
    assert np.all(np.isfinite(X0TAX0_i)), (
        'Inverse of X0TAX0 includes NaN or Inf')
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitV(
        param0_fitV[idx_param_fitV['log_SNR2']], X0TAX0, XTAX0, X0TAY,
        X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL,
        LTXTAcorrXL, L_full[l_idx], np.tan(rho1 * np.pi / 2), l_idx, n_C, n_T,
        n_V, n_run, n_X0, idx_param_fitV, n_C, False, False)
    vec = np.zeros(np.size(param0_fitV[idx_param_fitV['log_SNR2']]))
    vec[idx_param_fitV['log_SNR2'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C,
                                               False, False)[0],
        param0_fitV[idx_param_fitV['log_SNR2']],
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt log(SNR2) incorrect for model without GP')
    # We test the gradient of _fitV wrt to log(SNR^2) assuming GP prior.
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitV(
        param0_fitV, X0TAX0, XTAX0, X0TAY, X0TAX0_i, XTAcorrX, XTAcorrY,
        YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL, L_full[l_idx],
        np.tan(rho1 * np.pi / 2), l_idx, n_C, n_T, n_V, n_run, n_X0,
        idx_param_fitV, n_C, True, True, dist2, inten_diff2, 100, 100)
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['log_SNR2'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV srt log(SNR2) incorrect for model with GP')
    # We test the gradient wrt spatial length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['c_space']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt spatial length scale of GP incorrect')
    # We test the gradient wrt intensity length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['c_inten']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt intensity length scale of GP incorrect')
    # We test the gradient on a random direction
    vec = np.random.randn(np.size(param0_fitV))
    vec = vec / np.linalg.norm(vec)
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV incorrect')
def test_nureg_determine():
    """The SVHT-based estimator should recover roughly the true rank."""
    from brainiak.reprsimil.brsa import Ncomp_SVHT_MG_DLD_approx
    import numpy as np

    # Rank-5 structured data plus a small amount of white noise.
    low_rank = np.dot(np.random.randn(100, 5), np.random.randn(5, 40))
    data = low_rank + np.random.randn(100, 40) * 0.01
    n_comp = Ncomp_SVHT_MG_DLD_approx(data)
    assert 3 <= n_comp <= 8, (
        'recovered number of components should be in a reasonable range')
def test_half_log_det():
    """_half_log_det should equal half of log(det(matrix))."""
    import numpy as np
    from brainiak.reprsimil.brsa import BRSA

    matrix = np.asarray([[1, 0.2], [0.2, 1]])
    model = BRSA()
    expected = np.log(np.linalg.det(matrix)) / 2
    assert np.isclose(expected, model._half_log_det(matrix)), \
        'half log determinant function is wrong'
def test_n_nureg():
    """BRSA should estimate a plausible number of nuisance regressors."""
    import brainiak.reprsimil.brsa
    import numpy as np

    # Rank-8 structured "noise" with a little white noise on top.
    structured = np.dot(np.random.randn(100, 8), np.random.randn(8, 30))
    noise = structured + np.random.randn(100, 30) * 0.01
    design = np.random.randn(100, 2)
    model = brainiak.reprsimil.brsa.BRSA(n_iter=2)
    model.fit(X=noise, design=design)
    assert 2 < model.n_nureg_ < 16, (
        'n_nureg_ estimation is wrong in BRSA')
| lcnature/brainiak | tests/reprsimil/test_brsa.py | Python | apache-2.0 | 29,837 | [
"Gaussian"
] | 3598455d051875cc9e19c650027991f110d22081060cef2e71384cc42be50609 |
# -*- coding: utf-8 -*-
"""Plist parser plugin for Safari history plist files."""
from dfdatetime import cocoa_time as dfdatetime_cocoa_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class SafariHistoryEventData(events.EventData):
  """Safari history event data.

  Attributes:
    display_title (str): display title of the webpage visited.
    title (str): title of the webpage visited.
    url (str): URL visited.
    visit_count (int): number of times the website was visited.
    was_http_non_get (bool): True if the webpage was visited using a non-GET
        HTTP request.
  """

  # Event data type identifier used to select the matching formatter.
  DATA_TYPE = 'safari:history:visit'

  def __init__(self):
    """Initializes event data."""
    super(SafariHistoryEventData, self).__init__(data_type=self.DATA_TYPE)
    self.display_title = None
    self.title = None
    self.url = None
    self.visit_count = None
    self.was_http_non_get = None
class SafariHistoryPlugin(interface.PlistPlugin):
  """Plist parser plugin for Safari history plist files."""

  NAME = 'safari_history'
  DATA_FORMAT = 'Safari history plist file'

  PLIST_PATH_FILTERS = frozenset([
      interface.PlistPathFilter('History.plist')])

  PLIST_KEYS = frozenset(['WebHistoryDates', 'WebHistoryFileVersion'])

  # pylint: disable=arguments-differ
  def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts Safari history items.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    format_version = match.get('WebHistoryFileVersion', None)
    if format_version != 1:
      parser_mediator.ProduceExtractionWarning(
          'unsupported Safari history version: {0!s}'.format(format_version))
      return

    if 'WebHistoryDates' not in match:
      return

    for history_entry in match.get('WebHistoryDates', {}):
      last_visited_date = history_entry.get('lastVisitedDate', None)
      if last_visited_date is None:
        parser_mediator.ProduceExtractionWarning('missing last visited date')
        continue

      try:
        # Last visited date is a string containing a floating point value.
        timestamp = float(last_visited_date)
      except (TypeError, ValueError):
        parser_mediator.ProduceExtractionWarning(
            'unable to convert last visited date {0:s}'.format(
                last_visited_date))
        continue

      title = history_entry.get('title', None)
      display_title = history_entry.get('displayTitle', None)

      event_data = SafariHistoryEventData()
      event_data.title = title
      # Only preserve the display title when it differs from the title.
      # Previously the comparison was made against event_data.title before
      # title was assigned, so it always compared against None and stored
      # the display title even when it was identical to the title.
      if display_title != title:
        event_data.display_title = display_title
      # Safari stores the URL of the visit under the empty string key.
      event_data.url = history_entry.get('', None)
      event_data.visit_count = history_entry.get('visitCount', None)
      event_data.was_http_non_get = history_entry.get(
          'lastVisitWasHTTPNonGet', None)

      # Convert the floating point value to an integer.
      # TODO: add support for the fractional part of the floating point value.
      timestamp = int(timestamp)
      date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
      parser_mediator.ProduceEventWithEventData(event, event_data)
# Register the Safari history plugin with the plist parser.
plist.PlistParser.RegisterPlugin(SafariHistoryPlugin)
| kiddinn/plaso | plaso/parsers/plist_plugins/safari.py | Python | apache-2.0 | 3,591 | [
"VisIt"
] | 3c184c0785e56de3fada2baf27cc293b97e57475cffebc3a756d9af44d445f39 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Build and Launch iPhone Application in Simulator or install
# the application on the device via iTunes
#
import os, sys, uuid, subprocess, shutil, signal, string, traceback, imp, filecmp, inspect
import platform, time, re, run, glob, codecs, hashlib, datetime, plistlib
from compiler import Compiler, softlink_for_simulator
from projector import Projector
from xml.dom.minidom import parseString
from xml.etree.ElementTree import ElementTree
from os.path import join, splitext, split, exists
from tools import ensure_dev_path

# the template_dir is the path where this file lives on disk
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))

# add the parent and the common directory so we can load libraries from those paths too
sys.path.append(os.path.join(template_dir,'../'))
sys.path.append(os.path.join(template_dir,'../common'))
sys.path.append(os.path.join(template_dir, '../module'))
# Flag toggled elsewhere in this script to signal a successful run.
script_ok = False

from tiapp import *
from css import csscompiler
import localecompiler
from module import ModuleDetector
from tools import *

# File and directory names excluded when copying project resources.
ignoreFiles = ['.gitignore', '.cvsignore']
ignoreDirs = ['.git','.svn', 'CVS']

# need this so unicode works
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
def version_sort(a, b):
    """Comparator ordering version strings from newest to oldest.

    Only the first three characters (major.minor) of each version are
    compared; anything past two places is deliberately ignored.
    """
    left = float(a[0:3])   # ignore more than 2 places
    right = float(b[0:3])  # ignore more than 2 places
    # Descending order: -1 when left is newer, 1 when older, 0 when equal.
    return (left < right) - (left > right)
# this will return the version of the iOS SDK that we have installed
def check_iphone_sdk(s):
    """Return an installed iOS SDK version, preferring the requested one.

    Parses ``xcodebuild -showsdks`` output for ``iphoneos`` SDK entries.
    If the requested version ``s`` is installed it is returned unchanged;
    otherwise the newest installed version (per version_sort) is returned.
    """
    found = []
    output = run.run(["xcodebuild","-showsdks"],True,False)
    #print output
    if output:
        for line in output.split("\n"):
            # SDK entries are the tab-indented lines of the output.
            if line[0:1] == '\t':
                line = line.strip()
                i = line.find('-sdk')
                if i < 0: continue
                type = line[0:i]
                cmd = line[i+5:]
                # e.g. "iphoneos5.0" -> version "5.0"
                if cmd.find("iphoneos")==0:
                    ver = cmd[8:]
                    found.append(ver)
    # The sanity check doesn't have to be as thorough as prereq.
    if s in found:
        return s
    # Sanity check failed. Let's find something close.
    return sorted(found,version_sort)[0]
def dequote(s):
    """Strip surrounding double quotes when the string starts with one.

    Note: only the leading character is checked; the final character is
    removed unconditionally in that case, mirroring shell-style quoting.
    """
    if s.startswith('"'):
        return s[1:-1]
    return s
# force kill the simulator if running
def kill_simulator():
    """Force-terminate any running ios-sim / iPhone Simulator processes.

    Failures from killall (e.g. no matching process) are ignored.
    """
    run.run(['/usr/bin/killall',"ios-sim"],True)
    run.run(['/usr/bin/killall',"iPhone Simulator"],True)
def write_project_property(f,prop,val):
    """Write ``prop=val`` to file ``f`` only when the value has changed.

    NOTE(review): the file is rewritten with just this single property,
    discarding any other lines previously stored in it — presumably each
    property lives in its own file; confirm with callers.
    """
    existing_val = read_project_property(f,prop)
    if existing_val!=val:
        fx = open(f,'w')
        fx.write("%s=%s\n"%(prop,val))
        fx.close()
def read_project_property(f, prop):
    """Return the value of property ``prop`` stored in file ``f``.

    The file contains one ``key=value`` pair per line. Returns None when
    the file does not exist or the property is not present.
    """
    if os.path.exists(f):
        contents = open(f).read()
        for line in contents.splitlines(False):
            # Skip malformed lines; previously they raised ValueError.
            if "=" not in line:
                continue
            # Split on the first '=' only, so values that themselves
            # contain '=' (e.g. base64 data) are preserved intact.
            (k, v) = line.split("=", 1)
            if k == prop:
                return v
    return None
def read_project_appid(f):
    """Return the stored Titanium application id from file ``f``, or None."""
    return read_project_property(f,'TI_APPID')
def read_project_version(f):
    """Return the stored Titanium SDK version from file ``f``, or None."""
    return read_project_property(f,'TI_VERSION')
def infoplist_has_appid(f, appid):
    """Return True when the Info.plist file ``f`` mentions ``appid``.

    Returns False when the file does not exist.
    """
    if os.path.exists(f):
        # Membership test instead of find() > 0: find() returns the match
        # offset, so a match at position 0 was previously reported False.
        # 'with' also ensures the file handle is closed.
        with codecs.open(f, encoding='utf-8') as plist_file:
            return appid in plist_file.read()
    return False
def copy_module_resources(source, target, copy_all=False, force=False):
    """Recursively copy module resources from ``source`` to ``target``.

    Unless ``copy_all`` is set, source-code and web asset files (.html,
    .js, .css, .a, .m, .c, .cpp, .h, .mm) are skipped. Entries listed in
    ignoreFiles / ignoreDirs are always skipped. A file is copied only
    when it is missing at the destination, its size differs, or ``force``
    is set.

    NOTE(review): the size-only comparison will miss same-size content
    changes unless ``force`` is used.
    """
    if not os.path.exists(os.path.expanduser(target)):
        os.makedirs(os.path.expanduser(target))
    for root, dirs, files in os.walk(source):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)	# don't visit ignored directories
        for file in files:
            if copy_all==False and splitext(file)[-1] in ('.html', '.js', '.css', '.a', '.m', '.c', '.cpp', '.h', '.mm'):
                continue
            if file in ignoreFiles:
                continue
            from_ = os.path.join(root, file)
            # Map the source path onto the target tree (first occurrence only).
            to_ = os.path.expanduser(from_.replace(source, target, 1))
            to_directory = os.path.expanduser(split(to_)[0])
            if not exists(to_directory):
                os.makedirs(to_directory)
            # only copy if different filesize or doesn't exist
            if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_) or force:
                if os.path.exists(to_): os.remove(to_)
                shutil.copyfile(from_, to_)
# WARNING: This could be a time bomb waiting to happen, because it mangles
# the app bundle name for NO REASON. Or... does it?
def make_app_name(s):
    """Mangle a project name into an identifier-safe app name.

    Dashes become underscores, characters outside [0-9a-zA-Z_] are
    dropped, and a name starting with digits is prefixed with 'k'.
    """
    allowed = re.compile('[0-9a-zA-Z_]')
    pieces = []
    for ch in s:
        if ch == '-':
            pieces.append('_')
        elif allowed.match(ch) is not None:
            pieces.append(ch)
    name = ''.join(pieces)
    # if name starts with number, we simply append a k to it
    if re.match('^[0-9]+', name):
        name = 'k%s' % name
    return name
def getText(nodelist):
    """Recursively concatenate all text content beneath the given DOM nodes."""
    pieces = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            pieces.append(node.data)
        elif node.nodeType == node.ELEMENT_NODE:
            pieces.append(getText(node.childNodes))
    return "".join(pieces)
def make_map(dict):
    """Recursively convert a plist DOM <dict> element into a Python dict.

    <key> elements name the value node that follows; nested <dict>
    elements recurse, <array> elements collapse to the concatenation of
    their <string> children (or None when empty), and any other element
    becomes its text content, or its tag name when it has no children.

    NOTE(review): the parameter name shadows the builtin ``dict``.
    """
    props = {}
    curkey = None
    for i in dict.childNodes:
        if i.nodeType == 1:  # ELEMENT_NODE
            if i.nodeName == 'key':
                curkey = str(getText(i.childNodes)).strip()
            elif i.nodeName == 'dict':
                props[curkey] = make_map(i)
                curkey = None
            elif i.nodeName == 'array':
                s = i.getElementsByTagName('string')
                if len(s):
                    txt = ''
                    for t in s:
                        txt+=getText(t.childNodes)
                    props[curkey]=txt
                else:
                    props[curkey]=None
                curkey = None
            else:
                if i.childNodes.length > 0:
                    props[curkey] = getText(i.childNodes)
                else:
                    props[curkey] = i.nodeName
                curkey = None
    return props
def dump_resources_listing(rootdir,out):
    """Write a recursive listing of ``rootdir`` with file sizes to ``out``.

    Paths under build/android and build/mobileweb are excluded from the
    listing. Sizes are reported per file in bytes and as a total in MB.

    NOTE(review): sizes of the excluded build/ files are still counted in
    the total because the size is accumulated before the filter.
    """
    out.write("\nFile listing for %s\n\n" % rootdir)
    total = 0
    for root, subFolders, files in os.walk(rootdir):
        for file in files:
            p = os.path.join(root,file)
            s = os.path.getsize(p)
            total+=s
            s = "[%.0f]" % s
            # path relative to rootdir
            p = p[len(rootdir)+1:]
            if p.startswith('build/android') or p.startswith('build/mobileweb'): continue
            out.write(" %s %s\n" % (string.ljust(p,120),string.ljust(s,8)))
    out.write("-" * 130)
    out.write("\nTotal files: %.1f MB\n" % ((total/1024)/1024))
    out.write("\n")
def dump_infoplist(infoplist,out):
    """Echo the contents of the given Info.plist file to the log stream."""
    plist = codecs.open(infoplist, encoding='utf-8').read()
    out.write("Contents of Info.plist\n\n")
    out.write(plist)
    out.write("\n")
    out.write("=" * 130)
    out.write("\n\n")
def read_provisioning_profile(f,o):
    """Extract and parse the plist embedded in a provisioning profile.

    Provisioning profiles wrap an XML plist in binary (CMS) data; the
    payload is located by scanning for the '<?xml' ... '</plist>' span.

    Args:
        f: path to the .mobileprovision file.
        o: log file object the raw XML is echoed to.

    Returns:
        dict built from the top-level plist <dict> via make_map().
    """
    f = open(f,'rb').read()
    b = f.index('<?xml')
    e = f.index('</plist>')
    xml_content = f[b:e+8]
    o.write("Reading provisioning profile:\n\n%s\n" % xml_content)
    dom = parseString(xml_content)
    dict = dom.getElementsByTagName('dict')[0]
    props = make_map(dict)
    return props
def get_aps_env(provisioning_profile):
    """Return the 'aps-environment' (push notification) entitlement.

    Returns None when the provisioning profile has no such entitlement.
    """
    entitlements = provisioning_profile['Entitlements']
    # dict.get replaces the deprecated has_key()/lookup pair with a
    # single, equivalent call (also forward-compatible with Python 3).
    return entitlements.get('aps-environment', None)
def get_task_allow(provisioning_profile):
    """Return the 'get-task-allow' entitlement of the profile."""
    return provisioning_profile['Entitlements']['get-task-allow']
def get_app_prefix(provisioning_profile):
    """Return the profile's ApplicationIdentifierPrefix value."""
    return provisioning_profile['ApplicationIdentifierPrefix']
def get_profile_uuid(provisioning_profile):
    """Return the UUID of the provisioning profile."""
    return provisioning_profile['UUID']
def generate_customized_entitlements(provisioning_profile,appid,uuid,command,out):
    """Build the Entitlements.plist XML for a signed build.

    Copies get-task-allow and, when present, aps-environment from the
    provisioning profile. For 'distribute' builds the fully qualified
    application-identifier and keychain-access-groups entries are also
    embedded using the profile's application identifier prefix.

    NOTE(review): the ``uuid`` parameter is unused here — confirm whether
    callers still depend on this signature.
    """
    get_task_value = get_task_allow(provisioning_profile)
    aps_env = get_aps_env(provisioning_profile)
    buffer = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
"""
    app_prefix = None
    if command=='distribute':
        app_prefix = get_app_prefix(provisioning_profile)
        out.write("Using app_prefix = %s\n\n" % (app_prefix))
        buffer+="""
<key>application-identifier</key>
<string>%s.%s</string>
""" % (app_prefix,appid)
    buffer+="<key>get-task-allow</key>\n <%s/>" % get_task_value
    if aps_env!=None:
        buffer+="\n<key>aps-environment</key>\n <string>%s</string>" % aps_env
    if command=='distribute':
        buffer+="""
<key>keychain-access-groups</key>
<array>
<string>%s.%s</string>
</array>
""" % (app_prefix,appid)
    buffer+="""
</dict>
</plist>"""
    return buffer
def xcode_version():
    """Return the installed Xcode major.minor version as a float.

    Parses the first line of ``xcodebuild -version`` (e.g. "Xcode 4.3.2"
    yields 4.3). Returns None implicitly when no output is produced.
    """
    output = run.run(['xcodebuild','-version'],True,False)
    if output:
        versionLine = output.split('\n')[0]
        return float(versionLine.split(' ')[1].rpartition('.')[0])
def distribute_xc3(uuid, provisioning_profile, name, log):
    """Archive the built app into Xcode 3's Archived Applications layout.

    Mimics what Xcode does when packaging an application for the app
    uploader process. Copies the .app bundle and its dSYM into a
    freshly created UUID-named archive directory and writes the
    ArchiveInfo.plist metadata Xcode expects.

    NOTE(review): the ``uuid`` parameter shadows the ``uuid`` module and
    ``uuid.uuid4()`` is then called on it — callers presumably pass the
    ``uuid`` module itself; confirm before changing the signature.
    """
    # starting in 4.0, apple now requires submission through XCode
    # this code mimics what xcode does on its own to package the
    # application for the app uploader process
    log.write("Creating distribution for xcode3...\n");
    archive_uuid = str(uuid.uuid4()).upper()
    archive_dir = os.path.join(os.path.expanduser("~/Library/MobileDevice/Archived Applications"),archive_uuid)
    archive_app_dir = os.path.join(archive_dir,"%s.app" % name)
    archive_appdsym_dir = os.path.join(archive_dir,"%s.app.dSYM" % name)
    os.makedirs(archive_app_dir)
    os.makedirs(archive_appdsym_dir)
    os.system('ditto "%s.app" "%s"' % (name,archive_app_dir))
    os.system('ditto "%s.app.dSYM" "%s"' % (name,archive_appdsym_dir))
    archive_plist = os.path.join(archive_dir,'ArchiveInfo.plist')
    log.write("Writing archive plist to: %s\n\n" % archive_plist)
    profile_uuid = get_profile_uuid(provisioning_profile)
    # Normalize the app's Info.plist to XML so plistlib can read it.
    os.system("/usr/bin/plutil -convert xml1 -o \"%s\" \"%s\"" % (os.path.join(archive_dir,'Info.xml.plist'),os.path.join(archive_app_dir,'Info.plist')))
    p = plistlib.readPlist(os.path.join(archive_dir,'Info.xml.plist'))
    archive_metadata = {
        'CFBundleIdentifier':p['CFBundleIdentifier'],
        'CFBundleVersion':p['CFBundleVersion'],
        'XCApplicationFilename':'%s.app' %name,
        'XCApplicationName':name,
        # Cocoa epoch offset: seconds between 1970-01-01 and 2001-01-01.
        'XCArchivedDate': time.time() - 978307200.0,
        'XCArchiveUUID':archive_uuid,
        'XCInfoPlist' : p,
        'XCProfileUUID': profile_uuid
    }
    log.write("%s\n\n" % archive_metadata)
    plistlib.writePlist(archive_metadata,archive_plist)
    os.remove(os.path.join(archive_dir,'Info.xml.plist'))
def distribute_xc4(name, icon, log):
    """Archive the built app into Xcode 4's .xcarchive organizer layout.

    Creates a timestamp-named .xcarchive bundle under the user's Xcode
    Archives directory, copies in the .app and its dSYM, and writes the
    archive Info.plist that Xcode 4's Organizer reads.
    """
    # Locations of bundle, app binary, dsym info
    log.write("Creating distribution for xcode4...\n");
    timestamp = datetime.datetime.now()
    date = timestamp.date().isoformat()
    # NOTE(review): this local rebinds the name 'time', shadowing the
    # imported time module for the remainder of this function.
    time = timestamp.time().strftime('%H-%M-%S')
    archive_name = os.path.join(date,'%s_%s' % (name, time))
    archive_bundle = os.path.join(os.path.expanduser("~/Library/Developer/Xcode/Archives"),"%s.xcarchive" % archive_name)
    archive_app = os.path.join(archive_bundle,"Products","Applications","%s.app" % name)
    archive_dsym = os.path.join(archive_bundle,"dSYM")
    # create directories
    if not os.access(archive_bundle, os.F_OK): os.makedirs(archive_bundle)
    if not os.access(archive_app, os.F_OK): os.makedirs(archive_app)
    if not os.access(archive_dsym, os.F_OK): os.makedirs(archive_dsym)
    # copy app bundles into the approps. places
    os.system('ditto "%s.app" "%s"' % (name,archive_app))
    os.system('ditto "%s.app.dSYM" "%s"' % (name,archive_dsym))
    # plist processing time - this is the biggest difference from XC3.
    archive_info_plist = os.path.join(archive_bundle,'Info.plist')
    log.write("Writing archive plist to: %s\n\n" % archive_info_plist)
    # load existing plist values so that we can use them in generating the archive
    # plist
    os.system('/usr/bin/plutil -convert xml1 -o "%s" "%s"' % (os.path.join(archive_bundle,'Info.xml.plist'),os.path.join(archive_app,'Info.plist')))
    project_info_plist = plistlib.readPlist(os.path.join(archive_bundle,'Info.xml.plist'))
    appbundle = "Applications/%s.app" % name
    # NOTE: We chop off the end '.' of 'CFBundleVersion' to provide the 'short' version
    version = project_info_plist['CFBundleVersion']
    app_version_ = version.split('.')
    if(len(app_version_) > 3):
        version = app_version_[0]+'.'+app_version_[1]+'.'+app_version_[2]
    archive_info = {
        'ApplicationProperties' : {
            'ApplicationPath' : appbundle,
            'CFBundleIdentifier' : project_info_plist['CFBundleIdentifier'],
            'CFBundleShortVersionString' : version,
            'IconPaths' : [os.path.join(appbundle,icon), os.path.join(appbundle,icon)]
        },
        'ArchiveVersion' : float(1),
        'CreationDate' : datetime.datetime.utcnow(),
        'Name' : name,
        'SchemeName' : name
    }
    # write out the archive plist and clean up
    log.write("%s\n\n" % archive_info)
    plistlib.writePlist(archive_info,archive_info_plist)
    os.remove(os.path.join(archive_bundle,'Info.xml.plist'))
    # Workaround for dumb xcode4 bug that doesn't update the organizer unless
    # files are touched in a very specific manner
    temp = os.path.join(os.path.expanduser("~/Library/Developer/Xcode/Archives"),"temp")
    os.rename(archive_bundle,temp)
    os.rename(temp,archive_bundle)
def is_indexing_enabled(tiapp, simulator_dir, **kwargs):
    """Return True when Spotlight indexing covers ``simulator_dir``.

    Used to decide whether mdfind can be used to find simulator log
    files. Requires darwin >= 10 (Snow Leopard or later), honors the
    'ti.ios.enablemdfind' app property, and inspects the per-volume
    status reported by ``mdutil -a -s``. The ``platform_release`` and
    ``indexer_status`` kwargs allow injection for testing.
    """
    # darwin versions:
    # - 9.x: Leopard (10.5)
    # - 10.x: Snow Leopard (10.6)
    # - 11.x: Lion (10.7)
    # for testing purposes
    platform_release = kwargs.get("platform_release", platform.release())
    darwin_version = [int(n) for n in platform_release.split(".")]
    enable_mdfind = True
    if tiapp.has_app_property('ti.ios.enablemdfind'):
        enable_mdfind = tiapp.to_bool(tiapp.get_app_property('ti.ios.enablemdfind'))
    # mdfind is specifically disabled, so don't use it
    if not enable_mdfind:
        return False
    # pre-Leopard, mdfind / mdutil don't exist
    if darwin_version[0] < 10:
        return False
    # for testing purposes
    indexer_status = kwargs.get("indexer_status")
    if indexer_status == None:
        indexer_status = run.run(['mdutil', '-a', '-s'], True)
    # An error occurred running mdutil, play it safe
    if indexer_status == None:
        return False
    lines = indexer_status.splitlines()
    mount_point_status = {}
    # mdutil output alternates "mount_point:" / status line pairs.
    for i in range(0, len(lines), 2):
        mount_point = lines[i].rstrip(':')
        if len(lines) > (i+1):
            status = lines[i+1].strip('\t.')
            # Only add mount points that the simulator_dir starts with
            if simulator_dir.startswith(mount_point):
                mount_point_status[mount_point] = status
        # mdutil must be disabled if we don't get the right amount of output
        else:
            return False
    if len(mount_point_status) > 0:
        # There may be multiple volumes that have a mount point that the
        # simulator_dir matches, so the one with the longest length
        # *should* be the most specific / correct mount point.
        mount_points = mount_point_status.keys()
        mount_points.sort(lambda a, b: cmp(len(b), len(a)))
        status = mount_point_status[mount_points[0]]
        if 'Indexing enabled' in status:
            return True
    return False
# Boilerplate header emitted at the top of generated Objective-C sources.
HEADER = """/**
* Appcelerator Titanium Mobile
* This is generated code. Do not modify. Your changes *will* be lost.
* Generated code is Copyright (c) 2009-2011 by Appcelerator, Inc.
* All Rights Reserved.
*/
#import <Foundation/Foundation.h>
"""

# Opening of the generated ApplicationDefaults implementation; the body
# of copyDefaults is appended per-property by copy_tiapp_properties().
DEFAULTS_IMPL_HEADER= """#import "TiUtils.h"
#import "ApplicationDefaults.h"
@implementation ApplicationDefaults
+ (NSMutableDictionary*) copyDefaults
{
NSMutableDictionary * _property = [[NSMutableDictionary alloc] init];\n
"""

# Closes the generated @implementation.
FOOTER ="""
@end
"""
def copy_tiapp_properties(project_dir):
    """Generate Classes/ApplicationDefaults.m from tiapp.xml <property> tags.

    Emits Objective-C code seeding the application's default properties
    (string, bool, int, double; untyped falls back to string). Returns
    True when the generated file differs from the one already present in
    the build directory (and replaces it), False when it is unchanged.
    """
    tiapp = ElementTree()
    src_root = os.path.dirname(sys.argv[0])
    assets_tiappxml = os.path.join(project_dir,'tiapp.xml')
    if not os.path.exists(assets_tiappxml):
        shutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml)
    tiapp.parse(open(assets_tiappxml, 'r'))
    # Generated file is written to the current directory first, then
    # compared with / copied over the build tree copy below.
    impf = open("ApplicationDefaults.m",'w+')
    appl_default = os.path.join(project_dir,'build','iphone','Classes','ApplicationDefaults.m')
    impf.write(HEADER)
    impf.write(DEFAULTS_IMPL_HEADER)
    for property_el in tiapp.findall("property"):
        name = property_el.get("name")
        type = property_el.get("type")
        value = property_el.text
        if name == None: continue
        if value == None: value = ""
        if type == "string":
            impf.write(""" [_property setObject:[TiUtils stringValue:@"%s"] forKey:@"%s"];\n"""%(value,name))
        elif type == "bool":
            impf.write(""" [_property setObject:[NSNumber numberWithBool:[TiUtils boolValue:@"%s"]] forKey:@"%s"];\n"""%(value,name))
        elif type == "int":
            impf.write(""" [_property setObject:[NSNumber numberWithInt:[TiUtils intValue:@"%s"]] forKey:@"%s"];\n"""%(value,name))
        elif type == "double":
            impf.write(""" [_property setObject:[NSNumber numberWithDouble:[TiUtils doubleValue:@"%s"]] forKey:@"%s"];\n"""%(value,name))
        elif type == None:
            impf.write(""" [_property setObject:[TiUtils stringValue:@"%s"] forKey:@"%s"];\n"""%(value,name))
        else:
            print """[WARN] Cannot set property "%s" , type "%s" not supported""" % (name,type)
    if (len(tiapp.findall("property")) > 0) :
        impf.write("\n return _property;\n}")
    else:
        impf.write("\n return NULL;\n}")
    impf.write(FOOTER)
    impf.close()
    # Only replace the build-tree copy when the content actually changed.
    if open(appl_default,'r').read() == open('ApplicationDefaults.m','r').read():
        os.remove('ApplicationDefaults.m')
        return False
    else:
        shutil.copyfile('ApplicationDefaults.m',appl_default)
        os.remove('ApplicationDefaults.m')
        return True
def cleanup_app_logfiles(tiapp, log_id, iphone_version):
	"""Remove stale <log_id>.log files from the iPhone Simulator directory.

	Leftover log files from a previous run confuse our log tailing, so they
	must be deleted before the simulator is launched. Uses Spotlight (mdfind)
	when indexing is enabled for the volume, otherwise falls back to walking
	the directory tree by hand.
	"""
	print("[DEBUG] finding old log files")
	sys.stdout.flush()
	simulator_dir = os.path.expanduser('~/Library/Application Support/iPhone Simulator/%s' % iphone_version)
	# nothing to clean when the simulator directory was never created
	if not os.path.exists(simulator_dir):
		return
	target_name = '%s.log' % log_id
	matches = None
	# spotlight search is much faster when the indexer covers this volume
	if is_indexing_enabled(tiapp, simulator_dir):
		print("[DEBUG] Searching for old log files with mdfind...")
		sys.stdout.flush()
		matches = run.run(['mdfind',
			'-onlyin', simulator_dir,
			'-name', target_name
		], True)
	if matches is None:
		# indexer disabled (or mdfind failed): crawl the tree manually
		print("[DEBUG] Searching for log files without mdfind...")
		sys.stdout.flush()
		stale = []
		for root, dirs, files in os.walk(os.path.expanduser(simulator_dir)):
			for entry in files:
				if entry == target_name:
					stale.append(os.path.join(root, entry))
		for path in stale:
			print("[DEBUG] removing old log file: %s" % path)
			sys.stdout.flush()
			os.remove(path)
	else:
		for path in matches.splitlines(False):
			print("[DEBUG] removing old log file: %s" % path)
			os.remove(path)
#
# this script is invoked from our tooling but you can run from command line too if
# you know the arguments
#
# the current pattern is <command> [arguments]
#
# where the arguments are dependent on the command being passed
#
def main(args):
global script_ok
argc = len(args)
if argc < 2 or argc==2 and (args[1]=='--help' or args[1]=='-h'):
print "%s <command> <version> <project_dir> <appid> <name> [options]" % os.path.basename(args[0])
print
print "available commands: "
print
print " install install the app to itunes for testing on iphone"
print " simulator build and run on the iphone simulator"
print " adhoc build for adhoc distribution"
print " distribute build final distribution bundle"
print " xcode build from within xcode"
print " run build and run app from project folder"
sys.exit(1)
print "[INFO] One moment, building ..."
sys.stdout.flush()
start_time = time.time()
command = args[1].decode("utf-8")
ensure_dev_path()
target = 'Debug'
deploytype = 'development'
devicefamily = 'iphone'
debug = False
build_only = False
simulator = False
xcode_build = False
force_xcode = False
simtype = devicefamily
# when you run from xcode, we'll pass xcode as the command and the
# xcode script will simply pass some additional args as well as xcode
# will add some additional useful stuff to the ENVIRONMENT and we pull
# those values out here
if command == 'xcode':
xcode_build = True
src_root = os.environ['SOURCE_ROOT']
project_dir = os.path.abspath(os.path.join(src_root,'../','../'))
name = os.environ['PROJECT_NAME']
target = os.environ['CONFIGURATION']
appid = os.environ['TI_APPID']
arch = os.environ['CURRENT_ARCH']
sdk_name = os.environ['SDK_NAME']
iphone_version = sdk_name.replace('iphoneos','').replace('iphonesimulator','')
# SUPPORTED_DEVICE_FAMILIES 1 or 2 or both
# TARGETED_DEVICE_FAMILY 1 or 2
target_device = os.environ['TARGETED_DEVICE_FAMILY']
if target_device == '1':
devicefamily = 'iphone'
elif target_device == '2':
devicefamily = 'ipad'
elif target_device == '1,2':
devicefamily = 'universal'
if arch == 'i386':
# simulator always indicates simulator
deploytype = 'development'
else:
# if arch!=i386 indicates a build for device
if target=='Debug':
# non-simulator + debug build indicates test on device
deploytype = 'test'
else:
# non-simulator + release build indicates package for distribution
deploytype = 'production'
#Ensure the localization files are copied in the application directory
out_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
localecompiler.LocaleCompiler(name,project_dir,devicefamily,deploytype,out_dir).compile()
compiler = Compiler(project_dir,appid,name,deploytype)
compiler.compileProject(xcode_build,devicefamily,iphone_version)
script_ok = True
sys.exit(0)
else:
# the run command is when you run from titanium using the run command
# and it will run the project in the current directory immediately in the simulator
# from the command line
if command == 'run':
if argc < 3:
print "Usage: %s run <project_dir> [ios_version]" % os.path.basename(args[0])
sys.exit(1)
if argc == 3:
iphone_version = check_iphone_sdk('4.0')
else:
iphone_version = dequote(args[3].decode("utf-8"))
project_dir = os.path.expanduser(dequote(args[2].decode("utf-8")))
iphonesim = os.path.abspath(os.path.join(template_dir,'ios-sim'))
iphone_dir = os.path.abspath(os.path.join(project_dir,'build','iphone'))
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
appid = ti.properties['id']
name = ti.properties['name']
command = 'simulator' # switch it so that the rest of the stuff works
else:
iphone_version = dequote(args[2].decode("utf-8"))
iphonesim = os.path.abspath(os.path.join(template_dir,'ios-sim'))
project_dir = os.path.expanduser(dequote(args[3].decode("utf-8")))
appid = dequote(args[4].decode("utf-8"))
name = dequote(args[5].decode("utf-8"))
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
app_name = make_app_name(name)
iphone_dir = os.path.abspath(os.path.join(project_dir,'build','iphone'))
# We need to create the iphone dir if necessary, now that
# the tiapp.xml allows build target selection
if not os.path.isdir(iphone_dir):
if os.path.exists(iphone_dir):
os.remove(iphone_dir)
os.makedirs(iphone_dir)
project_xcconfig = os.path.join(iphone_dir,'project.xcconfig')
target = 'Release'
ostype = 'os'
version_file = None
log_id = None
provisioning_profile = None
debughost = None
debugport = None
postbuild_modules = []
# starting in 1.4, you don't need to actually keep the build/iphone directory
# if we don't find it, we'll just simply re-generate it
if not os.path.exists(iphone_dir):
from iphone import IPhone
print "[INFO] Detected missing project but that's OK. re-creating it..."
iphone_creator = IPhone(name,appid)
iphone_creator.create(iphone_dir,True)
sys.stdout.flush()
# we use different arguments dependent on the command
# pluck those out here
if command == 'distribute':
iphone_version = check_iphone_sdk(iphone_version)
link_version = iphone_version
dist_keychain = None
appuuid = dequote(args[6].decode("utf-8"))
dist_name = dequote(args[7].decode("utf-8"))
output_dir = os.path.expanduser(dequote(args[8].decode("utf-8")))
if argc > 9:
devicefamily = dequote(args[9].decode("utf-8"))
if argc > 10:
dist_keychain = dequote(args[10].decode("utf-8"))
print "[INFO] Switching to production mode for distribution"
deploytype = 'production'
elif command in ['simulator', 'build']:
link_version = check_iphone_sdk(iphone_version)
deploytype = 'development'
debug = True
simulator = command == 'simulator'
build_only = command == 'build'
target = 'Debug'
ostype = 'simulator'
if argc > 6:
devicefamily = dequote(args[6].decode("utf-8"))
if argc > 7:
simtype = dequote(args[7].decode("utf-8"))
else:
# 'universal' helpfully translates into iPhone here... just in case.
simtype = devicefamily
if argc > 8:
# this is host:port from the debugger
debughost = dequote(args[8].decode("utf-8"))
if debughost=='':
debughost = None
debugport = None
else:
debughost,debugport = debughost.split(":")
elif command in ['install', 'adhoc']:
iphone_version = check_iphone_sdk(iphone_version)
link_version = iphone_version
dist_keychain = None
appuuid = dequote(args[6].decode("utf-8"))
dist_name = dequote(args[7].decode("utf-8"))
if argc > 8:
devicefamily = dequote(args[8].decode("utf-8"))
if argc > 9:
dist_keychain = dequote(args[9].decode("utf-8"))
if command == 'install':
target = 'Debug'
deploytype = 'test'
elif command == 'adhoc':
target = 'Release'
deploytype = 'production'
# setup up the useful directories we need in the script
build_out_dir = os.path.abspath(os.path.join(iphone_dir,'build'))
build_dir = os.path.abspath(os.path.join(build_out_dir,'%s-iphone%s'%(target,ostype)))
app_dir = os.path.abspath(os.path.join(build_dir,name+'.app'))
binary = os.path.join(app_dir,name)
sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
iphone_resources_dir = os.path.join(iphone_dir,'Resources')
version_file = os.path.join(iphone_resources_dir,'.version')
force_rebuild = read_project_version(project_xcconfig)!=sdk_version or not os.path.exists(version_file)
infoplist = os.path.join(iphone_dir,'Info.plist')
githash = None
custom_fonts = []
# if we're not running in the simulator we want to clean out the build directory
if command!='simulator' and os.path.exists(build_out_dir):
shutil.rmtree(build_out_dir)
if not os.path.exists(build_out_dir):
os.makedirs(build_out_dir)
# write out the build log, useful for debugging
o = codecs.open(os.path.join(build_out_dir,'build.log'),'w',encoding='utf-8')
		def log(msg):
			"""Echo msg to the console and mirror it into the build.log handle `o`."""
			print msg
			o.write(msg)
try:
buildtime = datetime.datetime.now()
o.write("%s\n" % ("="*80))
o.write("Appcelerator Titanium Diagnostics Build Log\n")
o.write("The contents of this file are useful to send to Appcelerator Support if\n")
o.write("reporting an issue to help us understand your environment, build settings\n")
o.write("and aid in debugging. Please attach this log to any issue that you report.\n")
o.write("%s\n\n" % ("="*80))
o.write("Starting build at %s\n\n" % buildtime.strftime("%m/%d/%y %H:%M"))
# write out the build versions info
versions_txt = read_config(os.path.join(template_dir,'..','version.txt'))
o.write("Build details:\n\n")
for key in versions_txt:
o.write(" %s=%s\n" % (key,versions_txt[key]))
o.write("\n\n")
if versions_txt.has_key('githash'):
githash = versions_txt['githash']
o.write("Script arguments:\n")
for arg in args:
o.write(unicode(" %s\n" % arg, 'utf-8'))
o.write("\n")
o.write("Building from: %s\n" % template_dir)
o.write("Platform: %s\n\n" % platform.version())
# print out path to debug
xcode_path=run.run(["/usr/bin/xcode-select","-print-path"],True,False)
if xcode_path:
o.write("Xcode path is: %s\n" % xcode_path)
else:
o.write("Xcode path undetermined\n")
# find the module directory relative to the root of the SDK
titanium_dir = os.path.abspath(os.path.join(template_dir,'..','..','..','..'))
tp_module_dir = os.path.abspath(os.path.join(titanium_dir,'modules','iphone'))
force_destroy_build = command!='simulator'
detector = ModuleDetector(project_dir)
missing_modules, modules = detector.find_app_modules(ti, 'iphone')
module_lib_search_path, module_asset_dirs = locate_modules(modules, project_dir, app_dir, log)
common_js_modules = []
if len(missing_modules) != 0:
print '[ERROR] Could not find the following required iOS modules:'
for module in missing_modules:
print "[ERROR]\tid: %s\tversion: %s" % (module['id'], module['version'])
exit(1)
# search for modules that the project is using
# and make sure we add them to the compile
for module in modules:
if module.js:
common_js_modules.append(module)
continue
module_id = module.manifest.moduleid.lower()
module_version = module.manifest.version
module_lib_name = ('lib%s.a' % module_id).lower()
# check first in the local project
local_module_lib = os.path.join(project_dir, 'modules', 'iphone', module_lib_name)
local = False
if os.path.exists(local_module_lib):
module_lib_search_path.append([module_lib_name, local_module_lib])
local = True
log("[INFO] Detected third-party module: %s" % (local_module_lib))
else:
if module.lib is None:
module_lib_path = module.get_resource(module_lib_name)
log("[ERROR] Third-party module: %s/%s missing library at %s" % (module_id, module_version, module_lib_path))
sys.exit(1)
module_lib_search_path.append([module_lib_name, os.path.abspath(module.lib).rsplit('/',1)[0]])
log("[INFO] Detected third-party module: %s/%s" % (module_id, module_version))
force_xcode = True
if not local:
# copy module resources
img_dir = module.get_resource('assets', 'images')
if os.path.exists(img_dir):
dest_img_dir = os.path.join(app_dir, 'modules', module_id, 'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
module_asset_dirs.append([img_dir, dest_img_dir])
# copy in any module assets
module_assets_dir = module.get_resource('assets')
if os.path.exists(module_assets_dir):
module_dir = os.path.join(app_dir, 'modules', module_id)
module_asset_dirs.append([module_assets_dir, module_dir])
full_version = sdk_version
if 'version' in versions_txt:
full_version = versions_txt['version']
if 'timestamp' in versions_txt or 'githash' in versions_txt:
full_version += ' ('
if 'timestamp' in versions_txt:
full_version += '%s' % versions_txt['timestamp']
if 'githash' in versions_txt:
full_version += ' %s' % versions_txt['githash']
full_version += ')'
print "[INFO] Titanium SDK version: %s" % full_version
print "[INFO] iPhone Device family: %s" % devicefamily
print "[INFO] iPhone SDK version: %s" % iphone_version
if simulator or build_only:
print "[INFO] iPhone simulated device: %s" % simtype
# during simulator we need to copy in standard built-in module files
# since we might not run the compiler on subsequent launches
for module_name in ('facebook','ui'):
img_dir = os.path.join(template_dir,'modules',module_name,'images')
dest_img_dir = os.path.join(app_dir,'modules',module_name,'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
module_asset_dirs.append([img_dir,dest_img_dir])
# when in simulator since we point to the resources directory, we need
# to explicitly copy over any files
ird = os.path.join(project_dir,'Resources','iphone')
if os.path.exists(ird):
module_asset_dirs.append([ird,app_dir])
# We also need to copy over the contents of 'platform/iphone'
platform_iphone = os.path.join(project_dir,'platform','iphone')
if os.path.exists(platform_iphone):
module_asset_dirs.append([platform_iphone,app_dir])
for ext in ('ttf','otf'):
for f in glob.glob('%s/*.%s' % (os.path.join(project_dir,'Resources'),ext)):
custom_fonts.append(f)
if not (simulator or build_only):
version = ti.properties['version']
# we want to make sure in debug mode the version always changes
version = "%s.%d" % (version,time.time())
if (deploytype != 'production'):
ti.properties['version']=version
pp = os.path.expanduser("~/Library/MobileDevice/Provisioning Profiles/%s.mobileprovision" % appuuid)
provisioning_profile = read_provisioning_profile(pp,o)
create_info_plist(ti, template_dir, project_dir, infoplist)
applogo = None
clean_build = False
# check to see if the appid is different (or not specified) - we need to re-generate
if read_project_appid(project_xcconfig)!=appid or not infoplist_has_appid(infoplist,appid):
clean_build = True
force_xcode = True
new_lib_hash = None
lib_hash = None
existing_git_hash = None
# this code simply tries and detect if we're building a different
# version of the project (or same version but built from different git hash)
# and if so, make sure we force rebuild so to propagate any code changes in
# source code (either upgrade or downgrade)
if os.path.exists(app_dir):
if os.path.exists(version_file):
line = open(version_file).read().strip()
lines = line.split(",")
v = lines[0]
log_id = lines[1]
if len(lines) > 2:
lib_hash = lines[2]
existing_git_hash = lines[3]
if lib_hash==None:
force_rebuild = True
else:
if template_dir==v and force_rebuild==False:
force_rebuild = False
else:
log_id = None
else:
force_rebuild = True
else:
force_rebuild = True
o.write("\ngithash=%s, existing_git_hash=%s\n" %(githash,existing_git_hash))
if githash!=existing_git_hash:
force_rebuild = True
# we want to read the md5 of the libTiCore.a library since it must match
# the current one we're building and if not, we need to force a rebuild since
# that means we've copied in a different version of the library and we need
# to rebuild clean to avoid linking errors
source_lib=os.path.join(template_dir,'libTiCore.a')
fd = open(source_lib,'rb')
m = hashlib.md5()
m.update(fd.read(1024)) # just read 1K, it's binary
new_lib_hash = m.hexdigest()
fd.close()
if new_lib_hash!=lib_hash:
force_rebuild=True
o.write("forcing rebuild since libhash (%s) not matching (%s)\n" % (lib_hash,new_lib_hash))
lib_hash=new_lib_hash
# when we force rebuild, we need to re-compile and re-copy source, libs etc
if force_rebuild:
o.write("Performing full rebuild\n")
print "[INFO] Performing full rebuild. This will take a little bit. Hold tight..."
sys.stdout.flush()
project = Projector(name,sdk_version,template_dir,project_dir,appid)
project.create(template_dir,iphone_dir)
force_xcode = True
if os.path.exists(app_dir): shutil.rmtree(app_dir)
# we have to re-copy if we have a custom version
create_info_plist(ti, template_dir, project_dir, infoplist)
# since compiler will generate the module dependencies, we need to
# attempt to compile to get it correct for the first time.
compiler = Compiler(project_dir,appid,name,deploytype)
compiler.compileProject(xcode_build,devicefamily,iphone_version,True)
else:
if simulator:
softlink_for_simulator(project_dir,app_dir)
contents="TI_VERSION=%s\n"% sdk_version
contents+="TI_SDK_DIR=%s\n" % template_dir.replace(sdk_version,'$(TI_VERSION)')
contents+="TI_APPID=%s\n" % appid
contents+="OTHER_LDFLAGS[sdk=iphoneos*]=$(inherited) -weak_framework iAd\n"
contents+="OTHER_LDFLAGS[sdk=iphonesimulator*]=$(inherited) -weak_framework iAd\n"
contents+="#include \"module\"\n"
xcconfig = open(project_xcconfig,'w+')
xccontents = xcconfig.read()
if contents!=xccontents:
o.write("writing contents of %s:\n\n%s\n" % (project_xcconfig,contents))
o.write("old contents\n\n%s\n" % (xccontents))
xcconfig.write(contents)
xcconfig.close()
else:
o.write("Skipping writing contents of xcconfig %s\n" % project_xcconfig)
# write out any modules into the xcode project
			# this must be done after project create above or this will be overridden
link_modules(module_lib_search_path, name, iphone_dir)
cwd = os.getcwd()
# check to see if the symlink exists and that it points to the
# right version of the library
libticore = os.path.join(template_dir,'libTiCore.a')
make_link = True
symlink = os.path.join(iphone_dir,'lib','libTiCore.a')
if os.path.islink(symlink):
path = os.path.realpath(symlink)
if path.find(sdk_version) > 0:
make_link = False
if make_link:
libdir = os.path.join(iphone_dir,'lib')
if not os.path.exists(libdir): os.makedirs(libdir)
os.chdir(libdir)
# a broken link will not return true on os.path.exists
# so we need to use brute force
try:
os.unlink("libTiCore.a")
except:
pass
try:
os.symlink(libticore,"libTiCore.a")
except:
pass
os.chdir(cwd)
# if the lib doesn't exist, force a rebuild since it's a new build
if not os.path.exists(os.path.join(iphone_dir,'lib','libtiverify.a')):
shutil.copy(os.path.join(template_dir,'libtiverify.a'),os.path.join(iphone_dir,'lib','libtiverify.a'))
if not os.path.exists(os.path.join(iphone_dir,'lib','libti_ios_debugger.a')):
shutil.copy(os.path.join(template_dir,'libti_ios_debugger.a'),os.path.join(iphone_dir,'lib','libti_ios_debugger.a'))
# compile JSS files
cssc = csscompiler.CSSCompiler(os.path.join(project_dir,'Resources'),devicefamily,appid)
app_stylesheet = os.path.join(iphone_dir,'Resources','stylesheet.plist')
asf = codecs.open(app_stylesheet,'w','utf-8')
asf.write(cssc.code)
asf.close()
# compile debugger file
debug_plist = os.path.join(iphone_dir,'Resources','debugger.plist')
# Force an xcodebuild if the debugger.plist has changed
force_xcode = write_debugger_plist(debughost, debugport, template_dir, debug_plist)
if command not in ['simulator', 'build']:
# compile plist into binary format so it's faster to load
# we can be slow on simulator
os.system("/usr/bin/plutil -convert binary1 \"%s\"" % app_stylesheet)
o.write("Generated the following stylecode code:\n\n")
o.write(cssc.code)
o.write("\n")
# generate the Info.plist file with the appropriate device family
if devicefamily!=None:
applogo = ti.generate_infoplist(infoplist,appid,devicefamily,project_dir,iphone_version)
else:
applogo = ti.generate_infoplist(infoplist,appid,'iphone',project_dir,iphone_version)
# attempt to load any compiler plugins
if len(ti.properties['plugins']) > 0:
local_compiler_dir = os.path.abspath(os.path.join(project_dir,'plugins'))
tp_compiler_dir = os.path.abspath(os.path.join(titanium_dir,'plugins'))
if not os.path.exists(tp_compiler_dir) and not os.path.exists(local_compiler_dir):
o.write("+ Missing plugins directory at %s\n" % tp_compiler_dir)
print "[ERROR] Build Failed (Missing plugins directory). Please see output for more details"
sys.stdout.flush()
sys.exit(1)
compiler_config = {
'platform':'ios',
'devicefamily':devicefamily,
'simtype':simtype,
'tiapp':ti,
'project_dir':project_dir,
'titanium_dir':titanium_dir,
'appid':appid,
'iphone_version':iphone_version,
'template_dir':template_dir,
'project_name':name,
'command':command,
'deploytype':deploytype,
'build_dir':build_dir,
'app_name':app_name,
'app_dir':app_dir,
'iphone_dir':iphone_dir
}
for plugin in ti.properties['plugins']:
local_plugin_file = os.path.join(local_compiler_dir,plugin['name'],'plugin.py')
plugin_file = os.path.join(tp_compiler_dir,plugin['name'],plugin['version'],'plugin.py')
if not os.path.exists(local_plugin_file) and not os.path.exists(plugin_file):
o.write("+ Missing plugin at %s (checked %s also)\n" % (plugin_file,local_plugin_file))
print "[ERROR] Build Failed (Missing plugin for %s). Please see output for more details" % plugin['name']
sys.stdout.flush()
sys.exit(1)
o.write("+ Detected plugin: %s/%s\n" % (plugin['name'],plugin['version']))
print "[INFO] Detected compiler plugin: %s/%s" % (plugin['name'],plugin['version'])
code_path = plugin_file
if os.path.exists(local_plugin_file):
code_path = local_plugin_file
o.write("+ Loading compiler plugin at %s\n" % code_path)
compiler_config['plugin']=plugin
fin = open(code_path, 'rb')
m = hashlib.md5()
m.update(open(code_path,'rb').read())
code_hash = m.hexdigest()
p = imp.load_source(code_hash, code_path, fin)
module_functions = dict(inspect.getmembers(p, inspect.isfunction))
if module_functions.has_key('postbuild'):
print "[DBEUG] Plugin has postbuild"
o.write("+ Plugin has postbuild")
postbuild_modules.append((plugin['name'], p))
p.compile(compiler_config)
fin.close()
try:
os.chdir(iphone_dir)
# we always target backwards to 4.0 even when we use a later
# version iOS SDK. this ensures our code will run on old devices
# no matter which SDK we compile with
deploy_target = "IPHONEOS_DEPLOYMENT_TARGET=4.0"
device_target = 'TARGETED_DEVICE_FAMILY=1' # this is non-sensical, but you can't pass empty string
# clean means we need to nuke the build
if clean_build or force_destroy_build:
print "[INFO] Performing clean build"
o.write("Performing clean build...\n")
if os.path.exists(app_dir):
shutil.rmtree(app_dir)
if not os.path.exists(app_dir): os.makedirs(app_dir)
# compile localization files
# Using app_name here will cause the locale to be put in the WRONG bundle!!
localecompiler.LocaleCompiler(name,project_dir,devicefamily,deploytype).compile()
# copy any module resources
if len(module_asset_dirs)>0:
for e in module_asset_dirs:
copy_module_resources(e[0],e[1],True)
# copy CommonJS modules
for module in common_js_modules:
#module_id = module.manifest.moduleid.lower()
#module_dir = os.path.join(app_dir, 'modules', module_id)
#if os.path.exists(module_dir) is False:
# os.makedirs(module_dir)
shutil.copy(module.js, app_dir)
# copy artworks, if appropriate
if command in ['adhoc', 'install', 'distribute']:
artworks = ['iTunesArtwork', 'iTunesArtwork@2x']
for artwork in artworks:
if os.path.exists(os.path.join(project_dir, artwork)):
shutil.copy(os.path.join(project_dir, artwork), app_dir)
# copy any custom fonts in (only runs in simulator)
# since we need to make them live in the bundle in simulator
if len(custom_fonts)>0:
for f in custom_fonts:
font = os.path.basename(f)
app_font_path = os.path.join(app_dir, font)
print "[INFO] Detected custom font: %s" % font
if os.path.exists(app_font_path):
os.remove(app_font_path)
try:
shutil.copy(f,app_dir)
except shutil.Error, e:
print "[WARN] Not copying %s: %s" % (font, e)
# dump out project file info
if command not in ['simulator', 'build']:
dump_resources_listing(project_dir,o)
dump_infoplist(infoplist,o)
install_logo(ti, applogo, project_dir, template_dir, app_dir)
install_defaults(project_dir, template_dir, iphone_resources_dir)
extra_args = None
recompile = copy_tiapp_properties(project_dir)
# if the anything changed in the application defaults then we have to force a xcode build.
if recompile == True:
force_xcode = recompile
if devicefamily!=None:
# Meet the minimum requirements for ipad when necessary
if devicefamily == 'ipad' or devicefamily == 'universal':
device_target="TARGETED_DEVICE_FAMILY=2"
# NOTE: this is very important to run on device -- i dunno why
# xcode warns that 3.2 needs only armv7, but if we don't pass in
# armv6 we get crashes on device
extra_args = ["VALID_ARCHS=armv6 armv7 i386"]
# Additionally, if we're universal, change the device family target
if devicefamily == 'universal':
device_target="TARGETED_DEVICE_FAMILY=1,2"
kroll_coverage = ""
if ti.has_app_property("ti.ios.enablecoverage"):
enable_coverage = ti.to_bool(ti.get_app_property("ti.ios.enablecoverage"))
if enable_coverage:
kroll_coverage = "KROLL_COVERAGE=1"
		def execute_xcode(sdk,extras,print_output=True):
			"""Run xcodebuild for the given SDK with extra build settings.

			sdk          -- value for xcodebuild -sdk (e.g. 'iphonesimulator4.3')
			extras       -- list of additional xcodebuild arguments, or None
			print_output -- when True, echo progress markers and the full output

			Exits the whole script via sys.exit(1) when it detects a build
			failure, a missing output binary, a code-sign error, or a
			customized TARGET_BUILD_DIR (unsupported by Titanium).
			"""
			# the Xcode target name varies by device family
			config = name
			if devicefamily=='ipad':
				config = "%s-iPad" % config
			if devicefamily=='universal':
				config = "%s-universal" % config
			# these are the arguments for running a command line xcode build
			args = ["xcodebuild","-target",config,"-configuration",target,"-sdk",sdk]
			if extras!=None and len(extras)>0:
				args += extras
			args += [deploy_target,device_target]
			if extra_args!=None and len(extra_args)>0:
				args += extra_args
			o.write("Starting Xcode compile with the following arguments:\n\n")
			for arg in args: o.write(" %s\n" % arg)
			o.write("\napp_id = %s\n" % appid)
			o.write("\n\n")
			o.flush()
			if print_output:
				print "[DEBUG] compile checkpoint: %0.2f seconds" % (time.time()-start_time)
				print "[INFO] Executing XCode build..."
				print "[BEGIN_VERBOSE] Executing XCode Compiler <span>[toggle output]</span>"
			# h/t cbarber for this; occasionally the PCH header info gets out of sync
			# with the PCH file if you do the "wrong thing" and xcode isn't
			# smart enough to pick up these changes (since the PCH file hasn't 'changed').
			run.run(['touch', '%s_Prefix.pch' % ti.properties['name']], debug=False)
			output = run.run(args,False,False,o)
			if print_output:
				print output
				print "[END_VERBOSE]"
				sys.stdout.flush()
			# Output already written by run.run
			#o.write(output)
			# check to make sure the user doesn't have a custom build location
			# configured in Xcode which currently causes issues with titanium
			idx = output.find("TARGET_BUILD_DIR ")
			if idx > 0:
				endidx = output.find("\n",idx)
				if endidx > 0:
					# 17 == len("TARGET_BUILD_DIR "); take the rest of the line
					target_build_dir = dequote(output[idx+17:endidx].strip())
					if not os.path.samefile(target_build_dir,build_dir):
						o.write("+ TARGET_BUILD_DIR = %s\n" % target_build_dir)
						print "[ERROR] Your TARGET_BUILD_DIR is incorrectly set. Most likely you have configured in Xcode a customized build location. Titanium does not currently support this configuration."
						print "[ERROR] Expected dir %s, was: %s" % (build_dir,target_build_dir)
						sys.stdout.flush()
						sys.exit(1)
			# look for build error
			if output.find("** BUILD FAILED **")!=-1 or output.find("ld returned 1")!=-1 or output.find("The following build commands failed:")!=-1:
				o.write("+ Detected build failure\n")
				print "[ERROR] Build Failed. Please see output for more details"
				sys.stdout.flush()
				sys.exit(1)
			o.write("+ Looking for application binary at %s\n" % binary)
			# make sure binary exists
			if not os.path.exists(binary):
				o.write("+ Missing application binary at %s\n" % binary)
				print "[ERROR] Build Failed (Missing app at %s). Please see output for more details" % binary
				sys.stdout.flush()
				sys.exit(1)
			# look for a code signing error
			error = re.findall(r'Code Sign error:(.*)',output)
			if len(error) > 0:
				o.write("+ Detected code sign error: %s\n" % error[0])
				print "[ERROR] Code sign error: %s" % error[0].strip()
				sys.stdout.flush()
				sys.exit(1)
			def run_postbuild():
				"""Invoke the postbuild() hook of each plugin that declared one.

				postbuild_modules holds (plugin_name, module) pairs collected
				while the compiler plugins were loaded earlier. Exceptions are
				logged and swallowed so a misbehaving plugin cannot fail the
				whole build.
				"""
				try:
					if postbuild_modules:
						for p in postbuild_modules:
							o.write("Running postbuild %s" % p[0])
							print "[INFO] Running postbuild %s..." % p[0]
							p[1].postbuild()
				except Exception,e:
					o.write("Error in post-build: %s" % e)
					print "[ERROR] Error in post-build: %s" % e
# build the final release distribution
args = []
if command not in ['simulator', 'build']:
# allow the project to have its own custom entitlements
custom_entitlements = os.path.join(project_dir,"Entitlements.plist")
entitlements_contents = None
if os.path.exists(custom_entitlements):
entitlements_contents = open(custom_entitlements).read()
o.write("Found custom entitlements: %s\n" % custom_entitlements)
else:
# attempt to customize it by reading prov profile
entitlements_contents = generate_customized_entitlements(provisioning_profile,appid,appuuid,command,o)
o.write("Generated the following entitlements:\n\n%s\n\n" % entitlements_contents)
f=open(os.path.join(iphone_resources_dir,'Entitlements.plist'),'w+')
f.write(entitlements_contents)
f.close()
args+=["CODE_SIGN_ENTITLEMENTS=Resources/Entitlements.plist"]
# only build if force rebuild (different version) or
# the app hasn't yet been built initially
if ti.properties['guid']!=log_id or force_xcode:
log_id = ti.properties['guid']
f = open(version_file,'w+')
f.write("%s,%s,%s,%s" % (template_dir,log_id,lib_hash,githash))
f.close()
# both simulator and build require an xcodebuild
if command in ['simulator', 'build']:
debugstr = ''
if debughost:
debugstr = 'DEBUGGER_ENABLED=1'
if force_rebuild or force_xcode or not os.path.exists(binary):
execute_xcode("iphonesimulator%s" % link_version,["GCC_PREPROCESSOR_DEFINITIONS=__LOG__ID__=%s DEPLOYTYPE=development TI_DEVELOPMENT=1 DEBUG=1 TI_VERSION=%s %s %s" % (log_id,sdk_version,debugstr,kroll_coverage)],False)
run_postbuild()
o.write("Finishing build\n")
if command == 'simulator':
# first make sure it's not running
kill_simulator()
#Give the kill command time to finish
time.sleep(2)
# sometimes the simulator doesn't remove old log files
# in which case we get our logging jacked - we need to remove
# them before running the simulator
cleanup_app_logfiles(ti, log_id, iphone_version)
sim = None
# this handler will simply catch when the simulator exits
# so we can exit this script
def handler(signum, frame):
global script_ok
print "[INFO] Simulator is exiting"
if not log == None:
try:
os.system("kill -2 %s" % str(log.pid))
except:
pass
if not sim == None and signum!=3:
try:
os.system("kill -3 %s" % str(sim.pid))
except:
pass
kill_simulator()
script_ok = True
sys.exit(0)
# make sure we're going to stop this script whenever
# the simulator exits
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGABRT, handler)
signal.signal(signal.SIGTERM, handler)
print "[INFO] Launching application in Simulator"
sys.stdout.flush()
sys.stderr.flush()
# set the DYLD_FRAMEWORK_PATH environment variable for the following Popen iphonesim command
# this allows the XCode developer folder to be arbitrarily named
xcodeselectpath = os.popen("/usr/bin/xcode-select -print-path").readline().rstrip('\n')
iphoneprivateframeworkspath = xcodeselectpath + '/Platforms/iPhoneSimulator.platform/Developer/Library/PrivateFrameworks'
os.putenv('DYLD_FRAMEWORK_PATH', iphoneprivateframeworkspath)
# launch the simulator
# Awkard arg handling; we need to take 'retina' to be a device type,
# even though it's really not (it's a combination of device type and configuration).
# So we translate it into two args:
if simtype == 'retina':
# Manually overrule retina type if we're an ipad
if devicefamily == 'ipad':
simtype = 'ipad'
else:
simtype = 'iphone --retina'
if devicefamily==None:
sim = subprocess.Popen("\"%s\" launch \"%s\" --sdk %s" % (iphonesim,app_dir,iphone_version),shell=True,cwd=template_dir)
else:
sim = subprocess.Popen("\"%s\" launch \"%s\" --sdk %s --family %s" % (iphonesim,app_dir,iphone_version,simtype),shell=True,cwd=template_dir)
# activate the simulator window
command = 'osascript -e "tell application \\\"%s/Platforms/iPhoneSimulator.platform/Developer/Applications/iPhone Simulator.app\\\" to activate"'
os.system(command%xcodeselectpath)
end_time = time.time()-start_time
print "[INFO] Launched application in Simulator (%0.2f seconds)" % end_time
sys.stdout.flush()
sys.stderr.flush()
# give the simulator a bit to get started and up and running before
# starting the logger
time.sleep(2)
logger = os.path.realpath(os.path.join(template_dir,'logger.py'))
# start the logger tail process. this will simply read the output
# from the logs and stream them back to Titanium Developer on the console
log = subprocess.Popen([
logger,
str(log_id)+'.log',
iphone_version
])
# wait (blocking this script) until the simulator exits
try:
os.waitpid(sim.pid,0)
except SystemExit:
# If the user terminates the app here, it's via a
# soft kill of some kind (i.e. like what TiDev does)
# and so we should suppress the usual error message.
# Fixes #2086
pass
print "[INFO] Application has exited from Simulator"
# in this case, the user has exited the simulator itself
# and not clicked Stop Emulator from within Developer so we kill
# our tail log process but let simulator keep running
if not log == None:
try:
os.system("kill -2 %s" % str(log.pid))
except:
pass
script_ok = True
###########################################################################
# END OF SIMULATOR COMMAND
###########################################################################
#
# this command is run for installing an app on device or packaging for adhoc distribution
#
elif command in ['install', 'adhoc']:
debugstr = ''
if debughost:
debugstr = 'DEBUGGER_ENABLED=1'
args += [
"GCC_PREPROCESSOR_DEFINITIONS=DEPLOYTYPE=test TI_TEST=1 %s %s" % (debugstr, kroll_coverage),
"PROVISIONING_PROFILE=%s" % appuuid
]
if command == 'install':
args += ["CODE_SIGN_IDENTITY=iPhone Developer: %s" % dist_name]
elif command == 'adhoc':
args += ["CODE_SIGN_IDENTITY=iPhone Distribution: %s" % dist_name]
if dist_keychain is not None:
args += ["OTHER_CODE_SIGN_FLAGS=--keychain %s" % dist_keychain]
args += ["DEPLOYMENT_POSTPROCESSING=YES"]
execute_xcode("iphoneos%s" % iphone_version,args,False)
if command == 'install':
print "[INFO] Installing application in iTunes ... one moment"
sys.stdout.flush()
dev_path = run.run(['xcode-select','-print-path'],True,False).rstrip()
package_path = os.path.join(dev_path,'Platforms/iPhoneOS.platform/Developer/usr/bin/PackageApplication')
if os.path.exists(package_path):
o.write("+ Preparing to run %s\n"%package_path)
output = run.run([package_path,app_dir],True)
o.write("+ Finished running %s\n"%package_path)
if output: o.write(output)
# for install, launch itunes with the app
ipa = os.path.join(os.path.dirname(app_dir),"%s.ipa" % name)
o.write("+ IPA file should be at %s\n" % ipa);
# it appears that sometimes this command above fails on certain installs
# or is missing. let's just open if we have it otherwise, open the app
# directory
if not os.path.exists(ipa):
# just open the app dir itself
o.write("+ IPA didn't exist at %s\n" % ipa)
o.write("+ Will try and open %s\n" % app_dir)
ipa = app_dir
if command == 'install':
# to force iTunes to install our app, we simply open the IPA
# file in itunes
cmd = "open -b com.apple.itunes \"%s\"" % ipa
o.write("+ Executing the command: %s\n" % cmd)
os.system(cmd)
o.write("+ After executing the command: %s\n" % cmd)
# now run our applescript to tell itunes to sync to get
# the application on the phone
ass = os.path.join(template_dir,'itunes_sync.scpt')
cmd = "osascript \"%s\"" % ass
o.write("+ Executing the command: %s\n" % cmd)
os.system(cmd)
o.write("+ After executing the command: %s\n" % cmd)
print "[INFO] iTunes sync initiated"
o.write("Finishing build\n")
sys.stdout.flush()
script_ok = True
run_postbuild()
###########################################################################
# END OF INSTALL/ADHOC COMMAND
###########################################################################
#
# this command is run for packaging an app for distribution
#
elif command == 'distribute':
deploytype = "production"
args += [
"GCC_PREPROCESSOR_DEFINITIONS=DEPLOYTYPE=%s TI_PRODUCTION=1" % deploytype,
"PROVISIONING_PROFILE=%s" % appuuid,
"CODE_SIGN_IDENTITY=iPhone Distribution: %s" % dist_name,
"DEPLOYMENT_POSTPROCESSING=YES"
]
if dist_keychain is not None:
args += ["OTHER_CODE_SIGN_FLAGS=--keychain %s" % dist_keychain]
execute_xcode("iphoneos%s" % iphone_version,args,False)
# switch to app_bundle for zip
os.chdir(build_dir)
if xcode_version() >= 4.0:
distribute_xc4(name, applogo, o)
else:
distribute_xc3(uuid, provisioning_profile, name, o)
# open xcode + organizer after packaging
# Have to force the right xcode open...
xc_path = run.run(['xcode-select','-print-path'],True,False).rstrip()
xc_app_index = xc_path.find('/Xcode.app/')
if (xc_app_index >= 0):
xc_path = xc_path[0:xc_app_index+10]
else:
xc_path = os.path.join(xc_path,'Applications','Xcode.app')
o.write("Launching xcode: %s\n" % xc_path)
os.system('open -a %s' % xc_path)
ass = os.path.join(template_dir,'xcode_organizer.scpt')
cmd = "osascript \"%s\"" % ass
os.system(cmd)
o.write("Finishing build\n")
script_ok = True
run_postbuild()
###########################################################################
# END OF DISTRIBUTE COMMAND
###########################################################################
finally:
os.chdir(cwd)
except:
print "[ERROR] Error: %s" % traceback.format_exc()
if not script_ok:
o.write("\nException detected in script:\n")
traceback.print_exc(file=o)
o.close()
sys.exit(1)
else:
o.close()
if __name__ == "__main__":
    # Script entry point: hand the raw command-line arguments to main().
    # main() handles its own error reporting; reaching this line means the
    # build finished without a fatal error.
    main(sys.argv)
    sys.exit(0)
| xissy/titanium-mobile-sdk | iphone/builder.py | Python | apache-2.0 | 58,468 | [
"VisIt"
] | aade8b76cfd17cbc5ca12997019238a469aae88b93ca7faeeeb1b9abc9ed6344 |
#!/usr/bin/env python
'''
written by Rose A. Finn on 2/3/2013
GOAL:
- create a mastertable for each LCS cluster that uses the NASA-Sloan atlas as the parent sample
METHOD:
- match NSA to 24-micron apex catalog
- pull AGC data (NSA contains AGC number)
- match with galaxy zoo info
- append new columns using atpy
- write out new table using atpy
UPDATES:
2015/07/13
this program is only used to calculate local density and write out local density files
'''
from pylab import *
import os, atpy
from LCScommon import *
#import ReadAGCsav
from pyraf import iraf
import mystuff as my
import argparse
parser=argparse.ArgumentParser()
parser.add_argument('-v',"--velocitycut",help='velocity cut to use when calculating local density. Galaxies within +/- vcut will be used. Default value is 1500 km/s.',default=1500.,type=float)
args = parser.parse_args()
class cluster:
    """One LCS (Local Cluster Survey) cluster.

    Holds paths to the cluster's 24um mosaic images, cluster properties
    looked up from LCScommon, and the NASA-Sloan Atlas (NSA) catalog for
    the field.  The localdensity* methods compute environment measures
    that writeoutput() saves to a FITS table.
    """
    def __init__(self,clustername):
        # clustername must be a key of the LCScommon lookup dictionaries
        # (clusterRA, clusterDec, clusterz, ...).
        self.prefix=clustername
        # default 24um mosaic naming convention ("Full...ch1rf...")
        self.image24=homedir+'research/LocalClusters/Images/'+self.prefix+'/24um/Full'+self.prefix+'ch1rf_mosaic_minus_median_extract.fits'
        self.noise24=homedir+'research/LocalClusters/Images/'+self.prefix+'/24um/Full'+self.prefix+'ch1rf_mosaic_unc.fits'
        # A1367 and Hercules mosaics use a different naming convention
        # ("ch1r1" instead of "Full...ch1rf") and a hard-coded home path
        if (clustername.find('A1367') > -1):
            self.image24='/home/rfinn/research/LocalClusters/Images/'+self.prefix+'/24um/'+self.prefix+'ch1r1_mosaic_minus_median_extract.fits'
            self.noise24='/home/rfinn/research/LocalClusters/Images/'+self.prefix+'/24um/'+self.prefix+'ch1r1_mosaic_unc.fits'
        elif (clustername.find('Herc')>-1):#for Abell 1367 and Hercules cluster
            self.image24='/home/rfinn/research/LocalClusters/Images/'+self.prefix+'/24um/'+self.prefix+'ch1r1_mosaic_minus_median_extract.fits'
            self.noise24='/home/rfinn/research/LocalClusters/Images/'+self.prefix+'/24um/'+self.prefix+'ch1r1_mosaic_unc.fits'
        # read NSA table for each cluster
        infile=homedir+'research/NSA/'+self.prefix+'_NSA.Fits'
        self.ndat=atpy.Table(infile,type='fits')
        self.nsadir=homedir+'research/NSA/'
        # cluster center, redshift and kinematics from LCScommon tables
        self.cra=clusterRA[self.prefix]
        self.cdec=clusterDec[self.prefix]
        self.cz=clusterz[self.prefix]
        self.biweightvel=clusterbiweightcenter[self.prefix]
        self.biweightscale=clusterbiweightscale[self.prefix]
        self.r200=2.02*(self.biweightscale)/1000./sqrt(OmegaL+OmegaM*(1.+self.cz)**3)*H0/70. # in Mpc
        self.r200deg=self.r200*1000./my.DA(self.cz,h)/3600.
        # distance to cluster from the biweight recession velocity
        self.cdMpc=self.biweightvel/H0
        self.cdcm=self.cdMpc*3.e24
        self.csigma=self.biweightscale
        self.mcl=my.clusterMass(self.csigma,self.cz,h)
        # angular diameter scale; presumably kpc/arcsec -- TODO confirm
        # against mystuff.DA
        self.AngDistance=my.DA(self.cz,h)
    def geton24imageflag(self):
        """Set self.On24ImageFlag: True for NSA galaxies that fall on the
        24um mosaic with a buffer of deltaCutout/2 from every edge.
        Uses IRAF to read the image WCS and transform coordinates."""
        iraf.imgets(image=self.image24,param='CD1_1')#get x plate scale on rotated image
        #print iraf.imgets.value
        xplate=abs(float(iraf.imgets.value))#deg/pixel
        dpix=deltaCutout/3600./xplate/2. # requires a 50 arcsec buffer b/w center of galaxy and edge of image
        iraf.imgets(image=self.image24,param='naxis1')#get x value corresponding to RA
        xpixmax=(int(iraf.imgets.value))#deg/pixel
        iraf.imgets(image=self.image24,param='naxis2')#get x value corresponding to RA
        ypixmax=(int(iraf.imgets.value))#deg/pixel
        # write RA and Dec to ascii file
        t = atpy.Table()
        t.add_column('RA',self.ndat.RA)
        t.add_column('DEC',self.ndat.DEC)
        outfile=self.nsadir+self.prefix+'_RADEC.txt'
        out1=open(outfile,'w')
        for i in range(len(self.ndat.RA)):
            s='%12.8f %12.8f \n'%(self.ndat.RA[i],self.ndat.DEC[i])
            out1.write(s)
        out1.close()
        # transform RA and Dec to pixels using wcsctran
        print 'transforming 24um coords'
        outcoords=self.nsadir+str(self.prefix)+'xy24.txt'
        # wcsctran refuses to overwrite; remove any stale output first
        if os.path.exists(outcoords):
            os.remove(outcoords)
        iraf.imcoords.wcsctran(image=self.image24,input=outfile,output=outcoords,inwcs='world',outwcs='logical',verbose='no')
        # read in pixel coordinates
        self.mipscoords=atpy.Table(outcoords,type='ascii')
        x=self.mipscoords['col1']
        y=self.mipscoords['col2']
        self.On24ImageFlag= (x > dpix) & (x < (xpixmax-dpix)) & (y > dpix) & (y < (ypixmax-dpix))
        print self.prefix,': # on 24um image = ',sum(self.On24ImageFlag)
        # check to see if pixels are off image and set flag accordingly
    def localdensity5(self):
        """Compute projected surface densities Sigma_5 and Sigma_10
        (distance to the 5th and 10th nearest neighbor) for every NSA
        galaxy, using SDSS-matched galaxies within the velocity cut as
        the reference sample.  Results go to self.sigma_5/self.sigma_10.
        NOTE(review): the header comment below appears copied from
        localdensitybymass; this method does not sum mass."""
        # get local density by summing mass w/in 300 kpc
        # NOTE:
        # - this measure will be unreliable for galaxies near the edge of the 3 deg area
        # - this will be fine for galaxies on 24um image
        sdssflag=self.ndat.ISDSS > -1
        DA=self.AngDistance
        x=self.ndat.RA
        y=self.ndat.DEC
        xref=self.ndat.RA[sdssflag]
        yref=self.ndat.DEC[sdssflag]
        n1=6
        sigma5=zeros(len(x),'d')
        sigma10=zeros(len(x),'d')
        for i in range(len(x)):
            # line-of-sight velocity difference to every reference galaxy
            deltav=abs(self.ndat.ZDIST[sdssflag]-self.ndat.ZDIST[i])*3.e5
            d=sqrt((x[i]-xref)**2+(y[i]-yref)**2)*3600./1000.*DA#d in Mpc
            vflag = deltav < args.velocitycut
            #print i,sum(vflag)
            d=d[vflag]#apply a velocity cut of +/- 1500 km/s
            d.sort()#sort in ascending order, zeroth element is distance from galaxy to itself
            sig=0
            if len(d) < n1:
                print 'not enough points to calculate local density'
                print 'only have ',len(d),' galaxies w/in 1500 km/s'
                print 'skipping to next entry'
                continue
            else:
                sigma5[i]=1./(4.*pi)*(1.*5)/(d[5])**2
                try:
                    sigma10[i]=1./(4.*pi)*(1.*10)/(d[10])**2
                except IndexError:
                    continue
        self.sigma_5=sigma5
        self.sigma_10=sigma10
    def localdensity(self):
        """Compute a nearest-neighbor density estimate for every NSA
        galaxy and store it in self.sigma_nn.  The 3rd-to-6th-neighbor
        sum is commented out; the active formula uses only d[1], the
        distance to the nearest neighbor."""
        # get local density by summing mass w/in 300 kpc
        # NOTE:
        # - this measure will be unreliable for galaxies near the edge of the 3 deg area
        # - this will be fine for galaxies on 24um image
        sdssflag=self.ndat.ISDSS > -1
        DA=self.AngDistance
        x=self.ndat.RA
        y=self.ndat.DEC
        xref=self.ndat.RA[sdssflag]
        yref=self.ndat.DEC[sdssflag]
        n1=3
        n2=6
        sigma=zeros(len(x),'d')
        for i in range(len(x)):
            deltav=abs(self.ndat.ZDIST[sdssflag]-self.ndat.ZDIST[i])*3.e5
            d=sqrt((x[i]-xref)**2+(y[i]-yref)**2)*3600./1000.*DA#d in Mpc
            d=d[deltav<args.velocitycut]#apply a velocity cut of +/- 1500 km/s
            d.sort()#sort in ascending order, zeroth element is distance from galaxy to itself
            sig=0
            # previous 3rd-to-6th nearest-neighbor estimator, kept for reference
            '''
            if len(d) < n2:
                print 'not enough points to calculate local density'
                print 'only have ',len(d),' galaxies w/in ',args.velocitycut
            if len(d) < n1:
                print 'ut oh!'
                for j in range(n1,len(d)):
                    sig += (1.*j)/(d[j])**2
            else:
                for j in range(n1,n2+1):
                    sig += (1.*j)/(d[j])**2
            '''
            sigma[i]=1./(4.*pi)*d[1]
        self.sigma_nn=sigma
        return
    def localdensitybymass(self):#find local density, using nearest neighbors n1 through n2
        """Compute self.rhomass: the summed NSA stellar mass of all
        reference galaxies within a 300 kpc projected radius and within
        the velocity cut of each galaxy."""
        DA=self.AngDistance #kpc/arcsec
        angDist_300kpc=300./DA/3600. # angular dist corresponding to 300 kpc, in degrees
        x=self.ndat.RA
        y=self.ndat.DEC
        # should probably use SDSS as the reference sample, but for now, using NSA
        sdssflag=self.ndat.ISDSS > -1
        xref=self.ndat.RA[sdssflag]
        yref=self.ndat.DEC[sdssflag]
        self.rhomass=zeros(len(x),'d')
        for i in range(len(x)):
            deltav=abs(self.ndat.ZDIST[sdssflag]-self.ndat.ZDIST[i])*3.e5
            d=sqrt((x[i]-xref)**2+(y[i]-yref)**2)*3600./1000.*DA#d in Mpc
            d=d[deltav<args.velocitycut]#apply a velocity cut of +/- 1500 km/s
            neighbor_flag=d < .3
            self.rhomass[i]=sum(self.ndat.MASS[neighbor_flag])
            #print i, self.rhomass[i],sum(neighbor_flag)
    def matchnsa2sdssphot(self):
        """Placeholder: matching NSA to SDSS photometry is not implemented."""
        return
    def writeoutput(self):
        """Write the computed local-density columns to
        <prefix>_localdensity.fits, overwriting any existing file."""
        # append flag to NSA table
        # create new table
        ldat=atpy.Table()
        #ldat.add_column('On24ImageFlag',self.On24ImageFlag)
        # will add more columns here as time permits
        ldat.add_column('NSAID',self.ndat.NSAID,unit='',description='NSA ID')
        ldat.add_column('RA',self.ndat.RA,unit='deg',description='RA')
        ldat.add_column('DEC',self.ndat.DEC,unit='deg',description='DEC')
        ldat.add_column('Z',self.ndat.Z,unit='',description='redshift')
        ldat.add_column('ZDIST',self.ndat.ZDIST,unit='',description='NSA ZDIST')
        ldat.add_column('RHOMASS',self.rhomass,unit='Msun',description='Mass of galaxies w/in 300 kpc')
        ldat.add_column('SIGMA_NN',self.sigma_nn,unit='Ngal/Mpc^2', description='3rd to 6th nearest neighbor')
        ldat.add_column('SIGMA_5',self.sigma_5,unit='Ngal/Mpc^2', description='to 5th nearest neighbor')
        ldat.add_column('SIGMA_10',self.sigma_10,unit='Ngal/Mpc^2', description='to 10th nearest neighbor')
        # write out table
        outfile=homedir+'research/LocalClusters/NSAmastertables/LocalDensityTables/'+self.prefix+'_localdensity.fits'
        if os.path.exists(outfile):
            os.remove(outfile)
        ldat.write(outfile)
if __name__ == '__main__':
    # Build one cluster object per LCS cluster ...
    mkw11=cluster('MKW11')
    mkw8=cluster('MKW8')
    awm4=cluster('AWM4')
    a2052=cluster('A2052')
    a2063=cluster('A2063')
    ngc=cluster('NGC6107')
    coma=cluster ('Coma')
    herc=cluster('Hercules')
    a1367=cluster('A1367')
    mylocalclusters=[mkw11,mkw8,awm4,a2052,a2063,ngc,coma,herc,a1367]
    # ... then compute all local-density measures and write the output
    # table for each.  The commented calls are optional preprocessing
    # steps that are not needed for the density tables.
    for cl in mylocalclusters:
        print '\n ',cl.prefix,'\n'
        #cl.geton24imageflag()
        #cl.getsdssspeccat()
        #cl.getsdssphotcat()
        cl.localdensity()
        cl.localdensity5()
        cl.localdensitybymass()
        cl.writeoutput()
| rfinn/LCS | paper1code/LCSwritemasterNSA.py | Python | gpl-3.0 | 10,228 | [
"Galaxy"
] | 8b657d369460d528d1075dc7acc608a8e11513457155ba9c0a024fbc0d54ad12 |
"""
Interactivity functions and classes using matplotlib and IPython widgets
**Gravity forward modeling**
* :class:`~fatiando.gravmag.interactive.Moulder`: a matplitlib GUI for 2D
forward modeling using polygons
----
"""
from __future__ import division, absolute_import
from future.builtins import zip
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy
from matplotlib import pyplot, widgets, patches
from matplotlib.lines import Line2D
from IPython.core.pylabtools import print_figure
from IPython.display import Image
from .. import utils
from . import talwani
from ..mesher import Polygon
class Moulder(object):
"""
Interactive 2D forward modeling using polygons.
A matplotlib GUI application. Allows drawing and manipulating polygons and
computes their predicted data automatically. Also permits contaminating the
data with gaussian pseudo-random error for producing synthetic data sets.
Uses :mod:`fatiando.gravmag.talwani` for computations.
*Moulder* objects can be persisted to Python pickle files using the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method and later
restored using :meth:`~fatiando.gravmag.interactive.Moulder.load`.
.. warning::
Cannot be used with ``%matplotlib inline`` on IPython notebooks because
the app uses the matplotlib plot window. You can still embed the
generated model and data figure on notebooks using the
:meth:`~fatiando.gravmag.interactive.Moulder.plot` method.
Parameters:
* area : list = (x1, x2, z1, z2)
The limits of the model drawing area, in meters.
* x, z : 1d-arrays
The x- and z-coordinates of the computation points (places where
predicted data will be computed). In meters.
* data : None or 1d-array
Observed data measured at *x* and *z*. Will plot this with black dots
along the predicted data.
* density_range : list = [min, max]
The minimum and maximum values allowed for the density. Determines the
limits of the density slider of the application. In kg.m^-3. Defaults
to [-2000, 2000].
* kwargs : dict
Other keyword arguments used to restore the state of the application.
Used by the :meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Not intended for general use.
Examples:
Make the Moulder object and start the app::
import numpy as np
area = (0, 10e3, 0, 5e3)
# Calculate on 100 points
x = np.linspace(area[0], area[1], 100)
z = np.zeros_like(x)
app = Moulder(area, x, z)
app.run()
# This will pop-up a window with the application (like the screenshot
# below). Start drawing (follow the instruction in the figure title).
# When satisfied, close the window to resume execution.
.. image:: ../_static/images/Moulder-screenshot.png
:alt: Screenshot of the Moulder GUI
After closing the plot window, you can access the model and data from the
*Moulder* object::
app.model # The drawn model as fatiando.mesher.Polygon
app.predicted # 1d-array with the data predicted by the model
# You can save the predicted data to use later
app.save_predicted('data.txt')
# You can also save the application and resume it later
app.save('application.pkl')
# Close this session/IPython notebook/etc.
# To resume drawing later:
app = Moulder.load('application.pkl')
app.run()
"""
# The tolerance range for mouse clicks on vertices. In pixels.
epsilon = 5
# App instructions printed in the figure suptitle
instructions = ' | '.join([
'n: New polygon', 'd: delete', 'click: select/move', 'esc: cancel'])
    def __init__(self, area, x, z, data=None, density_range=[-2000, 2000],
                 **kwargs):
        # NOTE(review): mutable default for density_range is tolerated here
        # because it is never mutated; kept for interface compatibility.
        self.area = area
        self.x, self.z = numpy.asarray(x), numpy.asarray(z)
        self.density_range = density_range
        self.data = data
        # Used to set the ylims for the data axes.
        if data is None:
            self.dmin, self.dmax = 0, 0
        else:
            self.dmin, self.dmax = data.min(), data.max()
        # The kwargs below are supplied by Moulder.load() to restore a
        # previously saved session.
        self.predicted = kwargs.get('predicted', numpy.zeros_like(x))
        self.error = kwargs.get('error', 0)
        self.cmap = kwargs.get('cmap', pyplot.cm.RdBu_r)
        # Common style for the polygon outline Line2D artists.
        self.line_args = dict(
            linewidth=2, linestyle='-', color='k', marker='o',
            markerfacecolor='k', markersize=5, animated=False, alpha=0.6)
        self.polygons = []
        self.lines = []
        self.densities = kwargs.get('densities', [])
        vertices = kwargs.get('vertices', [])
        # Rebuild the matplotlib artists for any restored polygons.
        for xy, dens in zip(vertices, self.densities):
            poly, line = self._make_polygon(xy, dens)
            self.polygons.append(poly)
            self.lines.append(line)
def save_predicted(self, fname):
"""
Save the predicted data to a text file.
Data will be saved in 3 columns separated by spaces: x z data
Parameters:
* fname : string or file-like object
The name of the output file or an open file-like object.
"""
numpy.savetxt(fname, numpy.transpose([self.x, self.z, self.predicted]))
def save(self, fname):
"""
Save the application state into a pickle file.
Use this to persist the application. You can later reload the entire
object, with the drawn model and data, using the
:meth:`~fatiando.gravmag.interactive.Moulder.load` method.
Parameters:
* fname : string
The name of the file to save the application. The extension doesn't
matter (use ``.pkl`` if in doubt).
"""
with open(fname, 'w') as f:
vertices = [numpy.asarray(p.xy) for p in self.polygons]
state = dict(area=self.area, x=self.x,
z=self.z, data=self.data,
density_range=self.density_range,
cmap=self.cmap,
predicted=self.predicted,
vertices=vertices,
densities=self.densities,
error=self.error)
pickle.dump(state, f)
@classmethod
def load(cls, fname):
"""
Restore an application from a pickle file.
The pickle file should have been generated by the
:meth:`~fatiando.gravmag.interactive.Moulder.save` method.
Parameters:
* fname : string
The name of the file.
Returns:
* app : Moulder object
The restored application. You can continue using it as if nothing
had happened.
"""
with open(fname) as f:
state = pickle.load(f)
app = cls(**state)
return app
    @property
    def model(self):
        """
        The polygon model drawn as :class:`fatiando.mesher.Polygon` objects.
        """
        # Convert the matplotlib artists + density list into the geometry
        # objects that talwani.gz expects.
        m = [Polygon(p.xy, {'density': d})
             for p, d in zip(self.polygons, self.densities)]
        return m
    def run(self):
        """
        Start the application for drawing.

        Will pop-up a window with a place for drawing the model (below) and a
        place with the predicted (and, optionally, observed) data (top).

        Follow the instruction on the figure title.

        When done, close the window to resume program execution.
        """
        fig = self._figure_setup()
        # Sliders to control the density and the error in the data
        self.density_slider = widgets.Slider(
            fig.add_axes([0.10, 0.01, 0.30, 0.02]), 'Density',
            self.density_range[0], self.density_range[1], valinit=0.,
            valfmt='%6.0f kg/m3')
        self.error_slider = widgets.Slider(
            fig.add_axes([0.60, 0.01, 0.30, 0.02]), 'Error',
            0, 5, valinit=self.error, valfmt='%1.2f mGal')
        # Put instructions on figure title
        self.dataax.set_title(self.instructions)
        # Markers for mouse click events (reset interaction state)
        self._ivert = None
        self._ipoly = None
        self._lastevent = None
        self._drawing = False
        self._xy = []
        self._drawing_plot = None
        # Used to blit the model plot and make
        # rendering faster
        self.background = None
        # Connect event callbacks
        self._connect()
        self._update_data()
        self._update_data_plot()
        self.canvas.draw()
        # Blocks until the user closes the window.
        pyplot.show()
    def _connect(self):
        """
        Connect the matplotlib events to their callback methods.
        """
        # Make the proper callback connections
        self.canvas.mpl_connect('button_press_event',
                                self._button_press_callback)
        self.canvas.mpl_connect('key_press_event',
                                self._key_press_callback)
        self.canvas.mpl_connect('button_release_event',
                                self._button_release_callback)
        self.canvas.mpl_connect('motion_notify_event',
                                self._mouse_move_callback)
        self.canvas.mpl_connect('draw_event',
                                self._draw_callback)
        # Call the cleanup and extra code for a draw event when resizing as
        # well. This is needed so that tight_layout adjusts the figure when
        # resized. Otherwise, tight_layout snaps only when the user clicks on
        # the figure to do something.
        self.canvas.mpl_connect('resize_event',
                                self._draw_callback)
        # Slider callbacks (created in run()).
        self.density_slider.on_changed(self._set_density_callback)
        self.error_slider.on_changed(self._set_error_callback)
    def plot(self, figsize=(10, 8), dpi=70):
        """
        Make a plot of the data and model for embedding in IPython notebooks

        Doesn't require ``%matplotlib inline`` to embed the plot (as that would
        not allow the app to run).

        Parameters:

        * figsize : list = (width, height)
            The figure size in inches.
        * dpi : float
            The number of dots-per-inch for the figure resolution.

        """
        fig = self._figure_setup(figsize=figsize, facecolor='white')
        self._update_data_plot()
        # Close the window immediately; we embed a rendered PNG instead of
        # showing the interactive figure.
        pyplot.close(fig)
        data = print_figure(fig, dpi=dpi)
        return Image(data=data)
    def _figure_setup(self, **kwargs):
        """
        Setup the plot figure with labels, titles, ticks, etc.

        Sets the *canvas*, *dataax*, *modelax*, *polygons* and *lines*
        attributes.

        Parameters:

        * kwargs : dict
            Keyword arguments passed to ``pyplot.subplots``.

        Returns:

        * fig : matplotlib figure object
            The created figure

        """
        fig, axes = pyplot.subplots(2, 1, **kwargs)
        ax1, ax2 = axes
        # Top axes: predicted (red line) and optional observed (black dots).
        self.predicted_line, = ax1.plot(self.x, self.predicted, '-r')
        if self.data is not None:
            self.data_line, = ax1.plot(self.x, self.data, '.k')
        ax1.set_ylabel('Gravity anomaly (mGal)')
        ax1.set_xlabel('x (m)', labelpad=-10)
        ax1.set_xlim(self.area[:2])
        ax1.set_ylim((-200, 200))
        ax1.grid(True)
        # Invisible pcolor whose only purpose is to drive the colorbar
        # mapping densities to colors.
        tmp = ax2.pcolor(numpy.array([self.density_range]), cmap=self.cmap)
        tmp.set_visible(False)
        pyplot.colorbar(tmp, orientation='horizontal',
                        pad=0.08, aspect=80).set_label(r'Density (kg/cm3)')
        # Remake the polygons and lines to make sure they belong to the right
        # axis coordinates
        vertices = [p.xy for p in self.polygons]
        newpolygons, newlines = [], []
        for xy, dens in zip(vertices, self.densities):
            poly, line = self._make_polygon(xy, dens)
            newpolygons.append(poly)
            newlines.append(line)
            ax2.add_patch(poly)
            ax2.add_line(line)
        self.polygons = newpolygons
        self.lines = newlines
        ax2.set_xlim(self.area[:2])
        ax2.set_ylim(self.area[2:])
        ax2.grid(True)
        # Depth increases downward in the model view.
        ax2.invert_yaxis()
        ax2.set_ylabel('z (m)')
        fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.06,
                            hspace=0.1)
        self.figure = fig
        self.canvas = fig.canvas
        self.dataax = axes[0]
        self.modelax = axes[1]
        fig.canvas.draw()
        return fig
def _density2color(self, density):
"""
Map density values to colors using the given *cmap* attribute.
Parameters:
* density : 1d-array
The density values of the model polygons
Returns
* colors : 1d-array
The colors mapped to each density value (returned by a matplotlib
colormap object.
"""
dmin, dmax = self.density_range
return self.cmap((density - dmin)/(dmax - dmin))
    def _make_polygon(self, vertices, density):
        """
        Create a polygon for drawing.

        Polygons are matplotlib.patches.Polygon objects for the fill and
        matplotlib.lines.Line2D for the contour.

        Parameters:

        * vertices : list of [x, z]
            List of the [x, z] coordinate pairs of each vertex of the polygon
        * density : float
            The density of the polygon (used to set the color)

        Returns:

        * polygon, line
            The matplotlib Polygon and Line2D objects

        """
        poly = patches.Polygon(vertices, animated=False, alpha=0.9,
                               color=self._density2color(density))
        # Outline drawn as a separate Line2D so vertices show as markers.
        x, y = list(zip(*poly.xy))
        line = Line2D(x, y, **self.line_args)
        return poly, line
    def _update_data(self):
        """
        Recalculate the predicted data (optionally with random error)
        """
        self.predicted = talwani.gz(self.x, self.z, self.model)
        # Contaminate with gaussian noise when the error slider is above 0.
        if self.error > 0:
            self.predicted = utils.contaminate(self.predicted, self.error)
    def _update_data_plot(self):
        """
        Update the predicted data plot in the *dataax*.

        Adjusts the xlim of the axes to fit the data.
        """
        self.predicted_line.set_ydata(self.predicted)
        # Pad the y limits by 20% so the curve doesn't touch the frame.
        vmin = 1.2*min(self.predicted.min(), self.dmin)
        vmax = 1.2*max(self.predicted.max(), self.dmax)
        self.dataax.set_ylim(vmin, vmax)
        self.dataax.grid(True)
        self.canvas.draw()
    def _draw_callback(self, value):
        """
        Callback for the canvas.draw() event.

        This is called everytime the figure is redrawn. Used to do some
        clean up and tunning whenever this is called as well, like calling
        ``tight_layout``.
        """
        self.figure.tight_layout()
    def _set_error_callback(self, value):
        """
        Callback when error slider is edited
        """
        # value is the new noise level in mGal; recompute and redraw.
        self.error = value
        self._update_data()
        self._update_data_plot()
    def _set_density_callback(self, value):
        """
        Callback when density slider is edited
        """
        # Only applies when a polygon is currently selected.
        if self._ipoly is not None:
            self.densities[self._ipoly] = value
            self.polygons[self._ipoly].set_color(self._density2color(value))
            self._update_data()
            self._update_data_plot()
            self.canvas.draw()
    def _get_polygon_vertice_id(self, event):
        """
        Find out which vertex of which polygon the event happened in.

        If the click was inside a polygon (not on a vertex), identify that
        polygon.

        Returns:

        * p, v : int, int
            p: the index of the polygon the event happened in or None if
            outside all polygons.
            v: the index of the polygon vertex that was clicked or None if the
            click was not on a vertex.

        """
        distances = []
        indices = []
        # For each polygon: pixel distance from the click to its closest
        # vertex (comparison is done in display coordinates).
        for poly in self.polygons:
            x, y = poly.get_transform().transform(poly.xy).T
            d = numpy.sqrt((x - event.x)**2 + (y - event.y)**2)
            distances.append(d.min())
            indices.append(numpy.argmin(d))
        p = numpy.argmin(distances)
        if distances[p] >= self.epsilon:
            # Click was not within epsilon pixels of any vertex.
            # Check if the event was inside a polygon
            x, y = event.x, event.y
            p, v = None, None
            for i, poly in enumerate(self.polygons):
                if poly.contains_point([x, y]):
                    p = i
                    break
        else:
            v = indices[p]
            # The first and last vertices of a closed polygon coincide, so
            # selecting either means moving both.
            last = len(self.polygons[p].xy) - 1
            if v == 0 or v == last:
                v = [0, last]
        return p, v
    def _button_press_callback(self, event):
        """
        What actions to perform when a mouse button is clicked
        """
        if event.inaxes != self.modelax:
            return
        if event.button == 1 and not self._drawing and self.polygons:
            # Left click in selection mode: deselect everything, then try
            # to select a vertex or polygon under the cursor.
            self._lastevent = event
            for line, poly in zip(self.lines, self.polygons):
                poly.set_animated(False)
                line.set_animated(False)
                line.set_color([0, 0, 0, 0])
            self.canvas.draw()
            # Find out if a click happened on a vertice
            # and which vertice of which polygon
            self._ipoly, self._ivert = self._get_polygon_vertice_id(event)
            if self._ipoly is not None:
                # Highlight the selection and cache the background so the
                # selected artists can be blitted while dragging.
                self.density_slider.set_val(self.densities[self._ipoly])
                self.polygons[self._ipoly].set_animated(True)
                self.lines[self._ipoly].set_animated(True)
                self.lines[self._ipoly].set_color([0, 1, 0, 0])
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
                self.modelax.draw_artist(self.polygons[self._ipoly])
                self.modelax.draw_artist(self.lines[self._ipoly])
                self.canvas.blit(self.modelax.bbox)
        elif self._drawing:
            if event.button == 1:
                # Left click while drawing: add a vertex to the new polygon.
                self._xy.append([event.xdata, event.ydata])
                self._drawing_plot.set_data(list(zip(*self._xy)))
                self.canvas.restore_region(self.background)
                self.modelax.draw_artist(self._drawing_plot)
                self.canvas.blit(self.modelax.bbox)
            elif event.button == 3:
                # Right click: finish the polygon (needs >= 3 vertices).
                if len(self._xy) >= 3:
                    density = self.density_slider.val
                    poly, line = self._make_polygon(self._xy, density)
                    self.polygons.append(poly)
                    self.lines.append(line)
                    self.densities.append(density)
                    self.modelax.add_patch(poly)
                    self.modelax.add_line(line)
                    self._drawing_plot.remove()
                    self._drawing_plot = None
                    self._xy = None
                    self._drawing = False
                    # Leave the new polygon selected.
                    self._ipoly = len(self.polygons) - 1
                    self.lines[self._ipoly].set_color([0, 1, 0, 0])
                    self.dataax.set_title(self.instructions)
                    self.canvas.draw()
                    self._update_data()
                    self._update_data_plot()
    def _button_release_callback(self, event):
        """
        Reset place markers on mouse button release
        """
        if event.inaxes != self.modelax:
            return
        if event.button != 1:
            return
        if self._ivert is None and self._ipoly is None:
            return
        # Drop the blit cache and stop animating: the drag is over.
        self.background = None
        for line, poly in zip(self.lines, self.polygons):
            poly.set_animated(False)
            line.set_animated(False)
        self.canvas.draw()
        self._ivert = None
        # self._ipoly is only released when clicking outside
        # the polygons
        self._lastevent = None
        self._update_data()
        self._update_data_plot()
def _key_press_callback(self, event):
"""
What to do when a key is pressed on the keyboard.

Key bindings:
* 'd'      -- while drawing: remove the last clicked vertex;
              with a vertex selected: delete that vertex (keeping at
              least a triangle); with a polygon selected: delete the
              whole polygon and its density entry.
* 'n'      -- deselect everything and start drawing a new polygon.
* 'escape' -- abort the polygon currently being drawn.
"""
if event.inaxes is None:
return
if event.key == 'd':
# Delete: behavior depends on what is currently active/selected.
if self._drawing and self._xy:
# Drop the most recent vertex of the in-progress polygon.
self._xy.pop()
if self._xy:
self._drawing_plot.set_data(list(zip(*self._xy)))
else:
self._drawing_plot.set_data([], [])
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self._drawing_plot)
self.canvas.blit(self.modelax.bbox)
elif self._ivert is not None:
poly = self.polygons[self._ipoly]
line = self.lines[self._ipoly]
# > 4 because matplotlib Polygon.xy repeats the first vertex at
# the end; this keeps at least a triangle after deletion.
if len(poly.xy) > 4:
verts = numpy.atleast_1d(self._ivert)
poly.xy = numpy.array([xy for i, xy in enumerate(poly.xy)
if i not in verts])
line.set_data(list(zip(*poly.xy)))
self._update_data()
self._update_data_plot()
# Blit only the changed artists for a fast redraw.
self.canvas.restore_region(self.background)
self.modelax.draw_artist(poly)
self.modelax.draw_artist(line)
self.canvas.blit(self.modelax.bbox)
self._ivert = None
elif self._ipoly is not None:
# Remove the whole selected polygon, its outline and density.
self.polygons[self._ipoly].remove()
self.lines[self._ipoly].remove()
self.polygons.pop(self._ipoly)
self.lines.pop(self._ipoly)
self.densities.pop(self._ipoly)
self._ipoly = None
self.canvas.draw()
self._update_data()
self._update_data_plot()
elif event.key == 'n':
# Start a fresh polygon: clear any selection and enter drawing mode.
self._ivert = None
self._ipoly = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
# Cache the clean background for blitting while drawing.
self.background = self.canvas.copy_from_bbox(self.modelax.bbox)
self._drawing = True
self._xy = []
self._drawing_plot = Line2D([], [], **self.line_args)
self._drawing_plot.set_animated(True)
self.modelax.add_line(self._drawing_plot)
self.dataax.set_title(' | '.join([
'left click: set vertice', 'right click: finish',
'esc: cancel']))
self.canvas.draw()
elif event.key == 'escape':
# Abort drawing: discard the partial polygon and its preview artist.
self._drawing = False
self._xy = []
if self._drawing_plot is not None:
self._drawing_plot.remove()
self._drawing_plot = None
for line, poly in zip(self.lines, self.polygons):
poly.set_animated(False)
line.set_animated(False)
line.set_color([0, 0, 0, 0])
self.canvas.draw()
def _mouse_move_callback(self, event):
"""
Handle things when the mouse move.

While the left button is held inside the model axes: if a single
vertex is selected, drag that vertex; otherwise drag the whole
selected polygon by the delta since the last event. Redraws via
blitting for responsiveness.
"""
if event.inaxes != self.modelax:
return
if event.button != 1:
return
# Nothing selected -> nothing to drag.
if self._ivert is None and self._ipoly is None:
return
x, y = event.xdata, event.ydata
p = self._ipoly
v = self._ivert
if self._ivert is not None:
# Move just the selected vertex to the cursor position.
self.polygons[p].xy[v] = x, y
else:
# Translate the whole polygon by the motion since the last event.
dx = x - self._lastevent.xdata
dy = y - self._lastevent.ydata
self.polygons[p].xy[:, 0] += dx
self.polygons[p].xy[:, 1] += dy
# Keep the outline in sync with the polygon vertices.
self.lines[p].set_data(list(zip(*self.polygons[p].xy)))
self._lastevent = event
# Fast redraw: restore the cached background, draw only moved artists.
self.canvas.restore_region(self.background)
self.modelax.draw_artist(self.polygons[p])
self.modelax.draw_artist(self.lines[p])
self.canvas.blit(self.modelax.bbox)
| rafaelmds/fatiando | fatiando/gravmag/interactive.py | Python | bsd-3-clause | 24,042 | [
"Gaussian"
] | a2d0fd9ae082520ec0999d382a96071a9415a11c15858eef76629e36f1509f1a |
# Standard libraries
from collections import deque
from urlparse import urlsplit
import datetime
import re
import socket
import types
import urllib
import urllib2
# Third-party libraries
from BeautifulSoup import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
import openanything
import simplejson
# TurboGears libraries
from turbogears.identity import *
from turbogears import identity
import logging
# Module-wide logger for the searcher subsystem.
logger = logging.getLogger("buzzbot.searcher")
# When True, database-backed code paths are skipped and canned search
# terms are used (see RunGeneralSearch.__init__).
DEBUG_ON= False
if not DEBUG_ON:
# presumably only `import model` is guarded by DEBUG_ON -- confirm
# against the original indentation.
import model
# TODO move bot routines and utilities to real classes and functions
import bot
myBotRoutines = bot.BotRoutines()
import botUtilities
# NOTE(review): this rebinds the imported `botUtilities` module name to a
# Utilities instance -- verify nothing needs the module object afterwards.
botUtilities = botUtilities.Utilities()
def permutation(lst):
    """
    Yield every permutation of *lst* as a new list.

    Uses an explicit iterative backtracking stack; permutations come out
    in lexicographic order of the chosen index sequences, e.g.
    [1, 2] -> [1, 2], [2, 1].
    """
    size = len(lst)
    # stack[k] holds the index of lst chosen for position k; -1 means
    # "no index tried yet at this depth".
    stack = [-1]
    while stack:
        candidate = stack[-1] + 1
        if candidate == size:
            # Exhausted all indices at this depth -> backtrack.
            stack.pop()
            continue
        stack[-1] = candidate
        if candidate in stack[:-1]:
            # Index already used at a shallower depth; try the next one.
            continue
        if len(stack) == size:
            yield [lst[pos] for pos in stack]
        stack.append(-1)
def combinations(n, k):
#n is number of choices, k is the number of elements in each combination returned
# returns
# NOTE(review): Python 2 only -- uses the `print` statement. The body
# advances `current` through the k-combinations of 1..n in lexicographic
# order, printing each element as it goes, and finally returns the last
# combination (the local `max`, which shadows the builtin). Presumably
# debug/demo code; verify nothing relies on its return value.
current = range(1, k + 1)
# `max` is the lexicographically largest combination; loop ends there.
max = range(n - k + 1, n + 1)
while current != max:
for number in current:
print 'combinations %2d'% (number),
index = -1
if current[index] != max[index]:
# The last slot can still be incremented.
current[index] = current[index] + 1
else:
# Carry: find the rightmost slot that can grow, bump it, and
# reset every slot to its right to consecutive values.
for indices in range(-2, -k - 1, -1):
if current[indices] != max[indices]:
current[indices] = current[indices] + 1
for other_indices in range(indices + 1, 0):
current[other_indices] = current[indices] + other_indices - indices
break
return max
def getUserID():
"""
Return the current user's identity as
dict(id=<user id>, groups=<group set>, msg=<notice or "">).

Falls back to a guest login when TurboGears reports no user id
(which happens when the browser blocks cookies), in which case
`msg` carries a human-readable notice.
"""
#gets the user id, substitutes a guest login id if none available (this happens if cookies
# are blocked)
# presumably 3 is the id of a pre-created "guest" User row -- confirm.
GUESTID = 3
msg=""
thisUserIDObj = identity.current.identity()
#tg's identity module relies on cookies. If the user has these blocked, the user is
# set to 'None', which confuses the routine. In this case, we'll provide a 'guest' id
# and a notification
if thisUserIDObj.user_id == None:
setUser(GUESTID)
# Re-read the identity now that the guest user has been bound.
thisUserIDObj = identity.current.identity()
msg = "Note: you are logged in as guest because cookies are blocked on your browser."
thisUser = thisUserIDObj.user_id
thisUserGroup = thisUserIDObj.groups
return dict(id=thisUser, groups=thisUserGroup, msg=msg)
def setUser(thisid):
"""
Bind user `thisid` to the current TurboGears visit and make that
user the current identity.

NOTE(review): `tg` is not imported by this module (only `turbogears`
and `identity` are); `User`/`VisitIdentity` presumably come from the
`from turbogears.identity import *` star-import -- confirm this
function is actually reachable at runtime.
"""
user =User.get(thisid)
visit_key=tg.visit.current().key
#VisitIdentity = tg.identity.soprovider.TG_VisitIdentity
IdentityObject = tg.identity.soprovider.SqlObjectIdentity
try:
link =VisitIdentity.selectBy(visit_key)
except :
link = None
# Create the visit->user link if missing, otherwise repoint it.
if not link:
link =VisitIdentity(visit_key=visit_key, user_id=user.id)
else:
link.user_id =user.id
user_identity = IdentityObject(visit_key)
identity.set_current_identity(user_identity)
def getLpidFromSearchID(searchid):
    """Return the id of the first Listeningpost whose `searchtorun`
    column matches *searchid*."""
    matches = model.Listeningpost.selectBy(searchtorun=searchid)
    return matches[0].id
class BaseSearcher(object):
    """
    Base class for paged web searchers.

    Subclasses implement `next()` to fetch one page of result URLs,
    appending them to `self.candidates`; `search()` drives `next()` until
    the searcher is exhausted or `max_results` is reached.
    """
    # Matches any string containing at least one word character.
    NON_EMPTY_STRING = re.compile(r'\w')

    def __init__(self, include=None, exclude=None, search_id=None, search=None, max_results=None, referer=None, key=None):
        """
        Instantiate a searcher.
        Keyword arguments:
        * `include` / `exclude`: phrases (list or comma-delimited string)
          to require / forbid; these override values read from the search
          record when both are given.
        * `search`: Search record to read phrases from.
        * `search_id`: Search record id to get.
        * `max_results`: Maximum number of results to return (default 10).
        * `referer`: Referer path to provide for search queries.
        * `key`: API key to send with search queries.
        """
        self.candidates = deque()
        self._position = 0
        self._search = None
        self._include = []
        self._exclude = []
        if search:
            self._search = search
        if search_id:
            self._search = model.Search.get(search_id)
        if self._search:
            self._include = self._search.urlsearchfor
            self._exclude = self._search.urleliminate
        # Explicit arguments win over the search record's values.
        if include:
            self._include = include
        if exclude:
            self._exclude = exclude
        self._max_results = max_results or 10
        self._referer = referer or "http://synovate.com/"
        self._key = key
        self._query = None

    @classmethod
    def _sanitize_string(cls, string):
        """Return `string` encoded as latin-1, replacing unencodable chars."""
        return string.encode('latin1', 'replace')

    @classmethod
    def _split_delimited_string(cls, string):
        """
        Return a list by splitting a delimited `string`. If the input is
        already a list, just return it.
        """
        # isinstance(..., str) replaces the Python-2-only types.StringType
        # (identical there) and the error-prone `and/or` conditional idiom.
        if isinstance(string, str):
            return string.split(',')
        return string

    def next(self):
        """
        Perform a partial search and return a list of the next results.
        Subclasses must implement this.
        """
        # BUG FIX: `raise NotImplemented()` raised a TypeError because the
        # NotImplemented singleton is not callable; raise the exception type.
        raise NotImplementedError()

    def search(self):
        """
        Perform search and return the deque of resulting URLs.
        Do not implement this, just use the BaseSearcher's method.
        """
        while True:
            candidates = self.next()
            if not candidates or self._position >= self._max_results:
                break
        return self.candidates
class GoogleSearcher(BaseSearcher):
    """
    Search via Google's AJAX web-search API.
    See documentation for `BaseSearcher`.
    """

    def __init__(self, max_results=64, referer="http://google.com/", **kwargs):
        BaseSearcher.__init__(self, max_results=max_results, referer=referer, **kwargs)
        # TODO Do we really need permutations? If so, then need to make self._queries list.
        self._query = self._query_for(include=self._include, exclude=self._exclude)

    @classmethod
    def _query_for(cls, include, exclude):
        """Build the query string: quoted include phrases, '-'-quoted excludes."""
        query = u""
        for phrase in cls._split_delimited_string(include):
            if cls.NON_EMPTY_STRING.match(phrase):
                query += u" \"%s\"" % phrase
        for phrase in cls._split_delimited_string(exclude):
            if cls.NON_EMPTY_STRING.match(phrase):
                query += u" -\"%s\"" % phrase
        return query

    def next(self):
        """
        Fetch the next page of results. Returns the page's URLs, [] on a
        fetch error, or None when done (quota reached or API error status).
        """
        if self._position >= self._max_results:
            return None
        params = urllib.urlencode({'q': self._query, 'v': '1.0', 'rsz': 'large', 'start': self._position})
        url = 'http://ajax.googleapis.com/ajax/services/search/web?%s' % (params)
        logger.debug("GoogleSearcher: retrieving %s" % (url))
        req = urllib2.Request(url)
        req.add_header('Referer', self._referer)
        try:
            self._response = urllib2.urlopen(req)
            self._body = self._response.read()
        # FIX: `except Exception, e` is Python-3-incompatible; `as` works
        # on Python 2.6+ as well.
        except Exception as e:
            logger.error("GoogleSearcher: %s" % e)
            return []
        self._doc = simplejson.loads(self._body)
        if self._doc['responseStatus'] != 200:
            return None
        candidates = []
        for element in self._doc['responseData']['results']:
            result = self._sanitize_string(element['url'])
            candidates.append(result)
            self.candidates.append(result)
        self._position += len(self._doc['responseData']['results'])
        logger.debug("GoogleSearcher: found %s matches" % len(candidates))
        return candidates
class YahooSearcher(BaseSearcher):
    """
    Search via Yahoo's WebSearch API.
    See documentation for `BaseSearcher`.
    """

    def __init__(self, max_results=100, referer="http://yahoo.com/", key="tjF4bq7V34G2HmcBVfiZqVur_LEM04ze1THb_actrW9M60yWJfAG.ptPLg--", **kwargs):
        BaseSearcher.__init__(self, max_results=max_results, referer=referer, key=key, **kwargs)
        # TODO Do we really need permutations? If so, then need to make self._queries list.
        self._query = self._query_for(include=self._include, exclude=self._exclude)

    @classmethod
    def _query_for(cls, include, exclude):
        """Build the query string: quoted include phrases, '-'-quoted excludes."""
        query = u""
        for phrase in cls._split_delimited_string(include):
            if cls.NON_EMPTY_STRING.match(phrase):
                query += u" \"%s\"" % phrase
        for phrase in cls._split_delimited_string(exclude):
            if cls.NON_EMPTY_STRING.match(phrase):
                query += u" -\"%s\"" % phrase
        return query

    def next(self):
        """
        Fetch the next page of results. Returns the page's URLs, [] on a
        fetch error, or None once `max_results` results have been seen.
        """
        if self._position >= self._max_results:
            return None
        # The API caps page size at 100 results per request.
        results_per_page = self._max_results if self._max_results < 100 else 100
        params = urllib.urlencode({'query': self._query, 'appid': self._key, 'start': self._position, 'results': results_per_page, 'output': 'json'})
        url = 'http://search.yahooapis.com/WebSearchService/V1/webSearch?%s' % (params)
        logger.debug("YahooSearcher: retrieving %s" % (url))
        req = urllib2.Request(url)
        req.add_header('Referer', self._referer)
        try:
            self._response = urllib2.urlopen(req)
            self._body = self._response.read()
        # FIX: `except Exception, e` is Python-3-incompatible; `as` works
        # on Python 2.6+ as well.
        except Exception as e:
            logger.error("YahooSearcher: %s" % e)
            return []
        self._doc = simplejson.loads(self._body)
        candidates = []
        for element in self._doc['ResultSet']['Result']:
            result = self._sanitize_string(element['Url'])
            candidates.append(result)
            self.candidates.append(result)
        self._position += len(candidates)
        logger.debug("YahooSearcher: found %s matches" % len(candidates))
        return candidates
class TechnoratiSearcher(BaseSearcher):
    """
    Search via Technorati's blog-search API.
    See documentation for `BaseSearcher`.
    """

    def __init__(self, max_results=100, referer="http://api.technorati.com/", key="de1ae02365787a36717618b4814cf7ea", **kwargs):
        BaseSearcher.__init__(self, max_results=max_results, referer=referer, key=key, **kwargs)
        # TODO Do we really need permutations? If so, then need to make self._queries list.
        self._query = self._query_for(include=self._include, exclude=self._exclude)

    @classmethod
    def _query_for(cls, include, exclude):
        """Build the query string from include phrases only (see note below)."""
        query = u""
        for phrase in cls._split_delimited_string(include):
            if cls.NON_EMPTY_STRING.match(phrase):
                query += u" \"%s\"" % phrase
        # TODO Is there really no way to make Technorati exclude words?!
        #for phrase in cls._split_delimited_string(exclude):
        #    if cls.NON_EMPTY_STRING.match(phrase):
        #        query += u" -\"%s\"" % phrase
        return query

    def next(self):
        """
        Fetch the next page of results. Returns the page's permalink URLs,
        [] on a fetch error, or None once `max_results` have been seen.
        """
        if self._position >= self._max_results:
            return None
        # The API caps page size at 100 results per request.
        results_per_page = self._max_results if self._max_results < 100 else 100
        params = {
            "key": self._key,
            "query": self._query,
            "format": "xml",
            "language": "en",
            "authority": "a4",
            "start": self._position,
            "limit": results_per_page,
        }
        url = "http://api.technorati.com/search?%s" % urllib.urlencode(params)
        logger.debug("TechnoratiSearcher: retrieving %s" % (url))
        req = urllib2.Request(url)
        try:
            self._response = urllib2.urlopen(req)
            self._body = self._response.read()
        # FIX: `except Exception, e` is Python-3-incompatible; `as` works
        # on Python 2.6+ as well.
        except Exception as e:
            logger.error("TechnoratiSearcher: %s" % e)
            return []
        self._doc = BeautifulStoneSoup(self._body)
        candidates = []
        nodes = self._doc.findAll("item")
        for node in nodes:
            # `permalink` is the blog entry, `url` is the blog's homepage
            result = self._sanitize_string(node.find("permalink").contents[0])
            candidates.append(result)
            self.candidates.append(result)
        self._position += len(candidates)
        logger.debug("TechnoratiSearcher: found %s matches" % len(candidates))
        return candidates
class MultiSearcher(BaseSearcher):
    """
    Searches using multiple searcher engines.

    Each `next()` call polls every still-active searcher once and pools
    the results into `self.candidates`.
    """
    # Default engines used when the caller does not supply any.
    searcher_classes = [
        GoogleSearcher,
        YahooSearcher,
        TechnoratiSearcher,
    ]

    def __init__(self, searcher_classes=None, **kwargs):
        """
        `searcher_classes`: optional list of searcher classes; defaults to
        the class-level `searcher_classes` (the old `[]` default was a
        mutable default argument). Remaining keyword arguments are passed
        to each searcher's constructor.
        """
        # Deliberately does not call BaseSearcher.__init__; only the shared
        # candidates deque is needed here.
        self.candidates = deque()
        self._searchers = []
        self._kwargs = kwargs
        for searcher_class in (searcher_classes or self.searcher_classes):
            self._searchers.append(searcher_class(**self._kwargs))
        self._active_searchers = []
        self._active_searchers.extend(self._searchers)

    def next(self):
        """
        Poll each active searcher once; return the combined results, or
        None when every searcher is exhausted.
        """
        results = []
        # BUG FIX: iterate over a snapshot -- removing an exhausted
        # searcher from the list while iterating it skipped the searcher
        # immediately after it.
        for searcher in list(self._active_searchers):
            searcher_results = searcher.next()
            if searcher_results:
                results.extend(searcher_results)
                self.candidates.extend(searcher_results)
            else:
                self._active_searchers.remove(searcher)
        if results:
            return results
        else:
            return None

    def search(self):
        """Drain every searcher and return the pooled candidates deque."""
        while True:
            results = self.next()
            if results:
                continue
            else:
                logger.debug("MultiSearcher done")
                break
        return self.candidates
class SearchRunner(object):
"""
Wrapper that does all work needed for running searches.
"""
def __init__(self, **kwargs):
"""
Instantiate.
Keyword arguments:
* `search_id`: Required!
* `delete_existing`: when True, purge prior results before searching.
* All others same as `MultiSearcher`.
"""
self._search_id = kwargs.get('search_id')
self._search = model.Search.get(self._search_id)
self._max_results = kwargs.get('max_results', None)
self._delete_existing = kwargs.get('delete_existing', False)
searcher_kwargs = dict(
search_id = self._search_id,
max_results = self._max_results
)
self._searcher = MultiSearcher(**searcher_kwargs)
def __iter__(self):
"""
Return URLS records found by the search.

Side effects, in order: optionally deletes prior Contsearch/Content/
URLS rows, runs the multi-engine search, persists the found URLs via
the bot routines, then de-dups and fixes them before yielding.
"""
# Delete existing results
if self._delete_existing:
model.Contsearch.deleteBy(search = self._search_id)
model.Content.deleteBy(searchid = self._search_id, deleteme = self._delete_existing)
model.URLS.deleteBy(search_id=self._search_id, deleteme = self._delete_existing)
# Perform search
candidates = self._searcher.search()
myBotRoutines.addUrlsFromDeque(self._search_id, self._delete_existing, candidates)
# Cleanup results
# TODO prevent searchers from creating unwanted URLs in the first place
botUtilities.deDupUrls(self._search_id)
botUtilities.fixUrls(self._search.targetword, self._search_id)
# Return results
# TODO why are we storing URLS objects? does anything use them?
urls_records = model.URLS.selectBy(search_id = self._search_id)
if self._max_results:
urls_records = urls_records[:self._max_results]
for urls_record in urls_records:
yield urls_record
def search(self):
"""
Return list of URLS records found by the search.
"""
return list(self)
class Search(object):
    """
    Query Google's AJAX search API for `term` and iterate over results.

    Iterating yields up to `max_results` (url, title, excerpt) tuples.
    Results arrive in API pages (~8 per request, despite the original
    comment's expectations), so `_fill` is re-invoked with the current
    offset as buffered candidates are consumed.
    """

    def __init__(self, term, max_results=14):
        self.term = term
        self.max_results = max_results
        # Buffered (url, title, excerpt) tuples awaiting consumption.
        self.candidates = deque()
        # Base query URL; the page offset is appended by _fill().
        self._url = (
            'http://ajax.googleapis.com/' +
            'ajax/services/search/web?v=1.0&' +
            urllib.urlencode([('q', self.term),
                              ('start', '')]))

    def iterateMe(self):
        # Legacy helper: creates (and discards) an iterator. Kept for
        # backward compatibility with existing callers.
        self.__iter__()

    def _fill(self, start):
        """Fill the queue with fresh search results.

        Returns the number of results appended, or None when the response
        contains no results. Raises on a non-200 API status or an
        unexpected payload shape.
        """
        # results is a dict of responseData, responseDetails, responseStatus;
        # responseData['results'] items carry unescapedUrl / titleNoFormatting
        # / content, which is what we keep.
        url = self._url + str(start)
        results = simplejson.loads(openanything.fetch(url)['data'])
        if results['responseStatus'] != 200:
            raise Exception('Google Error: %s' % results['responseStatus'])
        try:
            data = results['responseData']
            if not data['results']:
                return
            for r in data['results']:
                self.candidates.append(
                    (r['unescapedUrl'], r['titleNoFormatting'], r['content']))
            return len(data['results'])
        # FIX: `except KeyError, ex` is Python-3-incompatible syntax.
        except KeyError as ex:
            raise Exception('Google Error: unexpected response (%r)' % ex)

    def filter(self, url, title, excerpt):
        """Hook for subclasses to reject results; accept everything here."""
        return True

    def __iter__(self):
        # `limit` instead of the original `max`, which shadowed the builtin.
        count, limit = 0, self.max_results
        while count < limit:
            if not self.candidates:
                if not self._fill(count):
                    return
            candidate = self.candidates.popleft()
            if self.filter(*candidate):
                count += 1
                # BUG FIX: yield the candidate that passed the filter; the
                # old code popped and yielded the *next* queued item (and
                # raised IndexError when the queue was empty).
                yield candidate

    def __repr__(self):
        return 'buzzbot.search.Search(%r)' % self.term
def _make_re(text):
word_bounded = re.split(r"\b%s\b", text) # for word in text.split()
return re.compile('(%s)' % '|'.join(word_bounded))
#return "dogma" #re.compile('(%s)' % '|'.join(word_bounded))
class BuzzSearch(Search):
"""
Search subclass intended to screen out blog/spam/stale URLs.

The screening patterns are built at class-definition time, but the
`filter` hook below is currently a no-op: every candidate is accepted
(the original author disabled the checks -- see comments inside).
"""
BAD_URL_GENERAL = _make_re(
'blog wiki amazon tinyurl photo music yootube '
'napster podshow games mevio vidio pod')
BAD_URL_SPAM = _make_re(
'robloger ihelpyoublog archive blogtoolscollection')
BAD_URL_OLD = _make_re('2004 2005 2006 2007')
def filter(self, url, title, excerpt):
#this filter is pretty clever, but misapplied to the URLs;
# we'll keep it around to crib later
#url = url.lower()
#if self.BAD_URL_GENERAL.search(url):
# return False
#if self.BAD_URL_SPAM.search(url):
# return False
#if self.BAD_URL_OLD.search(url):
# return False
return True
class RunGeneralSearch(object):
"""
Run a search against a non-Google, blog-oriented search site
(currently only technorati is wired up), persisting newly found URLs
and their content to the database as it goes.

NOTE(review): Python 2 only -- the body uses `print` statements and
the `<>` operator. Kept byte-for-byte below; review notes are added
as comments where behavior looks suspicious.
"""
#this is intended to expand the repertoire of search results beyond what the google
# api will provide by going to other blog-oriented sites. It works like this:
# compose the a search string to the site's specs
# for each page of results
# find the urls returned
# for each url
# screen for duplicates, then add to the database, flagging as a 'seed' url
# visit the site, grab content, check for duplicates, then add to database
#we can enhance this later by recursing thru the urls, checking sites linked, etc.
#it's called by invoking an instance of this class, passing in a search term e.g.,
#myreturn = instanceOfRunGoogleSearch("searchNum", sitename)
# Upper bound on result pages requested per term permutation.
MAX_PAGES = 10
def __init__(self, searchNum):
if not DEBUG_ON:
#grab the search specifics from the database
searchobj = model.Search.get(searchNum)
searchstr = searchobj.urlsearchfor
elimstr = searchobj.urleliminate
if not searchstr:
urlFind = []
else:
urlFind = searchstr.split(',')
if not searchobj.urleliminate:
urlEliminate = []
else: urlEliminate = elimstr.split(',')
if type(urlEliminate) ==str:
urlEliminate = [urlEliminate]
if type(urlFind)==str:
urlFind = [urlFind]
else: #debug search parameters
searchstr = "gates, microsoft, apple"
elimstr = "aardvark, zebra"
urlFind = searchstr.split(',')
urlEliminate = elimstr.split(',')
#add a plus sign to signal these terms for elimination
for i in range(0, len(urlFind)):
urlFind[i]='+'+urlFind[i].strip()
#add a plus-minus sign to signal these terms for elimination
for i in range(0, len(urlEliminate)):
urlEliminate[i]='+-'+urlEliminate[i].strip()
#get all the permutations if there just a few search terms
# self._terms ends up as a list of term-lists, one per permutation,
# each with the exclusion terms appended.
self._terms = []
if len(urlFind) <= 1: #this implies a single search term
self._terms.append(urlFind + urlEliminate)
else:
searchPermutations = permutation(urlFind)
for u in searchPermutations:
self._terms.append(u + urlEliminate)
self.candidates = deque()
def _createSearchUrls(self, sitename):
#creates a bunch of search engine queries, one for each page of results and one for
# each permutation of search terms; these differ per the search engine's "dialect".
# This is used to find new urls (it won't visit existing urls).
searchUrls = []
#get the short site name (this is the second-from-last element of the site name)
sitenameComponents = sitename.split('.')
shortSitename = sitenameComponents[len(sitenameComponents)-2]
#clean up the list of search terms
if sitename == 'www.technorati.com':
#sample query: http://www.technorati.com/search/cubs%2BAND%2Bthe%2BAND%2Btornado?authority=a4&language=en&page=4
for p in range(1, self.MAX_PAGES+1): #for each page of results
for termlist in self._terms: #for each permutation of terms
termstr = []
for t in termlist: #clean and concantenate the search terms
t= t.replace('-','NOT')
t= t.replace(" ", "%20")
termstr.append(t)
searchstr = ''
for ts in termstr:
searchstr = searchstr + ts
mystr =[] #build up the search string
mystr.append("http://" + sitename + "/search/")
mystr.append(searchstr)
mystr.append("?authority=a4&language=en&page=" + str(p))
thisUrl = ""
for m in mystr:
thisUrl = thisUrl + m
searchUrls.append(thisUrl) #...and add it to our list
#add other search engines here
return searchUrls
def _fill(self, searchid, sitename):
"""Pings search engine for new urls, then finds content."""
#to avoid banging on the search engine too, we'll visit each site found before returning
#grab the lpid and userid
thislp = getLpidFromSearchID(searchid)
userinfo = getUserID()
thisuser = userinfo['id']
#get the short site name (this is the second-from-last element of the site name)
sitenameComponents = sitename.split('.')
shortSitename = sitenameComponents[len(sitenameComponents)-2]
sitelist = self._createSearchUrls(sitename)
scoreContent = True
deleteme = False
# Cap every network operation at 5 seconds.
socket.setdefaulttimeout(5)
candidates = []
searchDBobj = model.Search.get(searchid)
targetword = searchDBobj.targetword
#*************************
for s in sitelist:
trials = 0
fetchOk = False
try:
#myRetObj = openanything.fetch(s)
myRetObj = myBotRoutines.getContentWithFeedparser(s)
fetchOk = True
except:
print "timed out fetching: " + s
pass
if fetchOk:
#results = myRetObj['data']
results = myRetObj['cont']
if myRetObj['status'] == 200:
#rawurls = myBotRoutines.digUrlsOutOfPage(myRetObj)
rawurls = myRetObj.links
#do a first-order screen to eliminate known bad urls, internal links and dups
for u in rawurls:
isgood = True
u=u.replace('"', '')
if u.find(shortSitename) >=0:
isgood = False #screens for internal links
rejectedBecause = "internal link"
else:
urlscreen = myBotRoutines.goodUrl(u)
# `<>` is Python-2-only spelling of `!=`.
if urlscreen.get('status') <> 'bad':
if model.URLS.selectBy(search_id =searchid, urltext = u).count() > 0:
isgood = False
rejectedBecause = "duplicate url"
else:
rejectedBecause = urlscreen.get('reason')
isgood=False
if not isgood:
print "rejected: " + rejectedBecause + " " + u
a=0
if isgood:
scheme, netloc, path, query, fragment = urlsplit(u)
now = datetime.datetime.now()
#add the url to the database and the return array
url = model.URLS(scheme = scheme, netloc=netloc, path=path, search_id = searchid,
query = query, fragment=fragment, urltext=u,
lpid = thislp, datefound=now, userid = thisuser)
#grab content from the url
myret = myBotRoutines.visitScoreStoreSingleURL(searchid, url, scoreContent, deleteme)
#if we have not added any content for this url, delete it
if myret == 'bad':
model.URLS.delete(url.id)
else:
candidates.append(u)
# NOTE(review): returns self.candidates (never appended to here) while
# accepted URLs go into the local `candidates` -- confirm intent.
return self.candidates
def __iter__(self):
# NOTE(review): _fill() is called without its required (searchid,
# sitename) arguments, and two items are popped per yield -- this
# iterator looks broken/dead; confirm before relying on it.
if not self.candidates:
if not self._fill():
return
candidate = self.candidates.popleft()
yield self.candidates.popleft()
def __repr__(self):
return 'buzzbot.search.Search(%r)' % self._terms
class RunBackgroudSearch(object):
"""
Run a RunGeneralSearch for every Listeningpost against every site in
`mysites`, entirely for its database side effects.

(Name typo "Backgroud" is kept: renaming would break callers.)
"""
def __init__(self, mysites):
mylps = model.Listeningpost.select()
#mysites = ['technorati']
for lp in mylps:
searchNum = lp.searchtorun
for sitename in mysites:
# A fresh searcher per site; _fill persists results itself,
# so the returned iterator value is unused.
searchInstance = RunGeneralSearch(searchNum)
myiter = searchInstance._fill(searchNum, sitename)
# Manual entry point: boots the buzzbot environment, runs a MultiSearcher
# for a sample query, and prints the pooled results.
# NOTE(review): Python 2 only (uses `print` statements).
if __name__ == '__main__':
# TODO These samples are broken, are they needed?
"""
#searchInstance = RunGoogleSearch("dogma")
#myiter = searchInstance.__iter__()
searchid = 64
#searchInstance = RunGoogleSearch(searchid)
searchInstance = RunGeneralSearch(searchNum, sitename)
#this invokes the search routine
myiter = searchInstance._fill()
#this accesses the deque (double-ended queue object, pronounced "deck") and adds urls to the db
if not DEBUG_ON:
myBotRoutines.addUrlsFromDeque(searchid, searchInstance.candidates)
#myBotRoutines.visitUrlsLiteVersion(searchid, scoreContent)
myBotRoutines.visutUrlsFeedParser(searchid, searchInstance.candidates)
"""
# Start environment
import buzzbot.commands
buzzbot.commands.boot()
# Run searcher
### searcher_class = YahooSearcher
### searcher_class = GoogleSearcher
### searcher_class = TechnoratiSearcher
searcher_class = MultiSearcher
### searcher_class = MultiSearcher
searcher = searcher_class(include="linus pauling", exclude="buy, pharmacy", max_results=8)
results = searcher.search()
print "Results:"
for result in results:
print "* %s" % result
| pbarton666/buzz_bot | bot_project/buzzbot/searcher.py | Python | mit | 29,230 | [
"VisIt"
] | 76cb74d254dd4c358ae346c55dff29ed3b7b3fee31e817e60f2716152a8209c6 |
#!/usr/bin/env python3
# Copyright (C) 2021
# Sebastian Eibl, Max Planck Computing & Data Facility
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressopp
import h5py
import subprocess
import unittest
class TestH5MD(unittest.TestCase):
def binary_compare(self, f1, f2):
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(512)
b2 = fp2.read(512)
self.assertEqual(b1, b2)
if not b1:
return True
def compare_hdf5_structure(self, filename1, filename2):
f1 = h5py.File(filename1, 'r')
f2 = h5py.File(filename2, 'r')
f1_groups = []
f1.visit(lambda x: f1_groups.append(x))
f2_groups = []
f2.visit(lambda x: f2_groups.append(x))
self.assertListEqual(f1_groups, f2_groups)
for key in f1_groups:
f1_dataset = f1[key]
f2_dataset = f2[key]
if type(f1_dataset) == h5py.Dataset:
self.assertEqual(type(f2_dataset), h5py.Dataset)
self.assertTupleEqual(f1_dataset.shape, f2_dataset.shape)
self.assertEqual(f1_dataset.dtype, f2_dataset.dtype)
def test_dump(self):
self.system, self.integrator = espressopp.standard_system.Default((10., 10., 10.))
self.system.rng = espressopp.esutil.RNG(42)
for pid in range(34):
pos = self.system.bc.getRandomPos()
self.system.storage.addParticle(pid, pos)
dump_h5md_parallel = espressopp.io.DumpH5MDParallel(self.system, 'dump.h5')
dump_h5md_parallel.dump()
self.compare_hdf5_structure('reference.h5', 'dump.h5')
def test_reload_dump(self):
self.system, self.integrator = espressopp.standard_system.Default((10., 10., 10.))
self.system.rng = espressopp.esutil.RNG(42)
for pid in range(34):
pos = self.system.bc.getRandomPos()
self.system.storage.addParticle(pid, pos)
dump_h5md_parallel = espressopp.io.DumpH5MDParallel(self.system, 'reference.h5')
dump_h5md_parallel.dump()
self.system.storage.removeAllParticles()
restore_h5md_parallel = espressopp.io.RestoreH5MDParallel(self.system, 'reference.h5')
restore_h5md_parallel.restore()
dump_h5md_parallel = espressopp.io.DumpH5MDParallel(self.system, 'dump.h5')
dump_h5md_parallel.dump()
self.binary_compare('reference.h5', 'dump.h5')
if __name__ == '__main__':
unittest.main()
| espressopp/espressopp | testsuite/io/test_h5md_parallel.py | Python | gpl-3.0 | 3,193 | [
"ESPResSo",
"VisIt"
] | a9a6a929d02ea8cb93f5c976e1b53179c88dd942c6fe78cec7102566f88b977c |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
# Add skbio.io to sys.modules to prevent cycles in our imports
import skbio.io # noqa
# imports included for convenience
from skbio.sequence import Sequence, DNA, RNA, Protein, GeneticCode
from skbio.stats.distance import DistanceMatrix
from skbio.alignment import local_pairwise_align_ssw, TabularMSA
from skbio.tree import TreeNode, nj
from skbio.io import read, write
from skbio._base import OrdinationResults
__all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
'DistanceMatrix', 'local_pairwise_align_ssw', 'TabularMSA',
'TreeNode', 'nj', 'read', 'write', 'OrdinationResults']
__credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
__version__ = "0.4.2-dev"
# Project motto history; `motto` always exposes the most recent entry.
mottos = [
# 03/15/2014
"It's gonna get weird, bro.",
# 05/14/2014
"no cog yay",
# 03/18/2015
"bincount!",
]
motto = mottos[-1]
# Created at patorjk.com
title = r"""
* *
_ _ _ _ _ _
(_) | (_) | | | (_)
___ ___ _| | ___| |_ ______| |__ _ ___
/ __|/ __| | |/ / | __|______| '_ \| |/ _ \
\__ \ (__| | <| | |_ | |_) | | (_) |
|___/\___|_|_|\_\_|\__| |_.__/|_|\___/
* *
"""
# Created by @gregcaporaso
art = r"""
Opisthokonta
\ Amoebozoa
\ /
* Euryarchaeota
\ |_ Crenarchaeota
\ *
\ /
*
/
/
/
*
/ \
/ \
Proteobacteria \
Cyanobacteria
"""
# Prepend the ASCII banner to the package docstring; `__doc__` is None
# when Python runs with -OO, so guard before concatenating.
if __doc__ is None:
__doc__ = title + art
else:
__doc__ = title + art + __doc__
# `skbio.test()` runs the package's test suite (TestRunner imported above).
test = TestRunner(__file__).test
if __name__ == '__main__':
test()
| anderspitman/scikit-bio | skbio/__init__.py | Python | bsd-3-clause | 2,365 | [
"scikit-bio"
] | aef6afcad5207a1f71d0b2362c135ca69c464e05fe9ba383f1cbd6ffe58b75e4 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Most of the pytorch code is adapted from Pytorch's tutorial for
# neural networks training on Cifar10
# https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
#
from __future__ import print_function
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.learn.trigger import EveryEpoch
# Command-line options for the example: where to run and which backend
# the PyTorch Estimator should use.
parser = argparse.ArgumentParser(description='PyTorch Cifar10 Example')
parser.add_argument('--cluster_mode', type=str, default="local",
help='The cluster mode, such as local, yarn or k8s.')
parser.add_argument('--backend', type=str, default="bigdl",
help='The backend of PyTorch Estimator; '
'bigdl and torch_distributed are supported')
args = parser.parse_args()
# Start the Orca context before any data/model work.
if args.cluster_mode == "local":
init_orca_context(memory="4g")
elif args.cluster_mode == "yarn":
init_orca_context(
cluster_mode="yarn-client", num_nodes=2, driver_memory="4g",
conf={"spark.rpc.message.maxSize": "1024",
"spark.task.maxFailures": "1",
"spark.driver.extraJavaOptions": "-Dbigdl.failure.retryTimes=1"})
# Shared CIFAR-10 preprocessing: to tensor, then scale each channel to [-1, 1].
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def train_loader_creator(config, batch_size):
    """Build a shuffled DataLoader over the CIFAR-10 training split.

    ``config`` may carry a "root" key naming the dataset directory
    (default "./data"); the dataset is downloaded there if missing.
    Uses the module-level ``transform`` preprocessing pipeline.
    """
    root = config.get("root", "./data")
    dataset = torchvision.datasets.CIFAR10(root=root, train=True,
                                           download=True, transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, num_workers=2)
def test_loader_creator(config, batch_size):
    """Build an unshuffled DataLoader over the CIFAR-10 test split.

    Mirrors ``train_loader_creator`` but with ``train=False`` and no
    shuffling so evaluation order is deterministic.
    """
    root = config.get("root", "./data")
    dataset = torchvision.datasets.CIFAR10(root=root, train=False,
                                           download=True, transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=False, num_workers=2)
# Human-readable CIFAR-10 class names, indexed by label id (0-9).
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def imshow(img):
    """Display a (C, H, W) image tensor that was normalised to [-1, 1]."""
    # Undo Normalize((0.5,)*3, (0.5,)*3) so colours render correctly,
    # then reorder axes to matplotlib's (H, W, C) convention.
    unnormalized = img / 2 + 0.5
    hwc_image = np.transpose(unnormalized.numpy(), (1, 2, 0))
    plt.imshow(hwc_image)
    plt.show()
class Net(nn.Module):
    """Small LeNet-style CNN for 32x32 RGB inputs with 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv/pool stages reduce a 32x32 input to 16 feature maps of
        # 5x5, which the fully-connected head maps to class scores.
        # (Layer creation order is preserved so parameter init matches.)
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw (unnormalised) class logits for a batch of images."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
def model_creator(config):
    """Orca model-creator hook: return a freshly initialised Net.

    ``config`` is accepted for interface compatibility but unused here.
    """
    return Net()
def optim_creator(model, config):
    """Orca optimizer-creator hook: SGD with momentum over model params.

    Learning rate and momentum may be overridden through ``config``
    (defaults: lr=0.001, momentum=0.9).
    """
    learning_rate = config.get("lr", 0.001)
    momentum = config.get("momentum", 0.9)
    return optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)
# ---------------------------------------------------------------------------
# Script body: preview data, then train/evaluate with the chosen backend.
# ---------------------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
batch_size = 4
root_dir = "./data"

train_loader = train_loader_creator(config={"root": root_dir}, batch_size=batch_size)
test_loader = test_loader_creator(config={"root": root_dir}, batch_size=batch_size)

# Plot some random training images with their labels.
# FIX: use the builtin next() instead of the iterator's .next() method --
# .next() is the Python-2-era spelling and was removed from PyTorch's
# DataLoader iterator; next(dataiter) works on every version.
dataiter = iter(train_loader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(batch_size)))

# Same preview for a batch of test images with their ground truth.
dataiter = iter(test_loader)
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(batch_size)))

if args.backend == "bigdl":
    # bigdl backend: build the model/optimizer eagerly and pass instances.
    net = model_creator(config={})
    optimizer = optim_creator(model=net, config={"lr": 0.001})
    orca_estimator = Estimator.from_torch(model=net,
                                          optimizer=optimizer,
                                          loss=criterion,
                                          metrics=[Accuracy()],
                                          backend="bigdl")
    orca_estimator.fit(data=train_loader, epochs=2, validation_data=test_loader,
                       checkpoint_trigger=EveryEpoch())
    res = orca_estimator.evaluate(data=test_loader)
    print("Accuracy of the network on the test images: %s" % res)
elif args.backend == "torch_distributed":
    # torch_distributed backend: pass creator functions so each remote
    # worker constructs its own model, optimizer and loaders from config.
    orca_estimator = Estimator.from_torch(model=model_creator,
                                          optimizer=optim_creator,
                                          loss=criterion,
                                          metrics=[Accuracy()],
                                          backend="torch_distributed",
                                          config={"lr": 0.001,
                                                  "root": root_dir})
    orca_estimator.fit(data=train_loader_creator, epochs=2, batch_size=batch_size)
    res = orca_estimator.evaluate(data=test_loader_creator)
    for r in res:
        print(r, ":", res[r])
else:
    raise NotImplementedError("Only bigdl and torch_distributed are supported as the backend,"
                              " but got {}".format(args.backend))

stop_orca_context()
| intel-analytics/analytics-zoo | pyzoo/zoo/examples/orca/learn/pytorch/cifar10/cifar10.py | Python | apache-2.0 | 6,360 | [
"ORCA"
] | 78cce4d3557fd78f0ef736638bde70c8e0e21bec55f6cf0db7fac9a1dc61c018 |
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import gc
import sys
import time
from threading import Thread, Event
import zmq
from zmq.tests import (
BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestContext(BaseZMQTestCase):
    """Exercise zmq.Context lifecycle: creation, termination, destruction,
    socket-option inheritance, shadowing and garbage collection."""

    def test_init(self):
        # Contexts can be created and released repeatedly.
        c1 = self.Context()
        self.assert_(isinstance(c1, self.Context))
        del c1
        c2 = self.Context()
        self.assert_(isinstance(c2, self.Context))
        del c2
        c3 = self.Context()
        self.assert_(isinstance(c3, self.Context))
        del c3

    def test_dir(self):
        # dir() should expose the socket factory and, on libzmq >= 3,
        # context option names such as IO_THREADS.
        ctx = self.Context()
        self.assertTrue('socket' in dir(ctx))
        if zmq.zmq_version_info() > (3,):
            self.assertTrue('IO_THREADS' in dir(ctx))
        ctx.term()

    def test_term(self):
        c = self.Context()
        c.term()
        self.assert_(c.closed)

    def test_context_manager(self):
        # Leaving the with-block must terminate the context.
        with self.Context() as c:
            pass
        self.assert_(c.closed)

    def test_fail_init(self):
        # Negative io_threads is rejected by libzmq with EINVAL.
        self.assertRaisesErrno(zmq.EINVAL, self.Context, -1)

    def test_term_hang(self):
        # term() must not hang on a pending message when LINGER is 0.
        rep, req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
        req.setsockopt(zmq.LINGER, 0)
        req.send(b'hello', copy=False)
        req.close()
        rep.close()
        self.context.term()

    def test_instance(self):
        # Context.instance() is a process-wide singleton; terminating it
        # causes the next instance() call to create a fresh one.
        ctx = self.Context.instance()
        c2 = self.Context.instance(io_threads=2)
        self.assertTrue(c2 is ctx)
        c2.term()
        c3 = self.Context.instance()
        c4 = self.Context.instance()
        self.assertFalse(c3 is c2)
        self.assertFalse(c3.closed)
        self.assertTrue(c3 is c4)

    def test_many_sockets(self):
        """opening and closing many sockets shouldn't cause problems"""
        ctx = self.Context()
        for i in range(16):
            sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
            [ s.close() for s in sockets ]
            # give the reaper a chance
            time.sleep(1e-2)
        ctx.term()

    def test_sockopts(self):
        """setting socket options with ctx attributes"""
        ctx = self.Context()
        ctx.linger = 5
        self.assertEqual(ctx.linger, 5)
        s = ctx.socket(zmq.REQ)
        # New sockets inherit the context-level default.
        self.assertEqual(s.linger, 5)
        self.assertEqual(s.getsockopt(zmq.LINGER), 5)
        s.close()
        # check that subscribe doesn't get set on sockets that don't subscribe:
        ctx.subscribe = b''
        s = ctx.socket(zmq.REQ)
        s.close()
        ctx.term()

    def test_destroy(self):
        """Context.destroy should close sockets"""
        ctx = self.Context()
        sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
        # close half of the sockets
        [ s.close() for s in sockets[::2] ]
        ctx.destroy()
        # reaper is not instantaneous
        time.sleep(1e-2)
        for s in sockets:
            self.assertTrue(s.closed)

    def test_destroy_linger(self):
        """Context.destroy should set linger on closing sockets"""
        req, rep = self.create_bound_pair(zmq.REQ, zmq.REP)
        req.send(b'hi')
        time.sleep(1e-2)
        self.context.destroy(linger=0)
        # reaper is not instantaneous
        time.sleep(1e-2)
        for s in (req, rep):
            self.assertTrue(s.closed)

    def test_term_noclose(self):
        """Context.term won't close sockets"""
        ctx = self.Context()
        s = ctx.socket(zmq.REQ)
        self.assertFalse(s.closed)
        # term() blocks until all sockets are closed, so run it in a thread
        # and check it only finishes once we close the socket.
        t = Thread(target=ctx.term)
        t.start()
        t.join(timeout=0.1)
        self.assertTrue(t.is_alive(), "Context should be waiting")
        s.close()
        t.join(timeout=0.1)
        self.assertFalse(t.is_alive(), "Context should have closed")

    def test_gc(self):
        """test close&term by garbage collection alone"""
        if PYPY:
            raise SkipTest("GC doesn't work ")
        # test credit @dln (GH #137):
        def gcf():
            def inner():
                ctx = self.Context()
                s = ctx.socket(zmq.PUSH)
            inner()
            gc.collect()
        # Run in a thread so a hang shows up as a join timeout, not a
        # hung test process.
        t = Thread(target=gcf)
        t.start()
        t.join(timeout=1)
        self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context")

    def test_cyclic_destroy(self):
        """ctx.destroy should succeed when cyclic ref prevents gc"""
        # test credit @dln (GH #137):
        class CyclicReference(object):
            def __init__(self, parent=None):
                self.parent = parent

            def crash(self, sock):
                self.sock = sock
                self.child = CyclicReference(self)

        def crash_zmq():
            ctx = self.Context()
            sock = ctx.socket(zmq.PULL)
            c = CyclicReference()
            c.crash(sock)
            ctx.destroy()
        crash_zmq()

    def test_term_thread(self):
        """ctx.term should not crash active threads (#139)"""
        ctx = self.Context()
        evt = Event()
        evt.clear()

        def block():
            # Blocks in recv() until term() interrupts it with ETERM.
            s = ctx.socket(zmq.REP)
            s.bind_to_random_port('tcp://127.0.0.1')
            evt.set()
            try:
                s.recv()
            except zmq.ZMQError as e:
                self.assertEqual(e.errno, zmq.ETERM)
                return
            finally:
                s.close()
            self.fail("recv should have been interrupted with ETERM")
        t = Thread(target=block)
        t.start()
        evt.wait(1)
        self.assertTrue(evt.is_set(), "sync event never fired")
        time.sleep(0.01)
        ctx.term()
        t.join(timeout=1)
        self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")

    def test_destroy_no_sockets(self):
        # destroy() on a context whose sockets are already closed.
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        s.bind_to_random_port('tcp://127.0.0.1')
        s.close()
        ctx.destroy()
        assert s.closed
        assert ctx.closed

    def test_ctx_opts(self):
        # get()/set() and the attribute spelling address the same option.
        if zmq.zmq_version_info() < (3,):
            raise SkipTest("context options require libzmq 3")
        ctx = self.Context()
        ctx.set(zmq.MAX_SOCKETS, 2)
        self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2)
        ctx.max_sockets = 100
        self.assertEqual(ctx.max_sockets, 100)
        self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100)

    def test_shadow(self):
        # A shadow context wraps the same underlying libzmq context; it
        # does not own it, so deleting the shadow must not close it.
        ctx = self.Context()
        ctx2 = self.Context.shadow(ctx.underlying)
        self.assertEqual(ctx.underlying, ctx2.underlying)
        s = ctx.socket(zmq.PUB)
        s.close()
        del ctx2
        self.assertFalse(ctx.closed)
        s = ctx.socket(zmq.PUB)
        ctx2 = self.Context.shadow(ctx.underlying)
        s2 = ctx2.socket(zmq.PUB)
        s.close()
        s2.close()
        ctx.term()
        # After the real context terminates, the shadow is unusable.
        self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB)
        del ctx2

    def test_shadow_pyczmq(self):
        # Interop: shadow a context created by pyczmq and exchange a
        # message across the two bindings via inproc.
        try:
            from pyczmq import zctx, zsocket, zstr
        except Exception:
            raise SkipTest("Requires pyczmq")
        ctx = zctx.new()
        a = zsocket.new(ctx, zmq.PUSH)
        zsocket.bind(a, "inproc://a")
        ctx2 = self.Context.shadow_pyczmq(ctx)
        b = ctx2.socket(zmq.PULL)
        b.connect("inproc://a")
        zstr.send(a, b'hi')
        rcvd = self.recv(b)
        self.assertEqual(rcvd, b'hi')
        b.close()
# Green (gevent) variants of the context tests are currently switched off
# wholesale via the constant-False guard below.
if False: # disable green context tests
    class TestContextGreen(GreenTest, TestContext):
        """gevent subclass of context tests"""
        # skip tests that use real threads:
        test_gc = GreenTest.skip_green
        test_term_thread = GreenTest.skip_green
        test_destroy_linger = GreenTest.skip_green
| ellisonbg/pyzmq | zmq/tests/test_context.py | Python | lgpl-3.0 | 8,546 | [
"Brian"
] | f29f6c6ece1c200356d067e908f35690a1fe48cb6225e6707f6925deb9fe94e2 |
"""Display the contents of the implementation cache."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
import gtk
from zeroinstall.injector import namespaces, model
from zeroinstall.zerostore import BadDigest, manifest
from zeroinstall import support
from zeroinstall.support import basedir
from zeroinstall.gtkui import help_box, gtkutils
# Public API of this module.
__all__ = ['CacheExplorer']

# Interface URI for ROX-Filer, used to open implementation directories.
ROX_IFACE = 'http://rox.sourceforge.net/2005/interfaces/ROX-Filer'

# Model columns of the gtk.TreeStore built by CacheExplorer.
ITEM = 0          # display name
SELF_SIZE = 1     # size of this row alone, in bytes
PRETTY_SIZE = 2   # human-readable cumulative size (filled by _update_sizes)
TOOLTIP = 3       # tooltip text, may be None
ITEM_OBJECT = 4   # backing Cached*/Invalid* object, may be None
def popup_menu(bev, obj):
    """Show a right-click context menu built from ``obj.menu_items``.

    ``obj.menu_items`` is a list of (label, callback) pairs; a None entry
    inserts a separator.  Each callback receives *obj* as its argument.
    """
    menu = gtk.Menu()
    for entry in obj.menu_items:
        if entry is None:
            menu_item = gtk.SeparatorMenuItem()
        else:
            label, callback = entry
            menu_item = gtk.MenuItem(label)
            # Bind callback via a default argument so each item keeps its own.
            menu_item.connect('activate',
                              lambda item, callback=callback: callback(obj))
        menu_item.show()
        menu.append(menu_item)
    menu.popup(None, None, None, bev.button, bev.time)
def size_if_exists(path):
    """Return the size in bytes of the regular file at *path*.

    Returns 0 when *path* is false (None/empty) or does not name a file.
    """
    if not path:
        return 0
    if not os.path.isfile(path):
        return 0
    return os.path.getsize(path)
def get_size(path):
    """Return the total size in bytes of the directory tree at *path*.

    If a '.manifest' file is present, trust it instead of walking the
    whole tree: sum the size of the manifest itself plus the sizes on its
    'X'/'F' lines.  Otherwise fall back to an os.walk over every file.

    NOTE: uses the Python 2 builtins ``file`` and ``long``; this module
    predates Python 3.
    """
    man = os.path.join(path, '.manifest')
    if os.path.exists(man):
        size = os.path.getsize(man)
        for line in file(man, 'rb'):
            # 'X' and 'F' lines describe executables and files; field
            # index 3 is assumed to be the file size -- per the zerostore
            # manifest format (confirm against zerostore.manifest).
            if line[:1] in "XF":
                size += long(line.split(' ', 4)[3])
    else:
        size = 0
        for root, dirs, files in os.walk(path):
            for name in files:
                size += os.path.getsize(os.path.join(root, name))
    return size
def summary(iface):
    """Return "<name> - <summary>" for *iface*, or just the name when the
    interface carries no summary text."""
    name = iface.get_name()
    if not iface.summary:
        return name
    return name + ' - ' + iface.summary
def get_selected_paths(tree_view):
    """Return the list of selected tree paths.

    GTK 2.0's TreeSelection has no direct accessor for this, so collect
    the paths with selected_foreach.
    """
    collected = []
    tree_view.get_selection().selected_foreach(
        lambda model, path, iter: collected.append(path))
    return collected
# Responses
# Custom dialog response id for the Delete button (gtk.RESPONSE_* built-ins
# are negative, so 0 is free for application use).
DELETE = 0
class CachedInterface(object):
    """An interface (feed) in the on-disk cache, identified by URI.

    Base class for ValidInterface / InvalidInterface; holds the URI and
    the combined on-disk size of the cached files.
    """

    def __init__(self, uri, size):
        self.uri = uri
        self.size = size

    def delete(self):
        """Remove the cached copy of the interface and any user overrides.

        Local interfaces (absolute-path URIs) have no cached copy to
        delete, but their user overrides are still removed.
        """
        if not os.path.isabs(self.uri):
            cached_iface = basedir.load_first_cache(namespaces.config_site,
                    'interfaces', model.escape(self.uri))
            if cached_iface:
                #print "Delete", cached_iface
                os.unlink(cached_iface)
        user_overrides = basedir.load_first_config(namespaces.config_site,
                    namespaces.config_prog,
                    'user_overrides', model.escape(self.uri))
        if user_overrides:
            #print "Delete", user_overrides
            os.unlink(user_overrides)

    def __cmp__(self, other):
        # Python 2 rich-ordering shim: order by URI.
        return self.uri.__cmp__(other.uri)
class ValidInterface(CachedInterface):
    """A cached interface that parsed correctly, plus its cached impls."""

    def __init__(self, iface, size):
        CachedInterface.__init__(self, iface.uri, size)
        self.iface = iface
        # KnownImplementation / LocalImplementation children of this row.
        self.in_cache = []

    def append_to(self, model, iter):
        """Add a row for this interface plus one child row per impl."""
        iter2 = model.append(iter,
                [self.uri, self.size, None, summary(self.iface), self])
        for cached_impl in self.in_cache:
            cached_impl.append_to(model, iter2)

    def get_may_delete(self):
        # Deletable only when nothing but local (uncached) impls remain.
        for c in self.in_cache:
            if not isinstance(c, LocalImplementation):
                return False    # Still some impls cached
        return True
    may_delete = property(get_may_delete)
class InvalidInterface(CachedInterface):
    """An interface file in the cache that could not be read or parsed."""

    may_delete = True  # a broken interface can always be removed

    def __init__(self, uri, ex, size):
        CachedInterface.__init__(self, uri, size)
        # The error text; displayed in the tooltip column.
        self.ex = ex

    def append_to(self, model, iter):
        """Append one row describing this broken interface to *model*."""
        row = [self.uri, self.size, None, self.ex, self]
        model.append(iter, row)
class LocalImplementation:
    """An implementation that lives outside the cache (has a local_path)."""

    may_delete = False  # not held in the cache, so not ours to delete

    def __init__(self, impl):
        self.impl = impl

    def append_to(self, model, iter):
        # Size 0: local versions take no cache space.
        model.append(iter, [self.impl.id, 0, None, _('This is a local version, not held in the cache.'), self])
class CachedImplementation:
    """A single implementation directory inside an on-disk store.

    NOTE: uses Python 2 'except E, ex' syntax; this module predates
    Python 3.
    """

    may_delete = True

    def __init__(self, cache_dir, name):
        self.impl_path = os.path.join(cache_dir, name)
        self.size = get_size(self.impl_path)
        self.name = name

    def delete(self):
        """Recursively remove the implementation directory (read-only safe)."""
        #print "Delete", self.impl_path
        support.ro_rmtree(self.impl_path)

    def open_rox(self):
        """Open the implementation directory in ROX-Filer (blocks until done)."""
        os.spawnlp(os.P_WAIT, '0launch', '0launch', ROX_IFACE, '-d', self.impl_path)

    def verify(self):
        """Re-verify the stored digest and report the result in a dialog."""
        try:
            manifest.verify(self.impl_path)
        except BadDigest, ex:
            box = gtk.MessageDialog(None, 0,
                    gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, str(ex))
            if ex.detail:
                # Show the full mismatch report in a monospaced scrolled view.
                swin = gtk.ScrolledWindow()
                buffer = gtk.TextBuffer()
                mono = buffer.create_tag('mono', family = 'Monospace')
                buffer.insert_with_tags(buffer.get_start_iter(), ex.detail, mono)
                text = gtk.TextView(buffer)
                text.set_editable(False)
                text.set_cursor_visible(False)
                swin.add(text)
                swin.set_shadow_type(gtk.SHADOW_IN)
                swin.set_border_width(4)
                box.vbox.pack_start(swin)
                swin.show_all()
                box.set_resizable(True)
        else:
            box = gtk.MessageDialog(None, 0,
                    gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
                    _('Contents match digest; nothing has been changed.'))
        box.run()
        box.destroy()

    # Entries for the right-click popup_menu() on this row.
    menu_items = [(_('Open in ROX-Filer'), open_rox),
                  (_('Verify integrity'), verify)]
class UnusedImplementation(CachedImplementation):
    """A cached implementation that no known interface refers to."""

    def append_to(self, model, iter):
        """Append a row showing the directory name, size and full path."""
        row = [self.name, self.size, None, self.impl_path, self]
        model.append(iter, row)
class KnownImplementation(CachedImplementation):
    """A cached implementation that some cached interface refers to."""

    def __init__(self, cached_iface, cache_dir, impl, impl_size):
        CachedImplementation.__init__(self, cache_dir, impl.id)
        self.cached_iface = cached_iface
        self.impl = impl
        self.size = impl_size

    def delete(self):
        CachedImplementation.delete(self)
        # Keep the owning interface's bookkeeping in sync so its
        # may_delete property updates.
        self.cached_iface.in_cache.remove(self)

    def append_to(self, model, iter):
        model.append(iter,
                [_('Version %(implementation_version)s : %(implementation_id)s') % {'implementation_version': self.impl.get_version(), 'implementation_id': self.impl.id},
                 self.size, None,
                 None,
                 self])

    def __cmp__(self, other):
        # Order by implementation; anything without an impl sorts after us.
        if hasattr(other, 'impl'):
            return self.impl.__cmp__(other.impl)
        return -1
class CacheExplorer:
    """A graphical interface for viewing the cache and deleting old items."""

    def __init__(self, iface_cache):
        # Load the window layout from the GtkBuilder file next to this module.
        widgets = gtkutils.Template(os.path.join(os.path.dirname(__file__), 'cache.ui'), 'cache')
        self.window = window = widgets.get_widget('cache')
        window.set_default_size(gtk.gdk.screen_width() / 2, gtk.gdk.screen_height() / 2)
        self.iface_cache = iface_cache

        # Model
        # Columns: ITEM, SELF_SIZE, PRETTY_SIZE, TOOLTIP, ITEM_OBJECT.
        self.model = gtk.TreeStore(str, int, str, str, object)
        self.tree_view = widgets.get_widget('treeview')
        self.tree_view.set_model(self.model)

        column = gtk.TreeViewColumn(_('Item'), gtk.CellRendererText(), text = ITEM)
        column.set_resizable(True)
        self.tree_view.append_column(column)

        cell = gtk.CellRendererText()
        cell.set_property('xalign', 1.0)    # right-align sizes
        column = gtk.TreeViewColumn(_('Size'), cell, text = PRETTY_SIZE)
        self.tree_view.append_column(column)

        def button_press(tree_view, bev):
            # Right-click: show the row object's popup menu, if it has one.
            if bev.button != 3:
                return False
            pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
            if not pos:
                return False
            path, col, x, y = pos
            obj = self.model[path][ITEM_OBJECT]
            if obj and hasattr(obj, 'menu_items'):
                popup_menu(bev, obj)
        self.tree_view.connect('button-press-event', button_press)

        # Tree tooltips
        self.tree_view.set_property('has-tooltip', True)
        def query_tooltip(widget, x, y, keyboard_mode, tooltip):
            # Show the TOOLTIP column text for the row under the pointer.
            x, y = self.tree_view.convert_widget_to_bin_window_coords(x, y)
            pos = self.tree_view.get_path_at_pos(x, y)
            if pos:
                path = pos[0]
                row = self.model[path]
                tip = row[TOOLTIP]
                if tip:
                    self.tree_view.set_tooltip_cell(tooltip, pos[0], None, None)
                    tooltip.set_text(tip)
                    return True
            return False
        self.tree_view.connect('query-tooltip', query_tooltip)

        # Responses
        window.set_default_response(gtk.RESPONSE_CLOSE)

        selection = self.tree_view.get_selection()
        def selection_changed(selection):
            # Enable the Delete button only when every selected row is
            # deletable (and at least one row is selected).
            any_selected = False
            for x in get_selected_paths(self.tree_view):
                obj = self.model[x][ITEM_OBJECT]
                if obj is None or not obj.may_delete:
                    window.set_response_sensitive(DELETE, False)
                    return
                any_selected = True
            window.set_response_sensitive(DELETE, any_selected)
        selection.set_mode(gtk.SELECTION_MULTIPLE)
        selection.connect('changed', selection_changed)
        selection_changed(selection)

        def response(dialog, resp):
            if resp == gtk.RESPONSE_CLOSE:
                window.destroy()
            elif resp == gtk.RESPONSE_HELP:
                cache_help.display()
            elif resp == DELETE:
                self._delete()
        window.connect('response', response)

    def _delete(self):
        """Delete every selected item, then refresh the size column.

        Iterates the selection in reverse so earlier removals don't
        invalidate the remaining tree paths.  (Python 2 except syntax.)
        """
        errors = []
        model = self.model
        paths = get_selected_paths(self.tree_view)
        paths.reverse()
        for path in paths:
            item = model[path][ITEM_OBJECT]
            assert item.delete
            try:
                item.delete()
            except OSError, ex:
                errors.append(str(ex))
            else:
                model.remove(model.get_iter(path))
        self._update_sizes()

        if errors:
            gtkutils.show_message_box(self.window, _("Failed to delete:\n%s") % '\n'.join(errors))

    def show(self):
        """Display the window and scan the caches to populate it."""
        self.window.show()
        # Busy cursor while scanning; always restored by the finally.
        self.window.window.set_cursor(gtkutils.get_busy_pointer())
        gtk.gdk.flush()
        try:
            self._populate_model()
            i = self.model.get_iter_root()
            while i:
                self.tree_view.expand_row(self.model.get_path(i), False)
                i = self.model.iter_next(i)
        finally:
            self.window.window.set_cursor(None)

    def _populate_model(self):
        """Scan the stores and interface cache, filling the tree model with
        three top-level groups: invalid interfaces, unowned implementations
        and valid interfaces.  (Python 2 except syntax.)"""
        # Find cached implementations
        unowned = {}    # Impl ID -> Store
        duplicates = [] # TODO
        for s in self.iface_cache.stores.stores:
            if os.path.isdir(s.dir):
                for id in os.listdir(s.dir):
                    if id in unowned:
                        duplicates.append(id)
                    unowned[id] = s

        ok_interfaces = []
        error_interfaces = []

        # Look through cached interfaces for implementation owners
        all = self.iface_cache.list_all_interfaces()
        all.sort()
        for uri in all:
            iface_size = 0
            try:
                if os.path.isabs(uri):
                    cached_iface = uri
                else:
                    cached_iface = basedir.load_first_cache(namespaces.config_site,
                            'interfaces', model.escape(uri))
                user_overrides = basedir.load_first_config(namespaces.config_site,
                            namespaces.config_prog,
                            'user_overrides', model.escape(uri))
                iface_size = size_if_exists(cached_iface) + size_if_exists(user_overrides)
                iface = self.iface_cache.get_interface(uri)
            except Exception, ex:
                error_interfaces.append((uri, str(ex), iface_size))
            else:
                cached_iface = ValidInterface(iface, iface_size)
                for impl in iface.implementations.values():
                    if impl.local_path:
                        cached_iface.in_cache.append(LocalImplementation(impl))
                    if impl.id in unowned:
                        cached_dir = unowned[impl.id].dir
                        impl_path = os.path.join(cached_dir, impl.id)
                        impl_size = get_size(impl_path)
                        cached_iface.in_cache.append(KnownImplementation(cached_iface, cached_dir, impl, impl_size))
                        # Claimed by this interface; no longer unowned.
                        del unowned[impl.id]
                cached_iface.in_cache.sort()
                ok_interfaces.append(cached_iface)

        if error_interfaces:
            iter = self.model.append(None, [_("Invalid interfaces (unreadable)"),
                         0, None,
                         _("These interfaces exist in the cache but cannot be "
                           "read. You should probably delete them."),
                         None])
            for uri, ex, size in error_interfaces:
                item = InvalidInterface(uri, ex, size)
                item.append_to(self.model, iter)

        # Only impls in the user's own cache dir can safely be deleted.
        unowned_sizes = []
        local_dir = os.path.join(basedir.xdg_cache_home, '0install.net', 'implementations')
        for id in unowned:
            if unowned[id].dir == local_dir:
                impl = UnusedImplementation(local_dir, id)
                unowned_sizes.append((impl.size, impl))
        if unowned_sizes:
            iter = self.model.append(None, [_("Unowned implementations and temporary files"),
                        0, None,
                        _("These probably aren't needed any longer. You can "
                          "delete them."), None])
            unowned_sizes.sort()
            unowned_sizes.reverse()     # largest first
            for size, item in unowned_sizes:
                item.append_to(self.model, iter)

        if ok_interfaces:
            iter = self.model.append(None,
                [_("Interfaces"),
                 0, None,
                 _("Interfaces in the cache"),
                 None])
            for item in ok_interfaces:
                item.append_to(self.model, iter)

        self._update_sizes()

    def _update_sizes(self):
        """Set PRETTY_SIZE to the total size, including all children."""
        m = self.model
        def update(itr):
            # Recursive post-order walk: a row's total is its own size
            # plus the totals of all its children.
            total = m[itr][SELF_SIZE]
            child = m.iter_children(itr)
            while child:
                total += update(child)
                child = m.iter_next(child)
            m[itr][PRETTY_SIZE] = support.pretty_size(total)
            return total
        itr = m.get_iter_root()
        while itr:
            update(itr)
            itr = m.iter_next(itr)
# Lazily-shown help dialog; displayed from the window's Help response.
cache_help = help_box.HelpBox(_("Cache Explorer Help"),
(_('Overview'), '\n' +
_("""When you run a program using Zero Install, it downloads the program's 'interface' file, \
which gives information about which versions of the program are available. This interface \
file is stored in the cache to save downloading it next time you run the program.
When you have chosen which version (implementation) of the program you want to \
run, Zero Install downloads that version and stores it in the cache too. Zero Install lets \
you have many different versions of each program on your computer at once. This is useful, \
since it lets you use an old version if needed, and different programs may need to use \
different versions of libraries in some cases.
The cache viewer shows you all the interfaces and implementations in your cache. \
This is useful to find versions you don't need anymore, so that you can delete them and \
free up some disk space.""")),
(_('Invalid interfaces'), '\n' +
_("""The cache viewer gets a list of all interfaces in your cache. However, some may not \
be valid; they are shown in the 'Invalid interfaces' section. It should be fine to \
delete these. An invalid interface may be caused by a local interface that no longer \
exists, by a failed attempt to download an interface (the name ends in '.new'), or \
by the interface file format changing since the interface was downloaded.""")),
(_('Unowned implementations and temporary files'), '\n' +
_("""The cache viewer searches through all the interfaces to find out which implementations \
they use. If no interface uses an implementation, it is shown in the 'Unowned implementations' \
section.
Unowned implementations can result from old versions of a program no longer being listed \
in the interface file. Temporary files are created when unpacking an implementation after \
downloading it. If the archive is corrupted, the unpacked files may be left there. Unless \
you are currently unpacking new programs, it should be fine to delete everything in this \
section.""")),
(_('Interfaces'), '\n' +
_("""All remaining interfaces are listed in this section. You may wish to delete old versions of \
certain programs. Deleting a program which you may later want to run will require it to be downloaded \
again. Deleting a version of a program which is currently running may cause it to crash, so be careful!""")))
| pombredanne/zero-install | zeroinstall/gtkui/cache.py | Python | lgpl-2.1 | 14,749 | [
"VisIt"
] | f13077065c62ea2cfb73a42cb3a29ecc74731fd0a51547162f0b60cfbef52f1c |
from matplotlib import pyplot
from math import cos, sin, atan
import matplotlib
# Global plot appearance for all diagrams: ggplot theme, wide short
# figures, sans-serif font.
matplotlib.style.use('ggplot')
matplotlib.pyplot.rcParams['figure.figsize'] = (15, 3)
matplotlib.pyplot.rcParams['font.family'] = 'sans-serif'
class Neuron():
    """A single neuron, drawn as a hollow circle at a fixed (x, y)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def draw(self, neuron_radius):
        """Add this neuron's circle outline to the current axes."""
        outline = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        pyplot.gca().add_patch(outline)
class Layer():
    """One row of the diagram: evenly spaced neurons, centred horizontally
    relative to the widest layer and stacked above the previous layer."""

    def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer):
        # Fixed layout constants (drawing units).
        self.vertical_distance_between_layers = 6
        self.horizontal_distance_between_neurons = 2
        self.neuron_radius = 0.5
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons)

    def __intialise_neurons(self, number_of_neurons):
        # Lay neurons out left-to-right from the centring margin.
        left = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        return [Neuron(left + i * self.horizontal_distance_between_neurons, self.y)
                for i in range(number_of_neurons)]

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        missing = self.number_of_neurons_in_widest_layer - number_of_neurons
        return self.horizontal_distance_between_neurons * missing / 2

    def __calculate_layer_y_position(self):
        # First layer sits at y=0; later layers stack upward.
        if self.previous_layer:
            return self.previous_layer.y + self.vertical_distance_between_layers
        return 0

    def __get_previous_layer(self, network):
        return network.layers[-1] if network.layers else None

    def __line_between_two_neurons(self, neuron1, neuron2):
        # Trim the connection so it starts/ends on the circle edges
        # rather than at the centres.
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        dx = self.neuron_radius * sin(angle)
        dy = self.neuron_radius * cos(angle)
        segment = pyplot.Line2D((neuron1.x - dx, neuron2.x + dx),
                                (neuron1.y - dy, neuron2.y + dy))
        pyplot.gca().add_line(segment)

    def draw(self, layerType=0):
        """Draw neurons, connections to the previous layer, and a label.

        layerType 0 labels the input layer, -1 the output layer, any
        other value a numbered hidden layer.
        """
        for neuron in self.neurons:
            neuron.draw(self.neuron_radius)
            if self.previous_layer:
                for previous_neuron in self.previous_layer.neurons:
                    self.__line_between_two_neurons(neuron, previous_neuron)
        # write Text
        x_text = self.number_of_neurons_in_widest_layer * self.horizontal_distance_between_neurons
        if layerType == 0:
            label = 'Input Layer'
        elif layerType == -1:
            label = 'Output Layer'
        else:
            label = 'Hidden Layer ' + str(layerType)
        pyplot.text(x_text, self.y, label, fontsize = 12)
class NeuralNetwork():
    """Collects Layer objects and renders the full architecture diagram."""

    def __init__(self, number_of_neurons_in_widest_layer):
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.layers = []
        self.layertype = 0  # kept for compatibility; not read anywhere here

    def add_layer(self, number_of_neurons):
        """Append a layer of the given width above the existing ones."""
        self.layers.append(
            Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer))

    def draw(self):
        """Render every layer and show the figure."""
        pyplot.figure()
        last = len(self.layers) - 1
        for index, layer in enumerate(self.layers):
            # The final layer is passed -1 so it is labelled "Output Layer".
            layer.draw(-1 if index == last else index)
        pyplot.axis('scaled')
        pyplot.axis('off')
        pyplot.title('Autoencoder architecture (scales)', fontsize=15)
        pyplot.show()
class DrawNN():
    """Facade: ``DrawNN([3, 8, 8, 3]).draw()`` renders a network whose
    layers have those widths."""

    def __init__(self, neural_network):
        # List of layer widths, e.g. [3, 8, 8, 3].
        self.neural_network = neural_network

    def draw(self):
        """Build a NeuralNetwork from the width list and render it."""
        widest = max(self.neural_network)
        diagram = NeuralNetwork(widest)
        for width in self.neural_network:
            diagram.add_layer(width)
        diagram.draw()
"NEURON"
] | fcc3c8abd1c2fdb16ae890d0bc5863ddb82149c698e56cfb8b9e553c4a739c7f |
import json
import time, os, fnmatch
import re
import textwrap
def make_date_string(t):
    """Format epoch-seconds *t* as a local-time 'YYYY-MM-DD' string."""
    lt = time.localtime(t)
    return '%04d-%02d-%02d' % (lt.tm_year, lt.tm_mon, lt.tm_mday)
def replace_path_bits(path, user, maxlen):
    """Abbreviate TACC filesystem prefixes in *path* for display.

    /work/<uid>/<user>      -> \\$WORK
    /scratch/<uid>/<user>   -> \\$SCRATCH
    ...home*/<uid>/<user>   -> ~<user>

    The dollar signs stay backslash-escaped in the output (presumably so
    the result is safe inside matplotlib mathtext labels -- confirm with
    callers).  *maxlen* is unused here; it is kept for interface
    compatibility with the shorten/wrap variants below.
    """
    shortened = re.sub('/work/[0-9]+/' + user, r'\$WORK', path)
    shortened = re.sub('/scratch/[0-9]+/' + user, r'\$SCRATCH', shortened)
    shortened = re.sub('.*?/home.*?/[0-9]+/' + user, '~' + user, shortened)
    return shortened
def replace_and_shorten_path_bits(path, user, maxlen):
    """Like replace_path_bits, but when the result is still longer than
    *maxlen*, collapse the middle path components into '/.../'."""
    shortened = replace_path_bits(path, user, maxlen)
    if len(shortened) > maxlen:
        shortened = re.sub(r'/[^/][^/]*/..*/(..*/)', r'/.../\1', shortened)
    return shortened
def replace_and_wrap_path_bits(path, user, maxlen, indent=0):
    """Like replace_path_bits, but wrap long results onto multiple lines.

    Results shorter than *maxlen* are returned unchanged.  Longer ones
    are wrapped at *maxlen* columns on path-component boundaries, with
    continuation lines indented by *indent* spaces.
    """
    res = replace_path_bits(path, user, maxlen)
    if len(res) < maxlen:
        return res
    # Turn '/' into ' ' so textwrap breaks only between path components,
    # then restore the slashes on each wrapped line.
    wrapped = textwrap.wrap(' '.join(res.split('/')), maxlen)
    res = ''
    for i in range(len(wrapped) - 1):
        res += '/'.join(wrapped[i].split(' ')) + '/\n' + (' ' * indent)
    res += '/'.join(wrapped[-1].split(' '))
    return res
class LariatDataException(Exception):
    """Raised for lariat-data problems.

    NOTE(review): the constructor prints the message immediately
    (Python 2 print statement) and never passes it to
    Exception.__init__, so str(exc) is presumably empty -- confirm
    before relying on the message elsewhere.
    """
    def __init__(self, arg):
        self.value = arg
        print self.value
class LariatData:
    """Lariat (job provenance) data for one job, loaded from per-day JSON
    dumps.  (Python 2 module: print statements, bare except.)"""

    def __init__(self, jobid, end_epoch=-1, directory=None, daysback=0, olddata=None):
        self.jobid = jobid
        # Initialize to invalid/empty states
        self.id = 0
        self.ld = None
        self.user = 'nobody'
        self.exc = 'unknown'
        self.cwd = 'unknown'
        self.threads = 1
        self.wayness = -1
        # Find the set of JSON files matching the end_epoch date
        # (searching up to `daysback` earlier days as well).
        newdata = None
        if end_epoch > 0 and directory != None:
            matches = []
            for day in range(daysback + 1):
                ds = make_date_string(end_epoch - day * 24 * 3600)
                for root, dirnames, filenames in os.walk(directory):
                    for fn in fnmatch.filter(filenames, '*' + ds + '.json'):
                        matches.append(os.path.join(root, fn))
            if len(matches) != 0:
                newdata = dict()
                for m in matches:
                    try:
                        newdata.update(json.load(open(m))) # Should be only one match
                    except:
                        # Some dumps contain stray backslashes that break
                        # the JSON parser; strip them and retry.
                        json_str = open(m).read()
                        json_str = re.sub(r'\\', '', json_str)
                        newdata.update(json.loads(json_str))
            else:
                print 'File for ' + self.jobid + ' not found in ' + directory
        # Merge: start from caller-supplied data, overlay anything new.
        if olddata != None:
            self.ld = olddata
        else:
            self.ld = dict()
        if newdata != None:
            self.ld.update(newdata)
        try:
            # One record per ibrun invocation; order them by start time
            # and summarize from the first.
            self.ld[jobid].sort(key=lambda ibr: int(ibr['startEpoch']))
            self.id = self.ld[jobid][0]['jobID']
            self.user = self.ld[jobid][0]['user']
            self.exc = replace_and_shorten_path_bits(self.ld[jobid][0]['exec'],
                                                     self.user, 60)
            self.cwd = replace_and_shorten_path_bits(self.ld[jobid][0]['cwd'],
                                                     self.user, 60)
            self.threads = self.ld[jobid][0]['numThreads']
            self.wayness = int(self.ld[jobid][0]['numCores'])/int(self.ld[jobid][0]['numNodes'])
        except KeyError:
            print str(jobid) + ' did not call ibrun' + \
                ' or has no lariat data for some other reason'
        # Executable-name regexes mapped to canonical application names;
        # used via comp_name() to group jobs by code.
        self.equiv_patterns = {
            r'^charmrun' : 'Charm++*',
            r'^wrf' : 'WRF*',
            r'^vasp' : 'VASP*',
            r'^run\.cctm' : 'CMAQ CCTM*',
            r'^lmp_' : 'LAMMPS*',
            r'^mdrun' : 'Gromacs*',
            r'^enzo' : 'ENZO*',
            r'^dlpoly' : 'DL_POLY*',
            r'^su3_' : 'MILC*',
            r'^qcprog' : 'QCHEM*',
            r'^namd2' : 'NAMD*',
            r'^cactus' : 'Cactus*',
            r'^pw.x' : 'Q. Esp*',
            r'^pmemd' : 'Amber*',
            r'^sander' : 'Amber*',
            r'^charmm' : 'CHARMM*',
            r'^c37b1' : 'CHARMM*',
        }

    def title(self):
        """Return a one/two-line plot title: executable and (if known) CWD."""
        title = 'E: ' + self.exc
        if (self.cwd != 'unknown'):
            # Break onto a second line when the combined text is long.
            if ((len(self.exc) + len (self.cwd)) > 50):
                sep = ',\n'
            else:
                sep = ', '
            title += sep + 'CWD: ' + self.cwd
        return title

    def comp_name(self, name, patterns):
        """Map *name* to its canonical form via the first matching regex in
        *patterns*; return *name* unchanged if nothing matches."""
        for i in patterns.keys():
            if re.search(i, name):
                return patterns[i]
        return name

    def get_runtimes(self, end_epoch):
        """Return the duration of each ibrun phase: the gaps between
        consecutive sorted start times, with *end_epoch* closing the last."""
        start_times = [int(ibr['startEpoch']) for ibr in self.ld[self.id]]
        start_times.extend([end_epoch])
        st2 = sorted(start_times)
        return [(a-b) for (a, b) in zip(st2[1:], st2[:-1])]
| ubccr/tacc_stats | analyze/process_pickles/lariat_utils.py | Python | lgpl-2.1 | 4,195 | [
"Amber",
"CHARMM",
"DL_POLY",
"Gromacs",
"LAMMPS",
"NAMD",
"VASP"
] | 3125c8c621f8850cce9e84282e539aeb6dad9377089ede0fb0725cd033b359fb |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from manila.api import xmlutil
from manila import test
class SelectorTest(test.TestCase):
    """Tests for xmlutil.Selector / xmlutil.ConstantSelector lookup chains."""
    # Nested object every selector in this suite is applied to.
    obj_for_test = {'test': {'name': 'test',
                             'values': [1, 2, 3],
                             'attrs': {'foo': 1,
                                       'bar': 2,
                                       'baz': 3, }, }, }
    def test_empty_selector(self):
        """An empty chain acts as the identity function."""
        sel = xmlutil.Selector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
    def test_dict_selector(self):
        """A single string link performs one dict lookup."""
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])
    def test_datum_selector(self):
        """Two chained keys drill into nested dicts."""
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')
    def test_list_selector(self):
        """An integer link indexes into a list."""
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)
    def test_items_selector(self):
        """A callable link (get_items) yields the dict's item pairs."""
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
    def test_missing_key_selector(self):
        """A missing key yields None; with the raise flag set, KeyError."""
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertEqual(sel(self.obj_for_test), None)
        self.assertRaises(KeyError, sel, self.obj_for_test, True)
    def test_constant_selector(self):
        """ConstantSelector ignores the object and returns its fixed value."""
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
    """Tests for xmlutil.TemplateElement: attribute handling, child
    management, text selectors, and rendering to lxml elements."""
    def test_element_initial_attributes(self):
        """Keyword attributes override same-named entries of attrib."""
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)
    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)
    def test_element_set_attributes(self):
        """set() wraps plain values in Selectors; Selector values pass
        through untouched; a None value selects by the attribute name."""
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])
    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)
    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)
    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)
    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')
    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)
    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.subselector, None)
    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')
    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)
    def test_element_append_child(self):
        """append() adds a child, indexable by position and by tag;
        duplicate tags are rejected with KeyError."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertEqual('child' in elem, True)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)
    def test_element_extend_children(self):
        """extend() is atomic: a duplicate tag anywhere in the batch
        rejects the whole batch."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertEqual(children[idx].tag in elem, True)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [xmlutil.TemplateElement('child4'),
                     xmlutil.TemplateElement('child1'), ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')
    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertEqual(children[idx].tag in elem, True)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)
    def test_element_remove_child(self):
        """remove() requires the identical child object (ValueError for a
        same-tag impostor) and clears the tag-based index."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')
        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertEqual('child2' in elem, False)
        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')
    def test_element_text(self):
        """The text property wraps strings in Selectors, passes Selectors
        through, and supports deletion."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertEqual(elem.text, None)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertEqual(elem.text, None)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertEqual(elem.text, None)
    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)
    def test__render(self):
        """_render() merges attributes from master and slave elements and
        attaches the result (with nsmap) to an optional parent."""
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
                       xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')
    def test_render(self):
        """render() emits one (element, datum) pair per selected object:
        none for None, one for a scalar, one per entry for a list."""
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])
    def test_subelement(self):
        """SubTemplateElement auto-appends itself to its parent."""
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)
    def test_wrap(self):
        """unwrap() on an element is a no-op; wrap() produces a Template."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)
    def test_dyntag(self):
        """A Selector as the tag resolves per-datum at render time."""
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
    """Tests for xmlutil.Template / MasterTemplate / SlaveTemplate:
    wrapping, sibling collection, nsmap handling, slave attachment,
    copying, version applicability, and full serialization."""
    def test_wrap(self):
        """unwrap() returns the root element; wrap() returns the template."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(tmpl.unwrap(), elem)
        self.assertEqual(tmpl.wrap(), tmpl)
    def test__siblings(self):
        """A plain Template's only sibling is its own root."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(len(siblings), 1)
        self.assertEqual(siblings[0], elem)
    def test__nsmap(self):
        """_nsmap() returns a copy, not the template's own dict."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary
        nsmap = tmpl._nsmap()
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(len(nsmap), 1)
        self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        """attach() rejects slaves with mismatched root tags and skips
        templates whose apply() declines the master."""
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid slave
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        """copy() shares root and nsmap but clones the slave list."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        """A slave applies when the master version falls inside the
        slave's [min, max] version window (max optional)."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        """End-to-end: render a nested object through a master template
        with an attached slave and walk the resulting element tree."""
        # Our test object to serialize
        obj = {'test': {'name': 'foobar',
                        'values': [1, 2, 3, 4],
                        'attrs': {'a': 1,
                                  'b': 2,
                                  'c': 3,
                                  'd': 4, },
                        'image': {'name': 'image_foobar', 'id': 42, }, }, }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual(result.tag, 'test')
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        idx += 1
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Fixture builder: constructs a version-1 MasterTemplate (callers
    normally receive a fresh copy of the cached template)."""
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Fixture builder: constructs a version-1 SlaveTemplate (callers
    share the single cached template)."""
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.TestCase):
    """Caching behavior of TemplateBuilder: master builders hand out
    fresh copies of the cached template; slave builders hand out the
    cached template itself."""
    def test_master_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertEqual(MasterTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
    def test_slave_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertEqual(SlaveTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
    """Tests for standalone helpers in xmlutil."""
    def test_make_flat_dict(self):
        """make_flat_dict() serializes a flat dict as child elements."""
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
| tucbill/manila | manila/tests/api/test_xmlutil.py | Python | apache-2.0 | 25,565 | [
"VisIt"
] | 2d1e17b45f3a94df70c1b0a596b667b62082fcd7a2115aa1a6d84f09f4a6c6ec |
import numpy as np
import scipy.linalg as slg
from frankenstein import scf, mp
from frankenstein.tools.io_utils import dumpMat
from frankenstein.tools.tensor_utils import ao2mo_xform_nosymm
def get_Vovov(V, C, nocc):
    """MO-transform the AO two-electron integrals to the (ov|ov) block.

    Equivalent to
        np.einsum("pqrs,pi,qa,rj,sb->iajb", V, Co, Cv, Co, Cv)
    but exploits the p<->q permutation symmetry of V to halve the work
    of the first half-transformation.
    """
    nao, nmo = C.shape
    nvirt = nmo - nocc
    Co = C[:, :nocc]
    Cv = C[:, nocc:]
    # Half-transform: contract (r, s) down to (j, b) for each AO pair
    # (p, q) with p <= q.  The diagonal is pre-scaled by 1/2 so the
    # symmetrization below counts it exactly once.
    half = np.zeros([nao, nao, nocc, nvirt])
    for p in range(nao):
        for q in range(p, nao):
            block = Co.T @ V[p, q] @ Cv
            half[p, q] = 0.5 * block if p == q else block
    # Second half-transform: symmetrize over (p, q), contract to (i, a).
    Vovov = np.zeros([nocc, nvirt, nocc, nvirt])
    for j in range(nocc):
        for b in range(nvirt):
            sym = half[:, :, j, b]
            sym += sym.T
            Vovov[:, :, j, b] = Co.T @ sym @ Cv
    del half
    return Vovov
def get_Diajb(moe, nocc):
    """Four-index MP2 energy denominator.

    D[i, a, j, b] = e_i + e_j - e_a - e_b, where i, j run over the first
    ``nocc`` orbital energies and a, b over the remaining virtuals.
    """
    nvirt = moe.shape[0] - nocc
    occ = moe[:nocc]
    virt = moe[nocc:]
    # Broadcast in the same order as the original expression so the
    # floating-point result is bit-identical.
    D = occ.reshape(nocc, 1, 1, 1) + occ.reshape(nocc, 1)
    D = D - virt.reshape(nvirt, 1, 1)
    D = D - virt
    return D
def get_mp2_t2(V, C, moe, nocc):
    """First-order MP2 doubles amplitudes in chemists' (ov|ov) ordering:
    t2[i, a, j, b] = (ia|jb) / (e_i + e_j - e_a - e_b)."""
    return get_Vovov(V, C, nocc) / get_Diajb(moe, nocc)
def get_emp2_from_eri(V, C, moe, nocc):
    """Closed-shell MP2 correlation energy from AO integrals.

    E2 = sum_iajb (ia|jb) * [2 (ia|jb) - (ib|ja)] / D_iajb

    The previous implementation performed the expensive (ov|ov)
    integral transformation twice (once directly and once inside
    get_mp2_t2); here it is computed once and reused for both the
    numerator and the amplitudes, with identical results.
    """
    Vovov = get_Vovov(V, C, nocc)
    t2 = Vovov / get_Diajb(moe, nocc)
    return np.sum((2. * Vovov - Vovov.transpose(0, 3, 2, 1)) * t2)
def get_emp2_from_t2(V, C, t2, nocc):
    """MP2 energy from physicists'-ordered amplitudes t2[i, j, a, b]."""
    Co, Cv = C[:, :nocc], C[:, nocc:]
    t2_chem = t2.transpose(0, 2, 1, 3)  # phys (oovv) --> chem (ovov)
    Vovov = np.einsum("pqrs,pi,qa,rj,sb->iajb", V,
                      Co, Cv, Co, Cv, optimize=True)
    antisym = 2. * Vovov - Vovov.transpose(0, 3, 2, 1)
    return np.sum(antisym * t2_chem)
def get_mp2_rdm1(V, C, moe, nocc):
    """Unrelaxed MP2 one-particle density correction (MO basis).

    Returns a block-diagonal matrix with an occupied-occupied and a
    virtual-virtual block built from the doubles amplitudes.
    """
    t2 = get_mp2_t2(V, C, moe, nocc)
    contracted = 2. * t2 - t2.transpose(0, 3, 2, 1)
    dm_oo = -np.einsum("iakb,jakb->ij", t2, contracted)
    dm_vv = np.einsum("iajc,ibjc->ab", t2, contracted)
    return slg.block_diag(dm_oo, dm_vv)
def get_cphf_U(mf, v, vo_only=False):
    """Solve the CPHF equations for perturbation ``v``.

    Returns the virtual-occupied rotation block when ``vo_only`` is
    True, otherwise the full orbital-rotation matrix U.
    """
    from cphf_utils import cphf_kernel, get_full_u
    u = cphf_kernel(mf, v)
    return u if vo_only else get_full_u(mf, v, u)
def get_scf_rdm1_mo_lr(mf, v):
    """Linear response of the SCF density matrix (MO basis) to ``v``:
    U @ P + P @ U.T with P the idempotent occupied projector."""
    U = get_cphf_U(mf, v)
    occ_proj = np.zeros([mf.nao, mf.nao])
    occ_proj[np.diag_indices(mf.nocc)] = 1.
    return U @ occ_proj + occ_proj @ U.T
def get_mp2_rdm1_mo_lr1(mf, v, rdm1_mo_mp2):
    """Orbital-rotation part of the MP2 density response:
    U @ P2 + P2 @ U.T for a fixed MP2 density correction P2."""
    rot = get_cphf_U(mf, v)
    return rot @ rdm1_mo_mp2 + rdm1_mo_mp2 @ rot.T
def get_Vovov_lr(V, C, U, nocc):
    """Linear response of the (ov|ov) MO integrals to the orbital
    rotation U: one C -> C@U substitution per MO index, summed over the
    four index positions (the last two follow by (ia)<->(jb) symmetry).
    """
    CU = C @ U
    Co, Cv = C[:, :nocc], C[:, nocc:]
    # rotation acting on the first occupied index
    r_occ = np.einsum("pqrs,pi,qa,rj,sb->iajb", V,
                      CU[:, :nocc], Cv, Co, Cv, optimize=True)
    # rotation acting on the first virtual index
    r_vir = np.einsum("pqrs,pi,qa,rj,sb->iajb", V,
                      Co, CU[:, nocc:], Co, Cv, optimize=True)
    return (r_occ + r_occ.transpose(2, 3, 0, 1) +
            r_vir + r_vir.transpose(2, 3, 0, 1))
def get_Vovov_lr_batch(Vavov, Voaov, C, U, nocc):
    """Same response as get_Vovov_lr, but starting from the precomputed
    three-quarter-transformed integrals Vavov (AO first index) and
    Voaov (AO second index), so only one cheap contraction per call."""
    CU = C @ U
    r_occ = np.einsum("pajb,pi->iajb", Vavov, CU[:, :nocc], optimize=True)
    r_vir = np.einsum("ipjb,pa->iajb", Voaov, CU[:, nocc:], optimize=True)
    return (r_occ + r_occ.transpose(2, 3, 0, 1) +
            r_vir + r_vir.transpose(2, 3, 0, 1))
def get_moe_lr(V, C, v, U, nocc):
    """First-order response of each orbital energy to the one-electron
    perturbation ``v``, given the CPHF rotation matrix ``U``.

    Returns a length-nao vector: the MO-basis diagonal of v plus the
    two-electron relaxation driven by the vo rotation block.
    """
    nao = V.shape[0]
    u = U[nocc:, :nocc]
    # Coulomb-like and exchange-like four-index intermediates.
    g1 = np.einsum("pqrs,pj,qj,ra,si->jai", V,
                   C, C, C[:, nocc:], C[:, :nocc], optimize=True)
    g2 = np.einsum("pqrs,pj,qi,ra,sj->jia", V,
                   C, C[:, :nocc], C[:, nocc:], C, optimize=True)
    diag_v = [C[:, p].T @ v @ C[:, p] for p in range(nao)]
    relax = [4. * np.sum(g1[p] * u) - 2. * np.sum(g2[p].T * u)
             for p in range(nao)]
    return np.array([dv + rx for dv, rx in zip(diag_v, relax)])
def get_moe_lr_batch(V, C, vs, Us, nocc):
    """Batched variant of get_moe_lr: the expensive g1/g2 four-index
    intermediates are built once and shared across all (v, U) pairs."""
    nao = V.shape[0]
    g1 = np.einsum("pqrs,pj,qj,ra,si->jai", V,
                   C, C, C[:, nocc:], C[:, :nocc], optimize=True)
    g2 = np.einsum("pqrs,pj,qi,ra,sj->jia", V,
                   C, C[:, :nocc], C[:, nocc:], C, optimize=True)
    results = []
    for v, U in zip(vs, Us):
        u = U[nocc:, :nocc]
        moe_lr = np.zeros(nao)
        for p in range(nao):
            moe_lr[p] = C[:, p].T @ v @ C[:, p] + \
                (4. * np.sum(g1[p] * u) - 2. * np.sum(g2[p].T * u))
        results.append(moe_lr)
    return results
def get_mp2_rdm1_mo_lr2(mf, v):
    """Amplitude-relaxation part of the MP2 density response.

    Differentiates the amplitudes t2 = Vovov / Diajb with respect to the
    perturbation ``v`` (via the CPHF rotation U and the orbital-energy
    response) and contracts the derivative amplitudes into OO and VV
    density blocks.  Returns a block-diagonal (nao, nao) MO-basis matrix.
    """
    nao = mf.nao
    nocc = mf.nocc
    nvirt = nao - nocc
    V = mf.mol.V
    C = mf.mo_coeff
    moe = mf.mo_energy
    U = get_cphf_U(mf, v)
    t2 = get_mp2_t2(V, C, moe, nocc)
    Vovov_lr = get_Vovov_lr(V, C, U, nocc)
    Diajb = get_Diajb(moe, nocc)
    moe_lr = get_moe_lr(V, C, v, U, nocc)
    Diajb_lr = get_Diajb(moe_lr, nocc)
    # Quotient rule: d(t2) = (dV - t2 * dD) / D
    t2_lr = (Vovov_lr - t2*Diajb_lr) / Diajb
    # Contract derivative amplitudes with (2*t2 - t2-exchanged), then
    # symmetrize each block.
    dm1oo_lr = -np.einsum("iakb,jakb->ij", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
    dm1oo_lr += dm1oo_lr.T
    dm1vv_lr = np.einsum("iajc,ibjc->ab", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
    dm1vv_lr += dm1vv_lr.T
    return slg.block_diag(dm1oo_lr, dm1vv_lr)
def get_tot_rdm1_mo_an(mf, v):
    """Analytic linear response of the total (SCF + MP2) one-particle
    density matrix to the one-electron perturbation ``v`` (MO basis).

    Sums three terms: the SCF density response, the rotation of the
    static MP2 density correction, and the response of the MP2
    amplitudes themselves.
    """
    V = mf.mol.V
    C = mf.mo_coeff
    moe = mf.mo_energy
    nao = mf.nao
    nocc = mf.nocc
    U = get_cphf_U(mf, v)
    # SCF contribution: U @ rho + rho @ U.T with rho the occupied projector
    rho = np.zeros([nao,nao])
    rho[np.diag_indices(nocc)] = 1.
    rdm1_scf_mo_lr = U@rho + rho@U.T
    # MP2 contribution 1: rotation of the unrelaxed MP2 density blocks
    Vovov = get_Vovov(V, C, nocc)
    Diajb = get_Diajb(moe, nocc)
    t2 = Vovov / Diajb
    rdm1_mo_mp2 = np.zeros([nao,nao])
    rdm1_mo_mp2[:nocc,:nocc] = -np.einsum("iakb,jakb->ij", \
        t2, 2.*t2-t2.transpose(0,3,2,1))
    rdm1_mo_mp2[nocc:,nocc:] = np.einsum("iajc,ibjc->ab", \
        t2, 2.*t2-t2.transpose(0,3,2,1))
    rdm1_mp2_mo_lr1 = U@rdm1_mo_mp2 + rdm1_mo_mp2@U.T
    # MP2 contribution 2: derivative amplitudes via the quotient rule
    Vovov_lr = get_Vovov_lr(V, C, U, nocc)
    moe_lr = get_moe_lr(V, C, v, U, nocc)
    Diajb_lr = get_Diajb(moe_lr, nocc)
    t2_lr = (Vovov_lr - t2*Diajb_lr) / Diajb
    rdm1_mp2_mo_lr2 = np.zeros([nao,nao])
    rdm1_mp2_mo_lr2[:nocc,:nocc] = \
        -np.einsum("iakb,jakb->ij", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
    rdm1_mp2_mo_lr2[nocc:,nocc:] = \
        np.einsum("iajc,ibjc->ab", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
    rdm1_mp2_mo_lr2 += rdm1_mp2_mo_lr2.T
    # collect contributions
    rdm1_tot_mo_lr = rdm1_scf_mo_lr + rdm1_mp2_mo_lr1 + rdm1_mp2_mo_lr2
    return rdm1_tot_mo_lr
def get_tot_rdm1_mo_an_batch(mf, vs, Us):
    """Batched version of get_tot_rdm1_mo_an.

    Accepts pre-solved CPHF rotations ``Us`` (one per perturbation in
    ``vs``) and hoists every perturbation-independent intermediate
    (occupied projector, t2, static MP2 density, 3/4-transformed
    integrals) out of the per-perturbation loop.
    """
    V = mf.mol.V
    C = mf.mo_coeff
    moe = mf.mo_energy
    nao = mf.nao
    nocc = mf.nocc
    nv = len(vs)
    # intermediates used by all Us/vs
    rho = np.zeros([nao,nao])
    rho[np.diag_indices(nocc)] = 1.
    Vovov = get_Vovov(V, C, nocc)
    Diajb = get_Diajb(moe, nocc)
    t2 = Vovov / Diajb
    rdm1_mo_mp2 = np.zeros([nao,nao])
    rdm1_mo_mp2[:nocc,:nocc] = -np.einsum("iakb,jakb->ij", \
        t2, 2.*t2-t2.transpose(0,3,2,1))
    rdm1_mo_mp2[nocc:,nocc:] = np.einsum("iajc,ibjc->ab", \
        t2, 2.*t2-t2.transpose(0,3,2,1))
    moes_lr = get_moe_lr_batch(V, C, vs, Us, nocc)
    # 3/4-transformed integrals reused by get_Vovov_lr_batch
    Vavov = np.einsum("pqrs,qa,rj,sb->pajb", V, \
        C[:,nocc:], C[:,:nocc], C[:,nocc:], optimize=True)
    Voaov = np.einsum("pqrs,pi,rj,sb->iqjb", V, \
        C[:,:nocc], C[:,:nocc], C[:,nocc:], optimize=True)
    # determine lr for each U/v
    rdm1s_tot_mo_lr = [None for i in range(nv)]
    ind = -1
    for v, U, moe_lr in zip(vs, Us, moes_lr):
        ind += 1
        # scf contribution
        rdm1_scf_mo_lr = U@rho + rho@U.T
        # mp2 contribution 1: rotation of the static MP2 density
        rdm1_mp2_mo_lr1 = U@rdm1_mo_mp2 + rdm1_mo_mp2@U.T
        # mp2 contribution 2: amplitude response (quotient rule)
        Vovov_lr = get_Vovov_lr_batch(Vavov, Voaov, C, U, nocc)
        Diajb_lr = get_Diajb(moe_lr, nocc)
        t2_lr = (Vovov_lr - t2*Diajb_lr) / Diajb
        rdm1_mp2_mo_lr2 = np.zeros([nao,nao])
        rdm1_mp2_mo_lr2[:nocc,:nocc] = \
            -np.einsum("iakb,jakb->ij", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
        rdm1_mp2_mo_lr2[nocc:,nocc:] = \
            np.einsum("iajc,ibjc->ab", t2_lr, 2.*t2-t2.transpose(0,3,2,1))
        rdm1_mp2_mo_lr2 += rdm1_mp2_mo_lr2.T
        # collect contributions
        rdm1s_tot_mo_lr[ind] = rdm1_scf_mo_lr + rdm1_mp2_mo_lr1 + rdm1_mp2_mo_lr2
    return rdm1s_tot_mo_lr
def get_tot_rdm1_mo_fd(h, V, nocc, v, eps=1.E-2):
    """Finite-difference reference for the total (SCF+MP2) density
    response, used to validate the analytic routines above.

    Perturbs the core Hamiltonian as h + k*eps*v and applies a 5-point
    central-difference stencil; the result is transformed to the MO
    basis of the UNPERTURBED SCF solution.
    """
    def get_tot_rdm1_ao(h, V, nocc, D0):
        # Converge SCF from the reference density D0, then build the
        # relaxed MP2 one-particle density in the AO basis.
        mf = scf.RHF(h=h, V=V, nocc=nocc, verbose="mute")
        mf.kernel(D0=D0, verbose="mute")
        mmp = mp.MP2(mf)
        mmp.kernel(rdm_level=1)
        return mmp.rdm1
    mf = scf.RHF(h=h, V=V, nocc=nocc)
    mf.kernel(verbose="mute")
    C = mf.mo_coeff
    rdm1_ao_ff = get_tot_rdm1_ao(h+2*eps*v, V, nocc, mf.rdm1)
    rdm1_ao_f = get_tot_rdm1_ao(h+eps*v, V, nocc, mf.rdm1)
    rdm1_ao_b = get_tot_rdm1_ao(h-eps*v, V, nocc, mf.rdm1)
    rdm1_ao_bb = get_tot_rdm1_ao(h-2*eps*v, V, nocc, mf.rdm1)
    # 5-point stencil: f'(0) ~ (-f(2e) + 8 f(e) - 8 f(-e) + f(-2e)) / 12e
    rdm1_ao_lr_fd = (-rdm1_ao_ff + 8.*rdm1_ao_f - \
        8.*rdm1_ao_b + rdm1_ao_bb) / 12. / eps
    rdm1_mo_lr_fd = C.T @ rdm1_ao_lr_fd @ C
    return rdm1_mo_lr_fd
def test_mp2_t2():
    """Compare the analytic density response against finite differences.

    Builds a random lattice Hamiltonian, perturbs each symmetric pair
    of the first n0 sites, and tabulates the deviation between
    get_tot_rdm1_mo_an_batch and the finite-difference reference.

    NOTE: the dumpMat labels previously claimed "rms error" for both
    errs_norm (actually a Frobenius norm) and errs_ao (actually a max
    abs error); the labels now match what is computed.
    """
    np.random.seed(17)
    nao = 10
    nocc = 4
    nvirt = nao - nocc
    from frankenstein.tools.lat_utils import get_random_hV
    h, V = get_random_hV(nao)
    mf = scf.RHF(h=h, V=V, nocc=nocc)
    mf.kernel(verbose="mute")
    C = mf.mo_coeff
    # One symmetric one-electron perturbation per (i, j) pair, i <= j < n0.
    v = np.zeros([nao,nao])
    n0 = 3
    shift = 0
    vs = []
    for i in range(n0):
        for j in range(i,n0):
            v[i+shift,j+shift] = v[j+shift,i+shift] = 1.
            vs.append(v.copy())
            v[i+shift,j+shift] = v[j+shift,i+shift] = 0.
    from cphf_utils import cphf_kernel_batch, get_full_u_batch
    us = cphf_kernel_batch(mf, vs)
    Us = get_full_u_batch(mf, vs, us)
    rdm1s_mo_tot_lr = get_tot_rdm1_mo_an_batch(mf, vs, Us)
    errs = np.zeros([n0,n0])
    errs_norm = np.zeros([n0,n0])
    errs_ao = np.zeros([n0,n0])
    ind = -1
    for i in range(n0):
        for j in range(i,n0):
            ind += 1
            v = vs[ind]
            rdm1_mo_tot_lr_fd = get_tot_rdm1_mo_fd(h, V, nocc, v)
            err = np.max(np.abs(rdm1s_mo_tot_lr[ind] - rdm1_mo_tot_lr_fd))
            err_norm = np.linalg.norm(rdm1s_mo_tot_lr[ind] - rdm1_mo_tot_lr_fd)
            err_ao = np.max(np.abs(C@(rdm1s_mo_tot_lr[ind] - \
                rdm1_mo_tot_lr_fd)@C.T))
            errs[i,j] = errs[j,i] = err
            errs_norm[i,j] = errs_norm[j,i] = err_norm
            errs_ao[i,j] = errs_ao[j,i] = err_ao
    dumpMat(errs, "MO basis, max error", fmt="%.1E")
    dumpMat(errs_norm, "MO basis, Frobenius-norm error", fmt="%.1E")
    dumpMat(errs_ao, "AO basis, max error", fmt="%.1E")
##################################
# Main implementation starts here.  The routines above serve as
# reference/validation code; the classes and functions below rebuild
# the CPHF/MP2 response machinery around a cached-integral container
# (MP2_ERIS) so the expensive transformations are done once.
##################################
class MP2_ERIS:
    """Cache of MO-basis integral blocks and MP2 amplitudes used by the
    response routines below.

    Blocks are named by their occupied (o) / virtual (v) index pattern,
    e.g. ``ovov`` holds (ia|jb).
    """
    def __init__(self, mf):
        """Transform the AO integrals of a converged mean-field object.

        ``mf`` must provide nao, nocc, mol.V (AO two-electron
        integrals), mo_coeff, and mo_energy.
        """
        nao = mf.nao
        nocc = mf.nocc
        nvirt = nao - nocc
        V = mf.mol.V
        C = mf.mo_coeff
        Co = C[:,:nocc]
        Cv = C[:,nocc:]
        moe = mf.mo_energy
        self.nao = nao
        self.nocc = nocc
        self.nvirt = nvirt
        self.C = C
        self.moe = moe
        # (ia|jb); vovo below is a transposed view of the same data
        self.ovov = np.einsum("pqrs,pi,qa,rj,sb->iajb", V, \
            Co, Cv, Co, Cv, optimize=True)
        self.vovo = self.ovov.transpose(1,0,3,2)
        self.vvoo = np.einsum("pqrs,pa,qb,ri,sj->abij", V, \
            Cv, Cv, Co, Co, optimize=True)
        self.oovo = np.einsum("pqrs,pi,qj,ra,sk->ijak", V, \
            Co, Co, Cv, Co, optimize=True)
        self.vvvo = np.einsum("pqrs,pa,qb,rc,si->abci", V, \
            Cv, Cv, Cv, Co, optimize=True)
        # Energy denominators: Dia[i,a] = e_i - e_a, Diajb the 4-index sum
        self.Dia = moe[:nocc].reshape(nocc,1) - moe[nocc:]
        self.Diajb = self.Dia.reshape(nocc,nvirt,1,1) + self.Dia
        # First-order MP2 doubles amplitudes
        self.t2 = self.ovov / self.Diajb
def get_mp2_rdm2(eris, dm1oo, dm1vv, dm1vo):
    """Assemble the relaxed MP2 two-particle density matrix (MO basis).

    Parameters:
        eris:  MP2_ERIS instance (supplies nao, nocc, t2).
        dm1oo, dm1vv:  occupied-occupied / virtual-virtual blocks of the
            MP2 one-particle density correction.
        dm1vo:  virtual-occupied (orbital-relaxation) block from CPHF.

    NOTE(review): the block structure follows the separable
    SCF + correlation decomposition of MP2 gradient theory; verify
    against a finite-difference energy derivative before relying on the
    individual exchange blocks.
    """
    nao = eris.nao
    nocc = eris.nocc
    t2 = eris.t2
    Ioo = np.eye(nocc)
    rdm2 = np.zeros([nao,nao,nao,nao])
    # scf contribution
    rdm2[:nocc,:nocc,:nocc,:nocc] = \
        2.*np.einsum("ij,kl->ijkl", Ioo, Ioo) - \
        np.einsum("il,kj->ijkl", Ioo, Ioo)
    # oooo, direct + exchange
    rdm2[:nocc,:nocc,:nocc,:nocc] += \
        2.*(np.einsum("ij,kl->ijkl", dm1oo, Ioo) + \
        np.einsum("ij,kl->ijkl", Ioo, dm1oo)) - (\
        np.einsum("il,kj->ijkl", dm1oo, Ioo) +
        np.einsum("il,kj->ijkl", Ioo, dm1oo))
    # vvoo/oovv, direct
    rdm2[nocc:,nocc:,:nocc,:nocc] = 2.*np.einsum("ab,kl->abkl", dm1vv, Ioo)
    rdm2[:nocc,:nocc,nocc:,nocc:] = 2.*np.einsum("kl,ab->klab", Ioo, dm1vv)
    # vvoo/oovv, exchange
    rdm2[nocc:,:nocc,:nocc,nocc:] = -np.einsum("ab,kl->alkb", dm1vv, Ioo)
    rdm2[:nocc,nocc:,nocc:,:nocc] = -np.einsum("ab,kl->kbal", dm1vv, Ioo)
    # ovov/vovo: amplitude (non-separable) part
    rdm2[:nocc,nocc:,:nocc,nocc:] = 2.*t2 - t2.transpose(0,3,2,1)
    rdm2[nocc:,:nocc,nocc:,:nocc] = 2.*t2.transpose(1,0,3,2) - t2.transpose(1,2,3,0)
    # orbital relaxation contributions
    rdm2[nocc:,:nocc,:nocc,:nocc] += 2.*np.einsum("ai,kl->aikl", dm1vo, Ioo)
    rdm2[:nocc,nocc:,:nocc,:nocc] += 2.*np.einsum("ai,kl->iakl", dm1vo, Ioo)
    rdm2[:nocc,:nocc,nocc:,:nocc] += 2.*np.einsum("ai,kl->klai", dm1vo, Ioo)
    rdm2[:nocc,:nocc,:nocc,nocc:] += 2.*np.einsum("ai,kl->klia", dm1vo, Ioo)
    rdm2[nocc:,:nocc,:nocc,:nocc] -= np.einsum("al,ki->aikl", dm1vo, Ioo)
    rdm2[:nocc,:nocc,:nocc,nocc:] -= np.einsum("al,ki->lika", dm1vo, Ioo)
    rdm2[:nocc,:nocc,nocc:,:nocc] -= np.einsum("al,ki->klai", dm1vo, Ioo)
    rdm2[:nocc,nocc:,:nocc,:nocc] -= np.einsum("al,ki->kali", dm1vo, Ioo)
    return rdm2
def get_mp2_B_eris(eris):
    """Build the CPHF orbital-Hessian matrix B.

    B_aibj = delta_aibj (e_i - e_a) - 4*V_aibj + V_ajbi + V_abji,
    returned as an (nvirt*nocc, nvirt*nocc) matrix.
    """
    nvirt = eris.nao - eris.nocc
    dim = nvirt * eris.nocc
    Vvovo = eris.vovo
    Vvvoo = eris.vvoo
    # Off-diagonal integral part, flattened over the (a, i) pairs.
    B = (-4.*Vvovo + Vvovo.transpose(0,3,2,1) + \
         Vvvoo.transpose(0,2,1,3)).reshape(dim, dim)
    # Orbital-energy gaps on the diagonal.
    B[np.diag_indices(dim)] += eris.Dia.T.reshape(dim)
    return B
def get_mp2_X_eris(eris, dm1oo, dm1vv):
    """Build the right-hand-side vector X_ai of the MP2 Z-vector equation.

    Contracts the oo/vv blocks of the unrelaxed MP2 1-RDM and the t2
    amplitudes with (anti)symmetrized MO integral blocks.

    Args:
        eris: container providing ``nao``, ``nocc``, the ``oovo``/``vvvo``
            integral blocks and the amplitudes ``t2``.
        dm1oo: occupied-occupied block of the unrelaxed MP2 1-RDM.
        dm1vv: virtual-virtual block of the unrelaxed MP2 1-RDM.

    Returns:
        X: 1-D ndarray of length nvirt*nocc (flattened (a,i) order).
    """
    nao = eris.nao
    nocc = eris.nocc
    nvirt = nao - nocc
    Voovo = eris.oovo
    Vvvvo = eris.vvvo
    t2 = eris.t2
    X = np.einsum("kj,kjai->ai", dm1oo, 2.*Voovo - Voovo.transpose(0, 3, 2, 1)) + \
        np.einsum("bc,bcai->ai", dm1vv, 2.*Vvvvo - Vvvvo.transpose(2, 1, 0, 3)) + \
        np.einsum("icjb,acbj->ai", t2,
                  2.*Vvvvo - Vvvvo.transpose(0, 2, 1, 3), optimize=True) - \
        np.einsum("kajb,kibj->ai", t2,
                  2.*Voovo - Voovo.transpose(3, 1, 2, 0), optimize=True)
    # flatten to the composite (a,i) index used by the CPHF solver
    X = X.reshape(nvirt*nocc)
    return X
def cphf_kernel_batch(eris, B, vs, thresh=1E8):
    """Solve the CPHF equations for a batch of one-electron perturbations.

    Args:
        eris: container with MO coefficients ``C``, orbital energies
            ``moe``, ``nao``/``nocc`` and the ``oovo``/``vvvo`` blocks.
        B: CPHF orbital-Hessian matrix (see ``get_mp2_B_eris``).
        vs: list of AO-basis perturbation matrices.
        thresh: abort threshold for (quasi-)degenerate orbital-energy
            denominators in the oo/vv blocks.

    Returns:
        Us: list of full (nao, nao) orbital-rotation matrices, one per
        perturbation in ``vs``.

    Raises:
        RuntimeError: if 1/|e_p - e_q| exceeds ``thresh`` in the oo or vv
            denominators (near-degenerate orbitals).
    """
    nao = eris.nao
    nocc = eris.nocc
    nvirt = nao - nocc
    C = eris.C
    moe = eris.moe
    nv = len(vs)
    # compute all rhs's: the vo block of every perturbation in the MO basis
    Qs = np.zeros([nvirt*nocc, nv])
    for i in range(nv):
        Qs[:, i] = (C[:, nocc:].T@vs[i]@C[:, :nocc]).reshape(nvirt*nocc)
    # vo blocks: solve all linear systems in one call
    us = np.linalg.solve(B, Qs)
    # full U's
    Voovo = eris.oovo
    Vvvvo = eris.vvvo
    Us = [None for i in range(nv)]
    for i in range(nv):
        vmo = C.T @ vs[i] @ C
        uvo = us[:, i].reshape(nvirt, nocc)
        U = np.zeros([nao, nao])
        U[nocc:, :nocc] = uvo
        # antisymmetry of the rotation: U_ia = -U_ai
        U[:nocc, nocc:] = -uvo.T
        # occupied-occupied block
        # ( Qjk + sum_ai (4*Vjkai-Vjiak-Vikaj)*uai ) / (ek - ej)
        denom_oo = moe[:nocc].reshape(nocc, 1) - moe[:nocc]
        # dummy value on the diagonal; the diagonal is zeroed below
        denom_oo[np.diag_indices(nocc)] = 1.
        if np.sum(np.abs(denom_oo**-1) > thresh) > 0:
            raise RuntimeError
        U[:nocc, :nocc] = -(vmo[:nocc, :nocc] +
                            np.einsum("jkai,ai->jk", 4.*Voovo -
                                      Voovo.transpose(0, 3, 2, 1) -
                                      Voovo.transpose(3, 1, 2, 0),
                                      uvo)) / denom_oo
        U[np.diag_indices(nocc)] = 0.
        # virtual-virtual block
        # ( Qbc + sum_ai (4*Vbcai-Vacbi-Vbaci)*uai ) / (ec - eb)
        denom_vv = moe[nocc:].reshape(nvirt, 1) - moe[nocc:]
        # dummy value on the diagonal; the diagonal is zeroed below
        denom_vv[np.diag_indices(nvirt)] = 1.
        if np.sum(np.abs(denom_vv**-1) > thresh) > 0:
            raise RuntimeError
        uvv = -(vmo[nocc:, nocc:] +
                np.einsum("bcai,ai->bc", 4.*Vvvvo -
                          Vvvvo.transpose(2, 1, 0, 3) -
                          Vvvvo.transpose(0, 2, 1, 3),
                          uvo)) / denom_vv
        uvv[np.diag_indices(nvirt)] = 0.
        U[nocc:, nocc:] = uvv
        Us[i] = U
    return Us
def get_Vvvoo_lr_batch(Vavoo, Vvvoa, C, U, nocc):
    """Linear-response correction to the (vv|oo)-type integral block.

    Contracts the half-transformed integrals with the rotated coefficient
    matrix C@U and symmetrizes over the two virtual and the two occupied
    indices.
    """
    CU = C @ U
    virt_rot = CU[:, nocc:]
    occ_rot = CU[:, :nocc]
    virt_part = np.einsum("pbij,pa->abij", Vavoo, virt_rot, optimize=True)
    occ_part = np.einsum("abis,sj->abij", Vvvoa, occ_rot, optimize=True)
    return (virt_part + virt_part.transpose(1, 0, 2, 3) +
            occ_part + occ_part.transpose(0, 1, 3, 2))
def get_mp2_B_lr_eris(mf, v, U, moe_lr, Vvvoo_lr, Vvovo_lr):
    """Linear-response counterpart of the CPHF B matrix.

    Same structure as ``get_mp2_B_eris`` but assembled from the
    linear-response integrals and orbital energies.

    NOTE(review): ``v`` and ``U`` are unused in this function; they appear
    to be kept only for a uniform call signature — confirm before removing.

    Args:
        mf: mean-field object providing ``nao`` and ``nocc``.
        v: (unused) AO-basis perturbation matrix.
        U: (unused) orbital-rotation matrix.
        moe_lr: linear-response orbital energies.
        Vvvoo_lr: response (vv|oo) integral block.
        Vvovo_lr: response (vo|vo) integral block.

    Returns:
        B_lr: (nvirt*nocc, nvirt*nocc) response Hessian matrix.
    """
    nao = mf.nao
    nocc = mf.nocc
    nvirt = nao - nocc
    # diag: response orbital-energy differences e_i - e_a
    diag_lr = (moe_lr[:nocc]-moe_lr[nocc:].reshape(nvirt, 1)).reshape(nvirt*nocc)
    # compute
    B_lr = (-4.*Vvovo_lr + Vvovo_lr.transpose(0, 3, 2, 1) +
            Vvvoo_lr.transpose(0, 2, 1, 3)).reshape(nvirt*nocc, nvirt*nocc)
    B_lr[np.diag_indices(nvirt*nocc)] += diag_lr
    return B_lr
def get_Voovo_lr_batch(Vaovo, Vooao, Voova, C, U, nocc):
    """Linear-response correction to the (oo|vo)-type integral block."""
    CU = C @ U
    occ_rot = CU[:, :nocc]
    virt_rot = CU[:, nocc:]
    term_i = np.einsum("pjak,pi->ijak", Vaovo, occ_rot, optimize=True)
    term_a = np.einsum("ijrk,ra->ijak", Vooao, virt_rot, optimize=True)
    term_k = np.einsum("ijas,sk->ijak", Voova, occ_rot, optimize=True)
    # symmetrization applies only to the two leading occupied indices
    return term_i + term_i.transpose(1, 0, 2, 3) + term_a + term_k
def get_Vvvvo_lr_batch(Vavvo, Vvvao, Vvvva, C, U, nocc):
    """Linear-response correction to the (vv|vo)-type integral block."""
    CU = C @ U
    virt_rot = CU[:, nocc:]
    occ_rot = CU[:, :nocc]
    term_a = np.einsum("pbci,pa->abci", Vavvo, virt_rot, optimize=True)
    term_c = np.einsum("abri,rc->abci", Vvvao, virt_rot, optimize=True)
    term_i = np.einsum("abcs,si->abci", Vvvva, occ_rot, optimize=True)
    # symmetrization applies only to the first two virtual indices
    return term_a + term_a.transpose(1, 0, 2, 3) + term_c + term_i
def get_mp2_X_lr_eris(eris, v, U, t2_lr, Voovo_lr, Vvvvo_lr, dm1oovv, dm1oovv_lr):
    """Linear response of the CPHF right-hand side X_ai.

    Product rule applied to ``get_mp2_X_eris``: every term appears twice,
    once with the response density/amplitudes against the bare integrals
    and once with the bare density/amplitudes against the response
    integrals.

    NOTE(review): ``v`` and ``U`` are unused in this function; presumably
    kept for a uniform call signature — confirm before removing.

    Returns:
        X_lr: 1-D ndarray of length nvirt*nocc.
    """
    nao = eris.nao
    nocc = eris.nocc
    nvirt = nao - nocc
    t2 = eris.t2
    Voovo = eris.oovo
    Vvvvo = eris.vvvo
    # split the block-diagonal 1-RDMs into their oo and vv blocks
    dm1oo = dm1oovv[:nocc, :nocc]
    dm1vv = dm1oovv[nocc:, nocc:]
    dm1oo_lr = dm1oovv_lr[:nocc, :nocc]
    dm1vv_lr = dm1oovv_lr[nocc:, nocc:]
    # first four terms: response densities/amplitudes x bare integrals;
    # last four terms: bare densities/amplitudes x response integrals
    X_lr = np.einsum("kj,kjai->ai", dm1oo_lr, 2.*Voovo - Voovo.transpose(0, 3, 2, 1)) + \
        np.einsum("bc,bcai->ai", dm1vv_lr, 2.*Vvvvo - Vvvvo.transpose(2, 1, 0, 3)) + \
        np.einsum("icjb,acbj->ai", t2_lr,
                  2.*Vvvvo - Vvvvo.transpose(0, 2, 1, 3), optimize=True) - \
        np.einsum("kajb,kibj->ai", t2_lr,
                  2.*Voovo - Voovo.transpose(3, 1, 2, 0), optimize=True) + \
        np.einsum("kj,kjai->ai", dm1oo, 2.*Voovo_lr - Voovo_lr.transpose(0, 3, 2, 1)) + \
        np.einsum("bc,bcai->ai", dm1vv, 2.*Vvvvo_lr - Vvvvo_lr.transpose(2, 1, 0, 3)) + \
        np.einsum("icjb,acbj->ai", t2,
                  2.*Vvvvo_lr - Vvvvo_lr.transpose(0, 2, 1, 3), optimize=True) - \
        np.einsum("kajb,kibj->ai", t2,
                  2.*Voovo_lr - Voovo_lr.transpose(3, 1, 2, 0), optimize=True)
    X_lr = X_lr.reshape(nvirt*nocc)
    return X_lr
def mp2_rdm(mf, eris, ret_rdm2=False, vs=None, timing=False):
    """Compute the relaxed MP2 density matrices and, optionally, their
    linear response for a batch of one-electron perturbations.

    Args:
        mf: mean-field (RHF) object providing ``mol.V`` and ``mo_coeff``.
        eris: MP2_ERIS container with MO integrals, t2 amplitudes and
            energy denominators.
        ret_rdm2: if True, also build the AO-basis 2-RDM.
        vs: optional list of AO-basis perturbation matrices; when given,
            the linear response of the 1-RDM is computed for each of them.
        timing: if True, print wall-clock timings of the individual steps.

    Returns:
        (dm1_ao, dm2_ao) when ``vs`` is None, otherwise
        (dm1_ao, dm2_ao, dm1s_mo_lr), where ``dm1s_mo_lr`` is the list of
        MO-basis linear-response 1-RDMs (one per perturbation in ``vs``)
        and ``dm2_ao`` is None unless ``ret_rdm2`` is True.
    """
    nao = eris.nao
    nocc = eris.nocc
    nvirt = nao - nocc
    Diajb = eris.Diajb
    t2 = eris.t2
    V = mf.mol.V
    C = mf.mo_coeff
    Co = C[:, :nocc]
    Cv = C[:, nocc:]
    ### make mp2 rdm1 ###
    # oo/vv blocks of the unrelaxed MP2 1-RDM
    if timing:
        start = time.time()
    dm1oo = -np.einsum("iakb,jakb->ij", t2, 2.*t2 - t2.transpose(0, 3, 2, 1), optimize=True)
    dm1vv = np.einsum("iajc,ibjc->ab", t2, 2.*t2 - t2.transpose(0, 3, 2, 1), optimize=True)
    if timing:
        end = time.time()
        print(" ** timing-dm1 oo/vv : {:.6f}".format(end-start), flush=True)
    # vo block from the Z-vector (CPHF) equation
    if timing:
        start = time.time()
    B = get_mp2_B_eris(eris)
    X = get_mp2_X_eris(eris, dm1oo, dm1vv)
    dm1vo = np.linalg.solve(B.T, X).reshape(nvirt, nocc)
    if timing:
        end = time.time()
        print(" ** timing-dm1 ov : {:.6f}".format(end-start), flush=True)
    # collect the full MO-basis MP2 correction
    dm1_mp2 = np.block([[dm1oo, dm1vo.T], [dm1vo, dm1vv]])
    # rhf contribution: unit occupation of the occupied orbitals
    dm1_rhf = np.zeros([nao, nao])
    dm1_rhf[np.diag_indices(nocc)] = 1.
    # xform to AO basis
    dm1_ao = C@(dm1_mp2+dm1_rhf)@C.T
    ### make mp2 rdm2 (rhf contributions included) ###
    if ret_rdm2:
        # directly form AO basis rdm2
        if timing:
            start = time.time()
        dm2_ao = ao2mo_xform_nosymm(
            get_mp2_rdm2(eris, dm1oo, dm1vv, dm1vo), C.T)
        if timing:
            end = time.time()
            print(" ** timing-dm2 ao : {:.6f}".format(end-start), flush=True)
    else:
        dm2_ao = None
    # return here if rdm1_lr is not requested
    if vs is None:
        return dm1_ao, dm2_ao
    ### cphf ###
    nv = len(vs)
    if timing:
        start = time.time()
    Us = cphf_kernel_batch(eris, B, vs)
    if timing:
        end = time.time()
        print(" ** timing-cphf Us : {:.6f}".format(end-start), flush=True)
    ### compute common intermediates (shared by all perturbations) ###
    if timing:
        start = time.time()
    # for Diajb_lr
    moes_lr = get_moe_lr_batch(V, C, vs, Us, nocc)
    # for Vovov_lr
    Vavov = np.einsum("pqrs,qa,rj,sb->pajb", V, Cv, Co, Cv, optimize=True)
    Voaov = np.einsum("pqrs,pi,rj,sb->iqjb", V, Co, Co, Cv, optimize=True)
    # for Vvvoo_lr
    Vavoo = np.einsum("pqrs,qa,ri,sj->paij", V, Cv, Co, Co, optimize=True)
    Vvvoa = np.einsum("pqrs,pa,qb,ri->abis", V, Cv, Cv, Co, optimize=True)
    # for Voovo_lr
    Vaovo = Voaov.transpose(1, 0, 3, 2)
    Vooao = np.einsum("pqrs,pi,qj,sk->ijrk", V, Co, Co, Co, optimize=True)
    Voova = Vavoo.transpose(2, 3, 1, 0)
    # for Vvvvo_lr
    Vavvo = np.einsum("pqrs,qa,rb,si->pabi", V, Cv, Cv, Co, optimize=True)
    Vvvao = Vvvoa.transpose(0, 1, 3, 2)
    Vvvva = np.einsum("pqrs,pa,qb,rc->abcs", V, Cv, Cv, Cv, optimize=True)
    if timing:
        end = time.time()
        print(" ** timing-loop prep : {:.6f}".format(end-start), flush=True)
    ### compute rdm1 lr ###
    # oo/vv lr
    rhss_lr = np.zeros([nvirt*nocc, nv])
    dm1s_mo_lr = [None for i in range(nv)]
    ind = -1
    ttot = 0.
    for v, U, moe_lr in zip(vs, Us, moes_lr):
        ind += 1
        if timing:
            start = time.time()
        # scf, orbital response
        dm1_or_lr = U@dm1_rhf + dm1_rhf@U.T
        # mp2, orbital response
        dm1_or_lr += U@dm1_mp2 + dm1_mp2@U.T
        # mp2, oo/vv: response of the amplitudes, then of the density blocks
        Vovov_lr = get_Vovov_lr_batch(Vavov, Voaov, C, U, nocc)
        Diajb_lr = get_Diajb(moe_lr, nocc)
        t2_lr = (Vovov_lr - t2*Diajb_lr) / Diajb
        dm1oo_lr = -np.einsum("iakb,jakb->ij",
                              t2_lr, 2.*t2 - t2.transpose(0, 3, 2, 1))
        dm1vv_lr = np.einsum("iajc,ibjc->ab",
                             t2_lr, 2.*t2 - t2.transpose(0, 3, 2, 1))
        # symmetrize (the t2_lr contraction covers only half of each block)
        dm1oo_lr += dm1oo_lr.T
        dm1vv_lr += dm1vv_lr.T
        dm1oovv_lr = slg.block_diag(dm1oo_lr, dm1vv_lr)
        # mp2, vo: assemble the response Z-vector right-hand side
        Vvvoo_lr = get_Vvvoo_lr_batch(Vavoo, Vvvoa, C, U, nocc)
        B_lr = get_mp2_B_lr_eris(mf, v, U, moe_lr, Vvvoo_lr, Vovov_lr.transpose(1, 0, 3, 2))
        Voovo_lr = get_Voovo_lr_batch(Vaovo, Vooao, Voova, C, U, nocc)
        Vvvvo_lr = get_Vvvvo_lr_batch(Vavvo, Vvvao, Vvvva, C, U, nocc)
        X_lr = get_mp2_X_lr_eris(eris, v, U, t2_lr, Voovo_lr, Vvvvo_lr, dm1_mp2, dm1oovv_lr)
        rhss_lr[:, ind] = X_lr - B_lr.T@dm1vo.ravel()
        # collect contributions
        dm1s_mo_lr[ind] = dm1_or_lr + dm1oovv_lr
        if timing:
            end = time.time()
            ttot += end-start
    if timing:
        print(" ** timing-per v : {:.6f}".format(ttot/float(nv)), flush=True)
        print(" ** timing-{:4d} vs : {:.6f}".format(nv, ttot), flush=True)
    if timing:
        start = time.time()
    # solve all response Z-vector equations in a single batched call
    dm1vos_lr = np.linalg.solve(B.T, rhss_lr)
    if timing:
        end = time.time()
        print(" ** timing-cpmp2 : {:.6f}".format(end-start), flush=True)
    for ind in range(nv):
        dm1vo_lr = dm1vos_lr[:, ind].reshape(nvirt, nocc)
        dm1s_mo_lr[ind][nocc:, :nocc] += dm1vo_lr
        dm1s_mo_lr[ind][:nocc, nocc:] += dm1vo_lr.T
    # BUGFIX: dm2_ao was previously dropped from this return although the
    # in-file caller (test_mp2_lr_rdm1_lr_batch) unpacks three values and the
    # original comment here documents three return values.
    return dm1_ao, dm2_ao, dm1s_mo_lr
def get_mp2_lr_rdm1_lr_mo_fd(mf, v, eps=1.E-3):
    """Finite-difference reference for the MP2 1-RDM linear response.

    Applies the perturbation +/- eps*v to the core Hamiltonian, re-solves
    the RHF + MP2 problem at each displacement, central-differences the
    AO-basis relaxed densities, and transforms the result back to the
    unperturbed MO basis for comparison with the analytic response.

    Args:
        mf: converged reference RHF object.
        v: AO-basis (symmetric) perturbation matrix.
        eps: finite-difference step size.

    Returns:
        dPmo_lr: (nao, nao) response density in the unperturbed MO basis.
    """
    h = mf.h
    V = mf.mol.V
    nao = mf.nao
    nocc = mf.nocc
    nvirt = nao - nocc
    C = mf.mo_coeff
    # forward-displaced calculation
    mf_f = scf.RHF(h=h+eps*v, V=V, nocc=nocc)
    mf_f.kernel(verbose="mute")
    C_f = mf_f.mo_coeff
    Pmo_lr_f = get_mp2_lr_rdm1_mo(mf_f)
    Pao_lr_f = C_f @ Pmo_lr_f @ C_f.T + mf_f.rdm1
    # backward-displaced calculation
    mf_b = scf.RHF(h=h-eps*v, V=V, nocc=nocc)
    mf_b.kernel(verbose="mute")
    C_b = mf_b.mo_coeff
    Pmo_lr_b = get_mp2_lr_rdm1_mo(mf_b)
    Pao_lr_b = C_b @ Pmo_lr_b @ C_b.T + mf_b.rdm1
    # central difference in the AO basis, then back to the reference MO basis
    dPao_lr = (Pao_lr_f-Pao_lr_b) / 2. / eps
    dPmo_lr = C.T @ dPao_lr @ C
    return dPmo_lr
import time
def test_mp2_lr_rdm1_lr_batch():
    """Consistency test for the analytic MP2 1-RDM linear response.

    Builds a small random model Hamiltonian, verifies that the MP2 energy
    recomputed from the 1- and 2-RDMs matches the MP2 total energy, and
    compares the analytic response densities for a batch of one-electron
    perturbations against central finite differences.
    """
    np.random.seed(1)
    nao = 7
    nocc = 3
    nvirt = nao - nocc
    from frankenstein.tools.lat_utils import get_random_hV
    h, V = get_random_hV(nao)
    # alternative molecular test case (kept disabled as a string literal)
    """
    geom = "geom/1.xyz"
    basis = "sto-3g"
    nocc = 21
    from frankenstein.tools.lat_utils import get_mol
    h, V, e_nuc = get_mol(geom, basis, True)
    nao = h.shape[0]
    nvirt = nao - nocc
    print("nao = {:d}".format(nao))
    """
    mf = scf.RHF(h=h, V=V, nocc=nocc)
    mf.kernel(verbose="mute")
    C = mf.mo_coeff
    # perturbations: symmetric one-electron matrices for all p <= q < n0
    n0 = 3
    vs = []
    for p in range(n0):
        for q in range(p, n0):
            v = np.zeros([nao, nao])
            v[p, q] = v[q, p] = 1.
            vs.append(v)
    start = time.time()
    eris = MP2_ERIS(mf)
    end = time.time()
    print("timing-eris : {:.6f}".format(end-start))
    # check ecorr: energy recomputed from the RDMs must match MP2 e_tot
    # NOTE(review): the densities are contracted with MO-basis integrals;
    # this relies on the basis conventions of mp2_rdm/ao2mo_xform_nosymm —
    # confirm the transforms are consistent.
    mmp = mp.MP2(mf)
    mmp.kernel()
    dm1, dm2 = mp2_rdm(mf, eris, ret_rdm2=True)
    hmo = C.T @ h @ C
    Vmo = np.einsum("pqrs,pi,qj,rk,sl->ijkl", V, C, C, C, C, optimize=True)
    e1 = 2.*np.sum(hmo*dm1)
    e2 = np.sum(Vmo*dm2)
    print("etot = {: .10f}".format(mmp.e_tot))
    print("e1 = {: .10f}".format(e1))
    print("e2 = {: .10f}".format(e2))
    print("e12 = {: .10f}".format(e1+e2))
    print("etot match?", np.allclose(mmp.e_tot, e1+e2))
    # analytic linear response for the whole batch of perturbations
    start = time.time()
    dm1, dm2, dPmos = mp2_rdm(mf, eris, vs=vs)
    end = time.time()
    print("timing-dPmo : {:.6f}".format(end-start))
    # compare each analytic response density against finite differences
    pq = -1
    errs = np.zeros([n0, n0])
    for p in range(n0):
        for q in range(p, n0):
            pq += 1
            start = time.time()
            dPmo_fd = get_mp2_lr_rdm1_lr_mo_fd(mf, vs[pq])
            end = time.time()
            print("timing-dPfd : {:.6f}".format(end-start))
            err = np.linalg.norm(dPmo_fd-dPmos[pq])
            errs[p, q] = errs[q, p] = err
    dumpMat(errs, fmt="%.3E")
if __name__ == "__main__":
    # run the finite-difference consistency check when executed directly
    # test_mp2_t2()
    test_mp2_lr_rdm1_lr_batch()
| hongzhouye/frankenstein | tools/mp2_utils.py | Python | bsd-3-clause | 26,754 | [
"MOE"
] | fc6d649b1af9e43ccc456fdacd26cf9713996e9fc5f7a0b8cbe0c55f1227612b |
#!/bin/env python
""" create and put 'PutAndRegister' request with a single local file
warning: make sure the file you want to put is accessible from DIRAC production hosts,
i.e. put file on network fs (AFS or NFS), otherwise operation will fail!!!
"""
__RCSID__ = "$Id: $"
import os
from DIRAC.Core.Base import Script
# BUGFIX: a missing trailing comma after the 'LFN' entry used to concatenate
# the 'LFN' and 'localFile' lines into a single help entry.
Script.setUsageMessage( '\n'.join( [ __doc__,
                                     'Usage:',
                                     ' %s [option|cfgfile] requestName LFN localFile targetSE' % Script.scriptName,
                                     'Arguments:',
                                     ' requestName: a request name',
                                     ' LFN: logical file name',
                                     ' localFile: local file you want to put',
                                     ' targetSE: target SE' ] ) )
# # execution
if __name__ == "__main__":
  from DIRAC.Core.Base.Script import parseCommandLine
  parseCommandLine()
  import DIRAC
  from DIRAC import gLogger
  args = Script.getPositionalArgs()
  requestName = None
  LFN = None
  PFN = None
  targetSE = None
  # BUGFIX: the condition used to read "not len( args ) != 4", which showed
  # the help exactly when the argument count was correct and crashed with an
  # IndexError otherwise.
  if len( args ) != 4:
    Script.showHelp()
    DIRAC.exit( 0 )
  else:
    requestName = args[0]
    LFN = args[1]
    PFN = args[2]
    targetSE = args[3]
  # LFNs must always be absolute paths
  if not os.path.isabs(LFN):
    gLogger.error( "LFN should be absolute path!!!" )
    DIRAC.exit( -1 )
  gLogger.info( "will create request '%s' with 'PutAndRegister' "\
                "operation using %s pfn and %s target SE" % ( requestName, PFN, targetSE ) )
  from DIRAC.RequestManagementSystem.Client.Request import Request
  from DIRAC.RequestManagementSystem.Client.Operation import Operation
  from DIRAC.RequestManagementSystem.Client.File import File
  from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
  from DIRAC.Core.Utilities.Adler import fileAdler
  # sanity checks on the local file before building the request
  if not os.path.exists( PFN ):
    gLogger.error( "%s does not exist" % PFN )
    DIRAC.exit( -1 )
  if not os.path.isfile( PFN ):
    gLogger.error( "%s is not a file" % PFN )
    DIRAC.exit( -1 )
  PFN = os.path.abspath( PFN )
  size = os.path.getsize( PFN )
  adler32 = fileAdler( PFN )
  # build the request: a single 'PutAndRegister' operation with one file
  request = Request()
  request.RequestName = requestName
  putAndRegister = Operation()
  putAndRegister.Type = "PutAndRegister"
  putAndRegister.TargetSE = targetSE
  opFile = File()
  opFile.LFN = LFN
  opFile.PFN = PFN
  opFile.Size = size
  opFile.Checksum = adler32
  opFile.ChecksumType = "ADLER32"
  putAndRegister.addFile( opFile )
  reqClient = ReqClient()
  putRequest = reqClient.putRequest( request )
  if not putRequest["OK"]:
    gLogger.error( "unable to put request '%s': %s" % ( requestName, putRequest["Message"] ) )
    DIRAC.exit( -1 )
  # BUGFIX: the request name placeholder was left unfilled (missing % args)
  gLogger.always( "Request '%s' has been put to ReqDB for execution." % requestName )
  gLogger.always( "You can monitor its status using command: 'dirac-rms-show-request %s'" % requestName )
  DIRAC.exit( 0 )
| Sbalbp/DIRAC | DataManagementSystem/scripts/dirac-dms-put-and-register-request.py | Python | gpl-3.0 | 2,952 | [
"DIRAC"
] | 0a1c17f1b2748207868dde5f087e301dd3c8f88f6a171744cdfff780705218e4 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Nov 27, 2011"
import os
import itertools
import math
import subprocess
import numpy as np
import vtk
from monty.serialization import loadfn
from pymatgen.util.coord_utils import in_coord_list
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis(object):
"""
Provides Structure object visualization using VTK.
"""
def __init__(self, element_color_mapping=None, show_unit_cell=True,
             show_bonds=False, show_polyhedron=True,
             poly_radii_tol_factor=0.5, excluded_bonding_elements=None):
    """
    Constructs a Structure Visualization.

    Args:
        element_color_mapping: Optional color mapping for the elements,
            as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
            123,0), ....} If None is specified, a default based on
            Jmol"s color scheme is used.
        show_unit_cell: Set to False to not show the unit cell
            boundaries. Defaults to True.
        show_bonds: Set to True to show bonds. Defaults to True.
        show_polyhedron: Set to True to show polyhedrons. Defaults to
            False.
        poly_radii_tol_factor: The polyhedron and bonding code uses the
            ionic radii of the elements or species to determine if two
            atoms are bonded. This specifies a tolerance scaling factor
            such that atoms which are (1 + poly_radii_tol_factor) * sum
            of ionic radii apart are still considered as bonded.
        excluded_bonding_elements: List of atom types to exclude from
            bonding determination. Defaults to an empty list. Useful
            when trying to visualize a certain atom type in the
            framework (e.g., Li in a Li-ion battery cathode material).

    Useful keyboard shortcuts implemented.
        h : Show help
        A/a : Increase/decrease cell by one unit vector in a-direction
        B/b : Increase/decrease cell by one unit vector in b-direction
        C/c : Increase/decrease cell by one unit vector in c-direction
        # : Toggle showing of polyhedrons
        - : Toggle showing of bonds
        [ : Decrease poly_radii_tol_factor by 0.05
        ] : Increase poly_radii_tol_factor by 0.05
        r : Reset camera direction
        o : Orthogonalize structure
        Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
        Left/right : Rotate view along camera direction by 90
        clock/anticlockwise
    """
    # create a rendering window and renderer
    self.ren = vtk.vtkRenderer()
    self.ren_win = vtk.vtkRenderWindow()
    self.ren_win.AddRenderer(self.ren)
    self.ren.SetBackground(1, 1, 1)  # white background
    self.title = "Structure Visualizer"
    # create a renderwindowinteractor
    self.iren = vtk.vtkRenderWindowInteractor()
    self.iren.SetRenderWindow(self.ren_win)
    # maps vtk mappers back to the site(s) they render (used by the picker)
    self.mapper_map = {}
    self.structure = None
    if element_color_mapping:
        self.el_color_mapping = element_color_mapping
    else:
        # default: the VESTA color scheme from ElementColorSchemes.yaml
        self.el_color_mapping = EL_COLORS["VESTA"]
    self.show_unit_cell = show_unit_cell
    self.show_bonds = show_bonds
    self.show_polyhedron = show_polyhedron
    self.poly_radii_tol_factor = poly_radii_tol_factor
    self.excluded_bonding_elements = excluded_bonding_elements if \
        excluded_bonding_elements else []
    self.show_help = True
    # supercell scaling matrix; modified by the keyboard shortcuts
    self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    self.redraw()
    style = StructureInteractorStyle(self)
    self.iren.SetInteractorStyle(style)
    # give the interactor style access back to this visualizer
    self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
    """Rotate the active camera and re-render.

    Args:
        axis_ind: which camera rotation to apply — 0 rolls, 1 azimuths,
            any other value pitches. Defaults to 0.
        angle: rotation angle. Defaults to 0.
    """
    cam = self.ren.GetActiveCamera()
    if axis_ind == 0:
        rotate = cam.Roll
    elif axis_ind == 1:
        rotate = cam.Azimuth
    else:
        rotate = cam.Pitch
    rotate(angle)
    self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1,
                image_format="png"):
    """
    Save render window to an image.

    Arguments:
        filename:
            filename to save to. Defaults to image.png.
        magnification:
            magnification. Use it to render high res images.
        image_format:
            choose between jpeg, png. Png is the default.
    """
    render_large = vtk.vtkRenderLargeImage()
    render_large.SetInput(self.ren)
    if image_format == "jpeg":
        writer = vtk.vtkJPEGWriter()
        writer.SetQuality(80)
    else:
        # any value other than "jpeg" falls through to PNG output
        writer = vtk.vtkPNGWriter()
    render_large.SetMagnification(magnification)
    writer.SetFileName(filename)
    writer.SetInputConnection(render_large.GetOutputPort())
    self.ren_win.Render()
    writer.Write()
    del render_large
def redraw(self, reset_camera=False):
    """
    Redraw the render window.

    Args:
        reset_camera: Set to True to reset the camera to a
            pre-determined default for each structure. Defaults to False.
    """
    self.ren.RemoveAllViewProps()
    self.picker = None
    self.add_picker_fixed()
    # text mapper used for the on-screen help overlay
    self.helptxt_mapper = vtk.vtkTextMapper()
    tprops = self.helptxt_mapper.GetTextProperty()
    tprops.SetFontSize(14)
    tprops.SetFontFamilyToTimes()
    tprops.SetColor(0, 0, 0)
    # re-add the current structure (if any) after clearing the scene
    if self.structure is not None:
        self.set_structure(self.structure, reset_camera)
    self.ren_win.Render()
def orthongonalize_structure(self):
    """Re-display the current structure in a sanitized cell.

    NOTE(review): the misspelled method name is part of the public API
    (bound to the 'o' keyboard shortcut) and is therefore kept as-is.
    """
    if self.structure is not None:
        self.set_structure(self.structure.copy(sanitize=True))
    self.ren_win.Render()
def display_help(self):
    """
    Display the help for various keyboard shortcuts.
    """
    helptxt = ["h : Toggle help",
               "A/a, B/b or C/c : Increase/decrease cell by one a,"
               " b or c unit vector", "# : Toggle showing of polyhedrons",
               "-: Toggle showing of bonds", "r : Reset camera direction",
               "[/]: Decrease or increase poly_radii_tol_factor "
               "by 0.05. Value = " + str(self.poly_radii_tol_factor),
               "Up/Down: Rotate view along Up direction by 90 "
               "clockwise/anticlockwise",
               "Left/right: Rotate view along camera direction by "
               "90 clockwise/anticlockwise", "s: Save view to image.png",
               "o: Orthogonalize structure"]
    self.helptxt_mapper.SetInput("\n".join(helptxt))
    # anchor the help text at the bottom-left corner of the window
    self.helptxt_actor.SetPosition(10, 10)
    self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True):
    """
    Add a structure to the visualizer.

    Args:
        structure: structure to visualize
        reset_camera: Set to True to reset the camera to a default
            determined based on the structure.
    """
    self.ren.RemoveAllViewProps()
    has_lattice = hasattr(structure, "lattice")
    if has_lattice:
        # periodic structure: wrap sites into the unit cell and expand to
        # the currently selected supercell
        s = Structure.from_sites(structure, to_unit_cell=True)
        s.make_supercell(self.supercell)
    else:
        s = structure
    inc_coords = []
    for site in s:
        self.add_site(site)
        inc_coords.append(site.coords)
    count = 0
    labels = ["a", "b", "c"]
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    if has_lattice:
        matrix = s.lattice.matrix
    # draw the unit-cell edges and the o/a/b/c labels
    if self.show_unit_cell and has_lattice:
        #matrix = s.lattice.matrix
        self.add_text([0, 0, 0], "o")
        for vec in matrix:
            self.add_line((0, 0, 0), vec, colors[count])
            self.add_text(vec, labels[count], colors[count])
            count += 1
        for (vec1, vec2) in itertools.permutations(matrix, 2):
            self.add_line(vec1, vec1 + vec2)
        for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
            self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
    if self.show_bonds or self.show_polyhedron:
        # treat the most electronegative element as the anion; bonds and
        # polyhedra are drawn from the other sites to surrounding anions
        elements = sorted(s.composition.elements, key=lambda a: a.X)
        anion = elements[-1]

        def contains_anion(site):
            # True if any species on the site is the anion element
            for sp in site.species_and_occu.keys():
                if sp.symbol == anion.symbol:
                    return True
            return False

        anion_radius = anion.average_ionic_radius
        for site in s:
            exclude = False
            max_radius = 0
            color = np.array([0, 0, 0])
            for sp, occu in site.species_and_occu.items():
                if sp.symbol in self.excluded_bonding_elements \
                        or sp == anion:
                    exclude = True
                    break
                max_radius = max(max_radius, sp.average_ionic_radius)
                # occupancy-weighted average color for disordered sites
                color = color + \
                    occu * np.array(self.el_color_mapping.get(sp.symbol,
                                                              [0, 0, 0]))
            if not exclude:
                # bonding cutoff: scaled sum of site and anion radii
                max_radius = (1 + self.poly_radii_tol_factor) * \
                    (max_radius + anion_radius)
                nn = structure.get_neighbors(site, max_radius)
                nn_sites = []
                for nnsite, dist in nn:
                    if contains_anion(nnsite):
                        nn_sites.append(nnsite)
                        if not in_coord_list(inc_coords, nnsite.coords):
                            self.add_site(nnsite)
                if self.show_bonds:
                    self.add_bonds(nn_sites, site)
                if self.show_polyhedron:
                    color = [i / 255 for i in color]
                    self.add_polyhedron(nn_sites, site, color)
    if self.show_help:
        self.helptxt_actor = vtk.vtkActor2D()
        self.helptxt_actor.VisibilityOn()
        self.helptxt_actor.SetMapper(self.helptxt_mapper)
        self.ren.AddActor(self.helptxt_actor)
        self.display_help()
    camera = self.ren.GetActiveCamera()
    if reset_camera:
        if has_lattice:
            #Adjust the camera for best viewing
            lengths = s.lattice.abc
            pos = (matrix[1] + matrix[2]) * 0.5 + \
                matrix[0] * max(lengths) / lengths[0] * 3.5
            camera.SetPosition(pos)
            camera.SetViewUp(matrix[2])
            camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
        else:
            # molecule: look at the center of mass from outside the
            # farthest site
            origin = s.center_of_mass
            max_site = max(
                s, key=lambda site: site.distance_from_point(origin))
            camera.SetPosition(origin + 5 * (max_site.coords - origin))
            camera.SetFocalPoint(s.center_of_mass)
    self.structure = structure
    self.title = s.composition.formula
def zoom(self, factor):
    """Zoom the active camera in (factor > 1) or out (factor < 1) and
    re-render the window."""
    self.ren.GetActiveCamera().Zoom(factor)
    self.ren_win.Render()
def show(self):
    """
    Display the visualizer.
    """
    self.iren.Initialize()
    self.ren_win.SetSize(800, 800)
    self.ren_win.SetWindowName(self.title)
    self.ren_win.Render()
    # hand control over to the interactor's event loop (blocks)
    self.iren.Start()
def add_site(self, site):
    """
    Add a site to the render window. The site is displayed as a sphere, the
    color of which is determined based on the element. Partially occupied
    sites are displayed as a single element color, though the site info
    still shows the partial occupancy.

    Args:
        site: Site to add.
    """
    start_angle = 0
    radius = 0
    total_occu = 0
    # occupancy-weighted radius over all species occupying this site
    for specie, occu in site.species_and_occu.items():
        radius += occu * (specie.ionic_radius
                          if isinstance(specie, Specie)
                          and specie.ionic_radius
                          else specie.average_ionic_radius)
        total_occu += occu
    # draw one sphere wedge per species, proportional to its occupancy
    for specie, occu in site.species_and_occu.items():
        if not specie:
            color = (1, 1, 1)
        elif specie.symbol in self.el_color_mapping:
            color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
        else:
            # BUGFIX: an element missing from the color mapping previously
            # left ``color`` unbound (NameError on the first species) or
            # silently reused the previous species' color; fall back to
            # white instead.
            color = (1, 1, 1)
        mapper = self.add_partial_sphere(site.coords, radius, color,
                                         start_angle, start_angle + 360 * occu)
        self.mapper_map[mapper] = [site]
        start_angle += 360 * occu
    # render any vacancy fraction as a white wedge
    if total_occu < 1:
        mapper = self.add_partial_sphere(site.coords, radius, (1, 1, 1),
                                         start_angle, start_angle + 360 * (1 - total_occu))
        self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360,
                       opacity=1):
    """Render a (partial) sphere and return its mapper.

    Args:
        coords: cartesian center of the sphere.
        radius: ionic radius used to scale the rendered sphere.
        color: RGB color with components in [0, 1].
        start: start angle (degrees) of the rendered wedge.
        end: end angle (degrees) of the rendered wedge.
        opacity: actor opacity.

    Returns:
        The vtkPolyDataMapper for the sphere (used as a key in
        ``mapper_map`` for picking).
    """
    sphere = vtk.vtkSphereSource()
    sphere.SetCenter(coords)
    # rendered size is a small constant plus a weak scaling with radius
    sphere.SetRadius(0.2 + 0.002 * radius)
    sphere.SetThetaResolution(18)
    sphere.SetPhiResolution(18)
    sphere.SetStartTheta(start)
    sphere.SetEndTheta(end)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(sphere.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)
    actor.GetProperty().SetOpacity(opacity)
    self.ren.AddActor(actor)
    return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
    """
    Add text at a coordinate.

    Args:
        coords: Coordinates to add text at.
        text: Text to place.
        color: Color for text as RGB. Defaults to black.
    """
    source = vtk.vtkVectorText()
    source.SetText(text)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    follower = vtk.vtkFollower()
    follower.SetMapper(mapper)
    follower.GetProperty().SetColor(color)
    follower.SetPosition(coords)
    follower.SetScale(0.5)
    self.ren.AddActor(follower)
    # a follower always faces the camera, so the label stays readable
    follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
    """
    Adds a line.

    Args:
        start: Starting coordinates for line.
        end: Ending coordinates for line.
        color: Color for text as RGB. Defaults to grey.
        width: Width of line. Defaults to 1.
    """
    source = vtk.vtkLineSource()
    source.SetPoint1(start)
    source.SetPoint2(end)
    # attach string labels to the two end points
    vertexIDs = vtk.vtkStringArray()
    vertexIDs.SetNumberOfComponents(1)
    vertexIDs.SetName("VertexIDs")
    # Set the vertex labels
    vertexIDs.InsertNextValue("a")
    vertexIDs.InsertNextValue("b")
    source.GetOutput().GetPointData().AddArray(vertexIDs)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)
    actor.GetProperty().SetLineWidth(width)
    self.ren.AddActor(actor)
def add_polyhedron(self, neighbors, center, color, opacity=1.0,
                   draw_edges=False, edges_color=[0.0, 0.0, 0.0],
                   edges_linewidth=2):
    """
    Adds a polyhedron.

    Args:
        neighbors: Neighbors of the polyhedron (the vertices).
        center: The atom in the center of the polyhedron.
        color: Color for text as RGB, or the string 'element' to use the
            color of the dominant species on ``center``.
        opacity: Opacity of the polyhedron
        draw_edges: If set to True, the a line will be drawn at each edge
        edges_color: Color of the line for the edges
        edges_linewidth: Width of the line drawn for the edges

    NOTE(review): ``edges_color`` uses a mutable default argument; it is
    never mutated here, but a tuple default would be safer.
    """
    points = vtk.vtkPoints()
    # convex hull of the neighbor positions forms the polyhedron
    conv = vtk.vtkConvexPointSet()
    for i in range(len(neighbors)):
        x, y, z = neighbors[i].coords
        points.InsertPoint(i, x, y, z)
        conv.GetPointIds().InsertId(i, i)
    grid = vtk.vtkUnstructuredGrid()
    grid.Allocate(1, 1)
    grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
    grid.SetPoints(points)
    dsm = vtk.vtkDataSetMapper()
    # register center + vertices with the picker lookup table
    polysites = [center]
    polysites.extend(neighbors)
    self.mapper_map[dsm] = polysites
    if vtk.VTK_MAJOR_VERSION <= 5:
        dsm.SetInputConnection(grid.GetProducerPort())
    else:
        dsm.SetInputData(grid)
    ac = vtk.vtkActor()
    #ac.SetMapper(mapHull)
    ac.SetMapper(dsm)
    ac.GetProperty().SetOpacity(opacity)
    if color == 'element':
        # If partial occupations are involved, the color of the specie with
        # the highest occupation is used
        myoccu = 0.0
        for specie, occu in center.species_and_occu.items():
            if occu > myoccu:
                myspecie = specie
                myoccu = occu
        color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
        ac.GetProperty().SetColor(color)
    else:
        ac.GetProperty().SetColor(color)
    if draw_edges:
        ac.GetProperty().SetEdgeColor(edges_color)
        ac.GetProperty().SetLineWidth(edges_linewidth)
        ac.GetProperty().EdgeVisibilityOn()
    self.ren.AddActor(ac)
def add_triangle(self, neighbors, color, center=None, opacity=0.4,
                 draw_edges=False, edges_color=[0.0, 0.0, 0.0],
                 edges_linewidth=2):
    """
    Adds a triangular surface between three atoms.

    Args:
        neighbors: Atoms between which a triangle will be drawn.
        color: Color for triangle as RGB, or 'element' to use the color
            of the dominant species on ``center``.
        center: The "central atom" of the triangle
        opacity: opacity of the triangle
        draw_edges: If set to True, the a line will be drawn at each edge
        edges_color: Color of the line for the edges
        edges_linewidth: Width of the line drawn for the edges

    Raises:
        ValueError: if color == 'element' but no ``center`` is given.

    NOTE(review): uses the VTK<=5 ``mapper.SetInput`` API, unlike other
    methods in this class that branch on VTK_MAJOR_VERSION.
    """
    points = vtk.vtkPoints()
    triangle = vtk.vtkTriangle()
    for ii in range(3):
        points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y,
                               neighbors[ii].z)
        triangle.GetPointIds().SetId(ii, ii)
    triangles = vtk.vtkCellArray()
    triangles.InsertNextCell(triangle)
    # polydata object
    trianglePolyData = vtk.vtkPolyData()
    trianglePolyData.SetPoints( points )
    trianglePolyData.SetPolys( triangles )
    # mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(trianglePolyData)
    ac = vtk.vtkActor()
    ac.SetMapper(mapper)
    ac.GetProperty().SetOpacity(opacity)
    if color == 'element':
        if center is None:
            raise ValueError(
                'Color should be chosen according to the central atom, '
                'and central atom is not provided')
        # If partial occupations are involved, the color of the specie with
        # the highest occupation is used
        myoccu = 0.0
        for specie, occu in center.species_and_occu.items():
            if occu > myoccu:
                myspecie = specie
                myoccu = occu
        color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
        ac.GetProperty().SetColor(color)
    else:
        ac.GetProperty().SetColor(color)
    if draw_edges:
        ac.GetProperty().SetEdgeColor(edges_color)
        ac.GetProperty().SetLineWidth(edges_linewidth)
        ac.GetProperty().EdgeVisibilityOn()
    self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
    """Render a list of planar faces as triangulated surfaces.

    Triangles are drawn directly; polygons with more than three vertices
    are fan-triangulated around their centroid. (An always-disabled
    "ruled surface" branch for quadrilaterals was removed — it was dead
    code behind an ``elif False`` guard.)

    Args:
        faces: list of faces; each face is a sequence of >= 3 cartesian
            points.
        color: RGB color applied to all faces.
        opacity: opacity of the rendered surfaces.

    Raises:
        ValueError: if a face has fewer than 3 points.

    NOTE(review): uses the VTK<=5 ``mapper.SetInput`` API, unlike other
    methods in this class that branch on VTK_MAJOR_VERSION.
    """
    for face in faces:
        if len(face) == 3:
            points = vtk.vtkPoints()
            triangle = vtk.vtkTriangle()
            for ii in range(3):
                points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
                triangle.GetPointIds().SetId(ii, ii)
            triangles = vtk.vtkCellArray()
            triangles.InsertNextCell(triangle)
            trianglePolyData = vtk.vtkPolyData()
            trianglePolyData.SetPoints(points)
            trianglePolyData.SetPolys(triangles)
            mapper = vtk.vtkPolyDataMapper()
            mapper.SetInput(trianglePolyData)
            ac = vtk.vtkActor()
            ac.SetMapper(mapper)
            ac.GetProperty().SetOpacity(opacity)
            ac.GetProperty().SetColor(color)
            self.ren.AddActor(ac)
        elif len(face) > 3:
            # fan-triangulate the polygon around its centroid
            # BUGFIX: np.float was deprecated in NumPy 1.20 and later
            # removed; use the builtin float instead.
            center = np.zeros(3, float)
            for site in face:
                center += site
            center /= float(len(face))
            for ii in range(len(face)):
                points = vtk.vtkPoints()
                triangle = vtk.vtkTriangle()
                points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
                ii2 = np.mod(ii + 1, len(face))
                points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
                points.InsertNextPoint(center[0], center[1], center[2])
                # use a distinct index so the outer loop variable is not
                # clobbered by this inner loop
                for jj in range(3):
                    triangle.GetPointIds().SetId(jj, jj)
                triangles = vtk.vtkCellArray()
                triangles.InsertNextCell(triangle)
                trianglePolyData = vtk.vtkPolyData()
                trianglePolyData.SetPoints(points)
                trianglePolyData.SetPolys(triangles)
                mapper = vtk.vtkPolyDataMapper()
                mapper.SetInput(trianglePolyData)
                ac = vtk.vtkActor()
                ac.SetMapper(mapper)
                ac.GetProperty().SetOpacity(opacity)
                ac.GetProperty().SetColor(color)
                self.ren.AddActor(ac)
        else:
            raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type='line', linewidth=2, color=(0.0, 0.0, 0.0)):
    """
    Adds a set of line segments to the scene as a single actor.

    Args:
        edges: Sequence of edges, each a pair of 3d points (start, end).
        type: Unused; kept for backward compatibility with existing
            callers.
        linewidth: Width of the rendered lines. Defaults to 2.
        color: RGB color of the lines. Defaults to black. (An immutable
            tuple is used as the default instead of a list to avoid the
            shared-mutable-default-argument pitfall.)
    """
    points = vtk.vtkPoints()
    lines = vtk.vtkCellArray()
    # Two points per edge: indices 2*i and 2*i + 1, one 2-point cell each.
    for iedge, edge in enumerate(edges):
        points.InsertPoint(2 * iedge, edge[0])
        points.InsertPoint(2 * iedge + 1, edge[1])
        lines.InsertNextCell(2)
        lines.InsertCellPoint(2 * iedge)
        lines.InsertCellPoint(2 * iedge + 1)
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetLines(lines)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(polydata)
    ac = vtk.vtkActor()
    ac.SetMapper(mapper)
    ac.GetProperty().SetColor(color)
    ac.GetProperty().SetLineWidth(linewidth)
    self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None,
              radius=0.1):
    """
    Adds bonds for a site.

    Args:
        neighbors: Neighbors of the site.
        center: The site in the center for all bonds.
        color: Color of the tubes representing the bonds
        opacity: Opacity of the tubes representing the bonds
        radius: Radius of tubes representing the bonds
    """
    # Point 0 is the bond center; points 1..n are the neighbor coordinates.
    # One 2-point line cell connects the center to each neighbor.
    points = vtk.vtkPoints()
    points.InsertPoint(0, center.x, center.y, center.z)
    n = len(neighbors)
    lines = vtk.vtkCellArray()
    for i in range(n):
        points.InsertPoint(i + 1, neighbors[i].coords)
        lines.InsertNextCell(2)
        lines.InsertCellPoint(0)
        lines.InsertCellPoint(i + 1)
    pd = vtk.vtkPolyData()
    pd.SetPoints(points)
    pd.SetLines(lines)
    # Wrap the line skeleton in tubes of the requested radius; the input
    # API changed between VTK 5 and 6, hence the version check.
    tube = vtk.vtkTubeFilter()
    if vtk.VTK_MAJOR_VERSION <= 5:
        tube.SetInputConnection(pd.GetProducerPort())
    else:
        tube.SetInputData(pd)
    tube.SetRadius(radius)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(tube.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # Only override the defaults when explicitly requested.
    if opacity is not None:
        actor.GetProperty().SetOpacity(opacity)
    if color is not None:
        actor.GetProperty().SetColor(color)
    self.ren.AddActor(actor)
def add_picker_fixed(self):
    """Set up a cell picker whose pick results are rendered into the fixed
    2d help-text overlay (helptxt_actor) rather than into the 3d scene."""
    # Create a cell picker.
    picker = vtk.vtkCellPicker()
    # Create a Python function to create the text for the text mapper used
    # to display the results of picking.
    def annotate_pick(obj, event):
        if picker.GetCellId() < 0 and not self.show_help:
            # Nothing picked and no help requested: hide the overlay.
            self.helptxt_actor.VisibilityOff()
        else:
            mapper = picker.GetMapper()
            if mapper in self.mapper_map:
                # One line per site: species, fractional coords, then
                # cartesian coords in brackets.
                output = []
                for site in self.mapper_map[mapper]:
                    row = ["{} - ".format(site.species_string),
                           ", ".join(["{:.3f}".format(c)
                                      for c in site.frac_coords]),
                           "[" + ", ".join(["{:.3f}".format(c)
                                            for c in site.coords]) +
                           "]"]
                    output.append("".join(row))
                self.helptxt_mapper.SetInput("\n".join(output))
                self.helptxt_actor.SetPosition(10, 10)
                self.helptxt_actor.VisibilityOn()
                self.show_help = False
    self.picker = picker
    picker.AddObserver("EndPickEvent", annotate_pick)
    self.iren.SetPicker(picker)
def add_picker(self):
    """Set up a cell picker that annotates the picked site with a 3d text
    follower placed at the pick position in the scene."""
    # Create a cell picker.
    picker = vtk.vtkCellPicker()
    # Create a Python function to create the text for the text mapper used
    # to display the results of picking.
    source = vtk.vtkVectorText()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    # A follower always faces the active camera, so the label stays
    # readable as the view rotates.
    follower = vtk.vtkFollower()
    follower.SetMapper(mapper)
    follower.GetProperty().SetColor((0, 0, 0))
    follower.SetScale(0.2)
    self.ren.AddActor(follower)
    follower.SetCamera(self.ren.GetActiveCamera())
    follower.VisibilityOff()
    def annotate_pick(obj, event):
        if picker.GetCellId() < 0:
            # Clicked empty space: hide any previous annotation.
            follower.VisibilityOff()
        else:
            pick_pos = picker.GetPickPosition()
            mapper = picker.GetMapper()
            if mapper in self.mapper_map:
                site = self.mapper_map[mapper]
                output = [site.species_string, "Frac. coords: " +
                          " ".join(["{:.4f}".format(c)
                                    for c in
                                    site.frac_coords])]
                source.SetText("\n".join(output))
                follower.SetPosition(pick_pos)
                follower.VisibilityOn()
    picker.AddObserver("EndPickEvent", annotate_pick)
    self.picker = picker
    self.iren.SetPicker(picker)
class StructureInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
    """
    A custom interactor style for visualizing structures.

    Left-click without motion performs a pick; various key bindings
    manipulate the supercell, toggles and view of the parent visualizer.
    """

    def __init__(self, parent):
        self.parent = parent
        # Initialize here so leftButtonReleaseEvent cannot raise an
        # AttributeError if a release is observed before any press/move
        # event has set the flag.
        self.mouse_motion = 0
        self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
        self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
        self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
        self.AddObserver("KeyPressEvent", self.keyPressEvent)

    def leftButtonPressEvent(self, obj, event):
        self.mouse_motion = 0
        self.OnLeftButtonDown()
        return

    def mouseMoveEvent(self, obj, event):
        self.mouse_motion = 1
        self.OnMouseMove()
        return

    def leftButtonReleaseEvent(self, obj, event):
        # Only a press+release with no intervening motion counts as a pick;
        # otherwise the drag was a camera rotation.
        ren = obj.GetCurrentRenderer()
        iren = ren.GetRenderWindow().GetInteractor()
        if self.mouse_motion == 0:
            pos = iren.GetEventPosition()
            iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
        self.OnLeftButtonUp()
        return

    def keyPressEvent(self, obj, event):
        parent = obj.GetCurrentRenderer().parent
        sym = parent.iren.GetKeySym()
        # Membership is tested against a tuple rather than the original
        # `sym in "ABCabc"`: that form is a *substring* test, so an empty
        # keysym ("") or a multi-character one such as "bc" would wrongly
        # match and trigger a spurious redraw.
        if sym in ("A", "B", "C", "a", "b", "c"):
            # Upper case grows the supercell along that axis; lower case
            # shrinks it, never below 1.
            if sym == "A":
                parent.supercell[0][0] += 1
            elif sym == "B":
                parent.supercell[1][1] += 1
            elif sym == "C":
                parent.supercell[2][2] += 1
            elif sym == "a":
                parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
            elif sym == "b":
                parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
            elif sym == "c":
                parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
            parent.redraw()
        elif sym == "numbersign":
            parent.show_polyhedron = not parent.show_polyhedron
            parent.redraw()
        elif sym == "minus":
            parent.show_bonds = not parent.show_bonds
            parent.redraw()
        elif sym == "bracketleft":
            # Parses as `x -= (0.05 if cond else 0)`: decrement only while
            # the tolerance factor is still positive.
            parent.poly_radii_tol_factor -= 0.05 \
                if parent.poly_radii_tol_factor > 0 else 0
            parent.redraw()
        elif sym == "bracketright":
            parent.poly_radii_tol_factor += 0.05
            parent.redraw()
        elif sym == "h":
            parent.show_help = not parent.show_help
            parent.redraw()
        elif sym == "r":
            parent.redraw(True)
        elif sym == "s":
            parent.write_image("image.png")
        elif sym == "Up":
            parent.rotate_view(1, 90)
        elif sym == "Down":
            parent.rotate_view(1, -90)
        elif sym == "Left":
            parent.rotate_view(0, -90)
        elif sym == "Right":
            parent.rotate_view(0, 90)
        elif sym == "o":
            parent.orthongonalize_structure()
            parent.redraw()
        self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20,
               bitrate="10000k", quality=1, **kwargs):
    """
    Generate a movie from a sequence of structures using vtk and ffmpeg.

    Args:
        structures ([Structure]): sequence of structures
        output_filename (str): filename for structure output. defaults to
            movie.mp4
        zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
        fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
            quality).
        quality (int): A quality scale. Defaults to 1.
        \*\*kwargs: Any kwargs supported by StructureVis to modify the images
            generated.
    """
    if not structures:
        # math.log10(0) below would raise ValueError; an empty sequence
        # simply means there is nothing to render.
        return
    vis = StructureVis(**kwargs)
    vis.show_help = False
    vis.redraw()
    vis.zoom(zoom)
    # Zero-pad frame numbers (image00.png, ...) so ffmpeg reads them in order.
    sigfig = int(math.floor(math.log10(len(structures))) + 1)
    filename = "image{0:0" + str(sigfig) + "d}.png"
    for i, s in enumerate(structures):
        vis.set_structure(s)
        vis.write_image(filename.format(i), 3)
    # ffmpeg uses C-style %0Nd patterns for image sequences.
    filename = "image%0" + str(sigfig) + "d.png"
    args = ["ffmpeg", "-y", "-i", filename,
            "-q:v", str(quality), "-r", str(fps), "-b:v", str(bitrate),
            output_filename]
    # Wait for ffmpeg to finish so the movie actually exists when this
    # function returns. The previous fire-and-forget Popen returned
    # immediately and could leave a zombie process behind.
    subprocess.call(args)
| migueldiascosta/pymatgen | pymatgen/vis/structure_vtk.py | Python | mit | 34,037 | [
"Jmol",
"VTK",
"pymatgen"
] | d181b4c5ecf805161a03a80672fc90afdda6f385b4b463503d06c7d0de47603c |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import gzip
import bz2
from tempfile import gettempdir
import itertools
import requests
from cachecontrol import CacheControl
from cachecontrol.caches import FileCache
from skbio.io import IOSourceError
from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
WrappedBufferedRandom)
def get_io_sources():
    """Return the ordered tuple of IO source classes to try."""
    # Order is significant: the first source able to handle the input
    # short-circuits the rest.
    sources = (
        HTTPSource,
        FilePathSource,
        BytesIOSource,
        BufferedIOSource,
        TextIOSource,
        IterableSource,
    )
    return sources
def _compressors():
    """Return the tuple of concrete compression handler classes."""
    handlers = (
        GzipCompressor,
        BZ2Compressor,
    )
    return handlers
def get_compression_handler(name):
    """Look up a compression handler class by name.

    Returns ``False`` when `name` does not match any known handler.
    """
    handlers = dict((cls.name, cls) for cls in _compressors())
    handlers['auto'] = AutoCompressor
    return handlers.get(name, False)
class IOSource:
    """Base class for IO sources.

    A source wraps a candidate `file` object/path plus open `options` and
    reports whether it can produce a reader or writer for it. The base
    implementation can do neither.
    """

    # Whether the stream produced by this source should be closed by skbio.
    closeable = True

    def __init__(self, file, options):
        self.file = file
        self.options = options

    def can_read(self):
        """Return True when this source can open `file` for reading."""
        return False

    def can_write(self):
        """Return True when this source can open `file` for writing."""
        return False

    def get_reader(self):
        """Return a readable stream; subclasses must override."""
        raise NotImplementedError()

    def get_writer(self):
        """Return a writable stream; subclasses must override."""
        raise NotImplementedError()
class Compressor(IOSource):
    """Base class for compression-format handlers (gzip, bz2, ...)."""

    # Whether compressed output can be produced incrementally (streamed).
    streamable = True
    # Registry name used by get_compression_handler(); '' for the base.
    name = ''

    def can_write(self):
        # Any compressor can wrap an output stream, regardless of content.
        return True
class FilePathSource(IOSource):
    """Source for filesystem paths supplied as strings."""

    def can_read(self):
        return isinstance(self.file, str)

    def can_write(self):
        # Any string path we could read from can also be opened for writing.
        return self.can_read()

    def get_reader(self):
        return io.open(self.file, 'rb')

    def get_writer(self):
        return io.open(self.file, 'wb')
class HTTPSource(IOSource):
    """Source for http(s) URLs, fetched with an on-disk response cache."""

    def can_read(self):
        if not isinstance(self.file, str):
            return False
        scheme = requests.compat.urlparse(self.file).scheme
        return scheme in {'http', 'https'}

    def get_reader(self):
        session = CacheControl(requests.Session(),
                               cache=FileCache(gettempdir()))
        response = session.get(self.file)
        # if the response is not 200, an exception will be raised
        response.raise_for_status()
        # Buffer the payload so downstream code can peek()/seek it.
        return io.BufferedReader(io.BytesIO(response.content))
class BytesIOSource(IOSource):
    """Source for in-memory ``io.BytesIO`` objects."""

    # The caller owns the BytesIO; do not close it on their behalf.
    closeable = False

    def can_read(self):
        return isinstance(self.file, io.BytesIO)

    def can_write(self):
        # A BytesIO is symmetric: writable exactly when readable.
        return self.can_read()

    def get_reader(self):
        # Wrapped so the buffered-reader API (peek, etc.) is available.
        return WrappedBufferedRandom(self.file)

    def get_writer(self):
        return self.get_reader()
class BufferedIOSource(IOSource):
    """Source for already-buffered binary file objects."""

    closeable = False

    def can_read(self):
        # `peek` is part of the API we guarantee, so checking for
        # io.BufferedIOBase alone is not enough. The C implementation of
        # io.BufferedRandom also does not register as a subclass of
        # io.BufferedReader/Writer, hence the explicit class pair.
        return isinstance(self.file, (io.BufferedReader, io.BufferedRandom))

    def can_write(self):
        return isinstance(self.file, (io.BufferedWriter, io.BufferedRandom))

    def get_reader(self):
        # Already in the right form; hand it back untouched.
        return self.file

    def get_writer(self):
        return self.file
class TextIOSource(IOSource):
    """Source for already-open text-mode file objects."""

    closeable = False

    def can_read(self):
        f = self.file
        return isinstance(f, io.TextIOBase) and f.readable()

    def can_write(self):
        f = self.file
        return isinstance(f, io.TextIOBase) and f.writable()

    def get_reader(self):
        return self.file

    def get_writer(self):
        return self.file
class IterableSource(IOSource):
    """Source for iterables of strings (lists of lines, generators, ...)."""

    def can_read(self):
        # Peeking at the first element consumes it from a generator, so the
        # re-chained iterator is stashed in `self.repaired` for get_reader().
        if hasattr(self.file, '__iter__'):
            iterator = iter(self.file)
            head = next(iterator, None)
            if head is None:
                # Empty iterable: readable, yields nothing.
                # (NOTE(review): an iterable whose *first element* is None
                # is indistinguishable from an empty one here.)
                self.repaired = []
                return True
            if isinstance(head, str):
                self.repaired = itertools.chain([head], iterator)
                return True
            else:
                # We may have mangled a generator at this point, so just abort
                raise IOSourceError(
                    "Could not open source: %r (mode: %r)" %
                    (self.file, self.options['mode']))
        return False

    def can_write(self):
        # Anything both appendable and iterable (e.g. a list) is writable.
        return hasattr(self.file, 'append') and hasattr(self.file, '__iter__')

    def get_reader(self):
        return IterableStringReaderIO(self.repaired,
                                      newline=self.options['newline'])

    def get_writer(self):
        return IterableStringWriterIO(self.file,
                                      newline=self.options['newline'])
class GzipCompressor(Compressor):
    """Handler for gzip-compressed streams."""

    name = 'gzip'
    streamable = True

    def can_read(self):
        # gzip streams start with the magic bytes 0x1f 0x8b.
        magic = self.file.peek(2)[:2]
        return magic == b'\x1f\x8b'

    def get_reader(self):
        return gzip.GzipFile(fileobj=self.file)

    def get_writer(self):
        level = self.options['compresslevel']
        return gzip.GzipFile(fileobj=self.file, mode='wb',
                             compresslevel=level)
class BZ2Compressor(Compressor):
    """Handler for bzip2-compressed streams."""

    name = 'bz2'
    # bz2 output cannot be produced incrementally by this handler.
    streamable = False

    def can_read(self):
        # bzip2 streams start with the ASCII magic "BZh".
        magic = self.file.peek(3)[:3]
        return magic == b'BZh'

    def get_reader(self):
        return bz2.BZ2File(self.file, mode='rb')

    def get_writer(self):
        level = self.options['compresslevel']
        return bz2.BZ2File(self.file, mode='wb',
                           compresslevel=level)
class AutoCompressor(Compressor):
    """Compressor that sniffs the input format on read.

    Writing is a passthrough, so `streamable` is irrelevant here.
    """

    streamable = True  # We can't write, so it doesn't matter
    name = 'auto'

    def get_reader(self):
        # Delegate to the first concrete compressor that recognizes the
        # stream; fall back to the raw file when none match.
        for handler_cls in _compressors():
            handler = handler_cls(self.file, self.options)
            if handler.can_read():
                return handler.get_reader()
        return self.file

    def get_writer(self):
        return self.file
| anderspitman/scikit-bio | skbio/io/_iosources.py | Python | bsd-3-clause | 6,117 | [
"scikit-bio"
] | c70819950a84bfd9e2f45c5820c596e167f13aa3058f49c9328bd50f7fd5ca86 |
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib2
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
from .. import common
openerpweb = common.http
#----------------------------------------------------------
# OpenERP Web Controllers
#----------------------------------------------------------
def concat_xml(file_list):
    """Concatenate xml files

    Merges the direct children of every file's root element under a single
    new root (named after the first file's root tag), while computing a
    SHA1 checksum over the raw bytes of all files.

    :param list(str) file_list: list of files to check
    :returns: (concatenation_result, checksum)
    :rtype: (str, str)
    """
    checksum = hashlib.new('sha1')
    if not file_list:
        return '', checksum.hexdigest()

    root = None
    for fname in file_list:
        with open(fname, 'rb') as fp:
            contents = fp.read()
            checksum.update(contents)
            fp.seek(0)
            xml = ElementTree.parse(fp).getroot()

        if root is None:
            root = ElementTree.Element(xml.tag)
        #elif root.tag != xml.tag:
        #    raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))

        # Element.getchildren() was deprecated and removed in Python 3.9;
        # iterating the element yields the same direct children and works
        # on Python 2 as well.
        for child in list(xml):
            root.append(child)
    return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def concat_files(file_list, reader=None, intersperse=""):
    """ Concatenates contents of all provided files

    :param list(str) file_list: list of files to check
    :param function reader: reading procedure for each file
    :param str intersperse: string to intersperse between file contents
    :returns: (concatenation_result, checksum)
    :rtype: (str, str)
    """
    checksum = hashlib.new('sha1')
    if not file_list:
        return '', checksum.hexdigest()

    if reader is None:
        def reader(f):
            with open(f) as fp:
                return fp.read()

    # The checksum covers only the raw file contents, never the
    # interspersed separator.
    chunks = []
    for fname in file_list:
        chunk = reader(fname)
        checksum.update(chunk)
        chunks.append(chunk)

    return intersperse.join(chunks), checksum.hexdigest()
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>LibrERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body class="openerp" id="oe"></body>
</html>
"""
class WebClient(openerpweb.Controller):
    """Controller serving the web client itself: asset bundles (css/js/qweb),
    the bootstrap page, translations and version info."""
    _cp_path = "/web/webclient"

    def server_wide_modules(self, req):
        # Only the server-wide modules that actually ship a web manifest.
        addons = [i for i in req.config.server_wide_modules if i in openerpweb.addons_manifest]
        return addons

    def manifest_glob(self, req, addons, key):
        """Resolve the glob patterns listed under `key` in each addon's
        manifest.

        `addons` is either None (use server-wide modules) or a
        comma-separated list of addon names.

        :returns: list of (filesystem path, web path) tuples
        """
        if addons is None:
            addons = self.server_wide_modules(req)
        else:
            addons = addons.split(',')
        r = []
        for addon in addons:
            manifest = openerpweb.addons_manifest.get(addon, None)
            if not manifest:
                continue
            # ensure does not ends with /
            addons_path = os.path.join(manifest['addons_path'], '')[:-1]
            globlist = manifest.get(key, [])
            for pattern in globlist:
                for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
                    r.append((path, path[len(addons_path):]))
        return r

    def manifest_list(self, req, mods, extension):
        # Non-debug mode: a single bundled URL. Debug mode: one URL per
        # file, with the mtime appended as a cache-buster.
        if not req.debug:
            path = '/web/webclient/' + extension
            if mods is not None:
                path += '?mods=' + mods
            return [path]
        # re-normalize fs paths to URLs: split on fs path separator
        # ('/' or '\\' usually) and join on url path separator ('/')
        return ['%s?debug=%s' % ('/'.join(wp.split(os.path.sep)), os.path.getmtime(fp))
                for fp, wp in self.manifest_glob(req, mods, extension)]

    @openerpweb.jsonrequest
    def csslist(self, req, mods=None):
        return self.manifest_list(req, mods, 'css')

    @openerpweb.jsonrequest
    def jslist(self, req, mods=None):
        return self.manifest_list(req, mods, 'js')

    @openerpweb.jsonrequest
    def qweblist(self, req, mods=None):
        return self.manifest_list(req, mods, 'qweb')

    def get_last_modified(self, files):
        """ Returns the modification time of the most recently modified
        file provided

        :param list(str) files: names of files to check
        :return: most recent modification time amongst the fileset
        :rtype: datetime.datetime
        """
        files = list(files)
        if files:
            return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
                       for f in files)
        # Epoch fallback so If-Modified-Since comparisons still work.
        return datetime.datetime(1970, 1, 1)

    def make_conditional(self, req, response, last_modified=None, etag=None):
        """ Makes the provided response conditional based upon the request,
        and mandates revalidation from clients

        Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
        setting ``last_modified`` and ``etag`` correctly on the response object

        :param req: OpenERP request
        :type req: web.common.http.WebRequest
        :param response: Werkzeug response
        :type response: werkzeug.wrappers.Response
        :param datetime.datetime last_modified: last modification date of the response content
        :param str etag: some sort of checksum of the content (deep etag)
        :return: the response object provided
        :rtype: werkzeug.wrappers.Response
        """
        response.cache_control.must_revalidate = True
        response.cache_control.max_age = 0
        if last_modified:
            response.last_modified = last_modified
        if etag:
            response.set_etag(etag)
        return response.make_conditional(req.httprequest)

    @openerpweb.httprequest
    def css(self, req, mods=None):
        """Serve the concatenated css bundle, rewriting relative url() and
        @import references into absolute web paths."""
        files = list(self.manifest_glob(req, mods, 'css'))
        last_modified = self.get_last_modified(f[0] for f in files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        file_map = dict(files)

        # Match only relative references: quoted or bare, not absolute
        # paths and not http(s) URLs (negative lookahead).
        rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
        rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://)""", re.U)

        def reader(f):
            """read the a css file and absolutify all relative uris"""
            with open(f) as fp:
                data = fp.read()

            path = file_map[f]
            # convert FS path into web path
            web_dir = '/'.join(os.path.dirname(path).split(os.path.sep))

            data = re.sub(
                rx_import,
                r"""@import \1%s/""" % (web_dir,),
                data,
            )

            data = re.sub(
                rx_url,
                r"""url(\1%s/""" % (web_dir,),
                data,
            )
            return data

        content, checksum = concat_files((f[0] for f in files), reader)

        return self.make_conditional(
            req, req.make_response(content, [('Content-Type', 'text/css')]),
            last_modified, checksum)

    @openerpweb.httprequest
    def js(self, req, mods=None):
        """Serve the concatenated javascript bundle."""
        files = [f[0] for f in self.manifest_glob(req, mods, 'js')]
        last_modified = self.get_last_modified(files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        # ';' between files guards against scripts missing a trailing
        # semicolon.
        content, checksum = concat_files(files, intersperse=';')

        return self.make_conditional(
            req, req.make_response(content, [('Content-Type', 'application/javascript')]),
            last_modified, checksum)

    @openerpweb.httprequest
    def qweb(self, req, mods=None):
        """Serve the concatenated qweb template xml."""
        files = [f[0] for f in self.manifest_glob(req, mods, 'qweb')]
        last_modified = self.get_last_modified(files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        content, checksum = concat_xml(files)

        return self.make_conditional(
            req, req.make_response(content, [('Content-Type', 'text/xml')]),
            last_modified, checksum)

    @openerpweb.httprequest
    def home(self, req, s_action=None, **kw):
        """Render the bootstrap page that loads the whole web client."""
        js = "\n ".join('<script type="text/javascript" src="%s"></script>'%i for i in self.manifest_list(req, None, 'js'))
        css = "\n ".join('<link rel="stylesheet" href="%s">'%i for i in self.manifest_list(req, None, 'css'))

        r = html_template % {
            'js': js,
            'css': css,
            'modules': simplejson.dumps(self.server_wide_modules(req)),
            'init': 'new s.web.WebClient().start();',
        }
        return r

    @openerpweb.httprequest
    def login(self, req, db, login, key):
        # NOTE(review): credentials arrive as plain request parameters here
        # and may therefore end up in access logs.
        req.session.authenticate(db, login, key, {})
        redirect = werkzeug.utils.redirect('/web/webclient/home', 303)
        cookie_val = urllib2.quote(simplejson.dumps(req.session_id))
        redirect.set_cookie('session0|session_id', cookie_val)
        return redirect

    @openerpweb.jsonrequest
    def translations(self, req, mods, lang):
        """Load PO translations for `mods` in `lang` and its fallback
        languages, keeping only entries tagged with "openerp-web"."""
        lang_model = req.session.model('res.lang')
        ids = lang_model.search([("code", "=", lang)])
        if ids:
            lang_obj = lang_model.read(ids[0], ["direction", "date_format", "time_format",
                                                "grouping", "decimal_point", "thousands_sep"])
        else:
            lang_obj = None

        # Build the fallback chain: e.g. "fr_FR" also loads "fr",
        # "sr@latin" also loads "sr".
        if "_" in lang:
            separator = "_"
        else:
            separator = "@"
        langs = lang.split(separator)
        langs = [separator.join(langs[:x]) for x in range(1, len(langs) + 1)]

        transs = {}
        for addon_name in mods:
            transl = {"messages":[]}
            transs[addon_name] = transl
            addons_path = openerpweb.addons_manifest[addon_name]['addons_path']
            for l in langs:
                f_name = os.path.join(addons_path, addon_name, "i18n", l + ".po")
                if not os.path.exists(f_name):
                    continue
                try:
                    with open(f_name) as t_file:
                        po = babel.messages.pofile.read_po(t_file)
                except Exception:
                    # Missing or unreadable PO files are simply skipped.
                    continue
                for x in po:
                    if x.id and x.string and "openerp-web" in x.auto_comments:
                        transl["messages"].append({'id': x.id, 'string': x.string})
        return {"modules": transs,
                "lang_parameters": lang_obj}

    @openerpweb.jsonrequest
    def version_info(self, req):
        return {
            "version": common.release.version
        }
class Proxy(openerpweb.Controller):
    _cp_path = '/web/proxy'

    @openerpweb.jsonrequest
    def load(self, req, path):
        """ Proxies an HTTP request through a JSON request.

        It is strongly recommended to not request binary files through this,
        as the result will be a binary data blob as well.

        :param req: OpenERP request
        :param path: actual request path
        :return: file content
        """
        # Replays the request against our own WSGI app via Werkzeug's test
        # client instead of going through the network.
        from werkzeug.test import Client
        from werkzeug.wrappers import BaseResponse

        return Client(req.httprequest.app, BaseResponse).get(path).data
class Database(openerpweb.Controller):
    """Controllers for database management: list, create, drop, backup,
    duplicate, restore and admin password change. (Python 2 syntax.)"""
    _cp_path = "/web/database"

    @openerpweb.jsonrequest
    def get_list(self, req):
        # Filter the raw database list through the configured dbfilter
        # regex, where %h is the request host and %d its first label.
        proxy = req.session.proxy("db")
        dbs = proxy.list()
        h = req.httprequest.environ['HTTP_HOST'].split(':')[0]
        d = h.split('.')[0]
        r = req.config.dbfilter.replace('%h', h).replace('%d', d)
        dbs = [i for i in dbs if re.match(r, i)]
        return {"db_list": dbs}

    @openerpweb.jsonrequest
    def create(self, req, fields):
        # `fields` is a list of {'name': ..., 'value': ...} form entries.
        params = dict(map(operator.itemgetter('name', 'value'), fields))
        create_attrs = (
            params['super_admin_pwd'],
            params['db_name'],
            bool(params.get('demo_data')),
            params['db_lang'],
            params['create_admin_pwd']
        )
        return req.session.proxy("db").create_database(*create_attrs)

    @openerpweb.jsonrequest
    def drop(self, req, fields):
        password, db = operator.itemgetter(
            'drop_pwd', 'drop_db')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        try:
            return req.session.proxy("db").drop(password, db)
        except xmlrpclib.Fault, e:
            # Only AccessDenied is reported specifically.
            if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
                return {'error': e.faultCode, 'title': 'Drop Database'}
        return {'error': 'Could not drop database !', 'title': 'Drop Database'}

    @openerpweb.httprequest
    def backup(self, req, backup_db, backup_pwd, token):
        # Stream the dump back as a timestamped file attachment.
        db_dump = base64.b64decode(
            req.session.proxy("db").dump(backup_pwd, backup_db))
        filename = "%(db)s_%(timestamp)s.dump" % {
            'db': backup_db,
            'timestamp': datetime.datetime.utcnow().strftime(
                "%Y-%m-%d_%H-%M-%SZ")
        }
        return req.make_response(db_dump,
            [('Content-Type', 'application/octet-stream; charset=binary'),
             ('Content-Disposition', 'attachment; filename="' + filename + '"')],
            {'fileToken': int(token)}
        )

    @openerpweb.httprequest
    #_duplicate_database(self, db_original_name, db_name):
    def duplicate(self, req, backup_pwd, backup_db, new_db, token):
        try:
            req.session.proxy("db").duplicate_database(backup_pwd, backup_db, new_db)
            return ''
        except xmlrpclib.Fault, e:
            if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
                raise Exception("AccessDenied")

    @openerpweb.httprequest
    def restore(self, req, db_file, restore_pwd, new_db):
        try:
            # The XML-RPC transport expects base64-encoded dump content.
            data = base64.b64encode(db_file.read())
            req.session.proxy("db").restore(restore_pwd, new_db, data)
            return ''
        except xmlrpclib.Fault, e:
            if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
                raise Exception("AccessDenied")

    @openerpweb.jsonrequest
    def change_password(self, req, fields):
        old_password, new_password = operator.itemgetter(
            'old_pwd', 'new_pwd')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        try:
            return req.session.proxy("db").change_admin_password(old_password, new_password)
        except xmlrpclib.Fault, e:
            if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
                return {'error': e.faultCode, 'title': 'Change Password'}
        return {'error': 'Error, password not changed !', 'title': 'Change Password'}
def topological_sort(modules):
    """ Return a list of module names sorted so that their dependencies of the
    modules are listed before the module itself

    modules is a dict of {module_name: dependencies}

    :param modules: modules to sort
    :type modules: dict
    :returns: list(str)
    """
    # `values()` instead of `itervalues()` keeps this helper working on
    # both Python 2 and Python 3.
    dependencies = set(itertools.chain.from_iterable(modules.values()))
    # incoming edge: dependency on other module (if a depends on b, a has an
    # incoming edge from b, aka there's an edge from b to a)
    # outgoing edge: other module depending on this one

    # [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
    #L <- Empty list that will contain the sorted nodes
    L = []
    #S <- Set of all nodes with no outgoing edges (modules on which no other
    #     module depends)
    S = set(module for module in modules if module not in dependencies)

    visited = set()
    #function visit(node n)
    def visit(n):
        #if n has not been visited yet then
        if n not in visited:
            #mark n as visited
            visited.add(n)
            #change: n not web module, can not be resolved, ignore
            if n not in modules: return
            #for each node m with an edge from m to n do (dependencies of n)
            for m in modules[n]:
                #visit(m)
                visit(m)
            #add n to L
            L.append(n)
    #for each node n in S do
    for n in S:
        #visit(n)
        visit(n)
    return L
class Session(openerpweb.Controller):
_cp_path = "/web/session"
def session_info(self, req):
req.session.ensure_valid()
return {
"session_id": req.session_id,
"uid": req.session._uid,
"context": req.session.get_context() if req.session._uid else {},
"db": req.session._db,
"login": req.session._login,
"openerp_entreprise": req.session.openerp_entreprise(),
}
@openerpweb.jsonrequest
def get_session_info(self, req):
return self.session_info(req)
@openerpweb.jsonrequest
def authenticate(self, req, db, login, password, base_location=None):
wsgienv = req.httprequest.environ
release = common.release
env = dict(
base_location=base_location,
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
user_agent="%s / %s" % (release.name, release.version),
)
req.session.authenticate(db, login, password, env)
return self.session_info(req)
@openerpweb.jsonrequest
def change_password (self,req,fields):
old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':'All passwords have to be filled.','title': 'Change Password'}
if new_password != confirm_password:
return {'error': 'The new password and its confirmation must be identical.','title': 'Change Password'}
try:
if req.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': 'Original password incorrect, your password was not changed.', 'title': 'Change Password'}
return {'error': 'Error, password not changed !', 'title': 'Change Password'}
@openerpweb.jsonrequest
def sc_list(self, req):
return req.session.model('ir.ui.view_sc').get_sc(
req.session._uid, "ir.ui.menu", req.session.eval_context(req.context))
@openerpweb.jsonrequest
def get_lang_list(self, req):
try:
return {
'lang_list': (req.session.proxy("db").list_lang() or []),
'error': ""
}
except Exception, e:
return {"error": e, "title": "Languages"}
@openerpweb.jsonrequest
def modules(self, req):
# Compute available candidates module
loadable = openerpweb.addons_manifest
loaded = set(req.config.server_wide_modules)
candidates = [mod for mod in loadable if mod not in loaded]
# already installed modules have no dependencies
modules = dict.fromkeys(loaded, [])
# Compute auto_install modules that might be on the web side only
modules.update((name, openerpweb.addons_manifest[name].get('depends', []))
for name in candidates
if openerpweb.addons_manifest[name].get('auto_install'))
# Retrieve database installed modules
Modules = req.session.model('ir.module.module')
for module in Modules.search_read(
[('state','=','installed'), ('name','in', candidates)],
['name', 'dependencies_id']):
deps = module.get('dependencies_id')
if deps:
dependencies = map(
operator.itemgetter('name'),
req.session.model('ir.module.module.dependency').read(deps, ['name']))
modules[module['name']] = list(
set(modules.get(module['name'], []) + dependencies))
sorted_modules = topological_sort(modules)
return [module for module in sorted_modules if module not in loaded]
@openerpweb.jsonrequest
def eval_domain_and_context(self, req, contexts, domains,
group_by_seq=None):
""" Evaluates sequences of domains and contexts, composing them into
a single context, domain or group_by sequence.
:param list contexts: list of contexts to merge together. Contexts are
evaluated in sequence, all previous contexts
are part of their own evaluation context
(starting at the session context).
:param list domains: list of domains to merge together. Domains are
evaluated in sequence and appended to one another
(implicit AND), their evaluation domain is the
result of merging all contexts.
:param list group_by_seq: list of domains (which may be in a different
order than the ``contexts`` parameter),
evaluated in sequence, their ``'group_by'``
key is extracted if they have one.
:returns:
a 3-dict of:
context (``dict``)
the global context created by merging all of
``contexts``
domain (``list``)
the concatenation of all domains
group_by (``list``)
a list of fields to group by, potentially empty (in which case
no group by should be performed)
"""
context, domain = eval_context_and_domain(req.session,
common.nonliterals.CompoundContext(*(contexts or [])),
common.nonliterals.CompoundDomain(*(domains or [])))
group_by_sequence = []
for candidate in (group_by_seq or []):
ctx = req.session.eval_context(candidate, context)
group_by = ctx.get('group_by')
if not group_by:
continue
elif isinstance(group_by, basestring):
group_by_sequence.append(group_by)
else:
group_by_sequence.extend(group_by)
return {
'context': context,
'domain': domain,
'group_by': group_by_sequence
}
@openerpweb.jsonrequest
def save_session_action(self, req, the_action):
"""
This method store an action object in the session object and returns an integer
identifying that action. The method get_session_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
saved_actions = {"next":0, "actions":{}}
req.httpsession['saved_actions'] = saved_actions
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = the_action
saved_actions["next"] = key + 1
return key
@openerpweb.jsonrequest
def get_session_action(self, req, key):
"""
Gets back a previously saved action. This method can return None if the action
was saved since too much time (this case should be handled in a smart way).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
return None
return saved_actions["actions"].get(key)
@openerpweb.jsonrequest
def check(self, req):
    """ Asserts that the current session is still valid; the session
    object raises if it is not. """
    req.session.assert_valid()
    return None
@openerpweb.jsonrequest
def destroy(self, req):
    """ Flags the session for termination; the actual cleanup is
    performed later by the session machinery. """
    req.session._suicide = True
def eval_context_and_domain(session, context, domain=None):
    """ Evaluates a (context, domain) pair against the given session.

    :param session: OpenERP session used for the evaluation
    :param context: context (literal or non-literal) to evaluate
    :param domain: domain to evaluate; a falsy value means an empty domain
    :return: pair (evaluated context, evaluated domain)
    """
    evaluated_context = session.eval_context(context)
    # should we give the evaluated context as an evaluation context to the domain?
    evaluated_domain = session.eval_domain(domain if domain else [])
    return evaluated_context, evaluated_domain
def load_actions_from_ir_values(req, key, key2, models, meta):
    """ Fetches the actions bound to the given models through ir.values
    and returns them cleaned up for the web client.

    :param req: request object carrying the OpenERP session
    :param key: ir.values key (e.g. 'action')
    :param key2: ir.values sub-key (e.g. 'tree_but_open')
    :param models: list of (model, id) pairs to look bindings up for
    :param meta: ir.values meta argument, passed through as-is
    :return: list of (id, name, cleaned_action) triples
    """
    context = req.session.eval_context(req.context)
    values_model = req.session.model('ir.values')
    bindings = values_model.get(key, key2, models, meta, context)
    result = []
    for action_id, action_name, action in bindings:
        result.append((action_id, action_name, clean_action(req, action)))
    return result
def clean_action(req, action, do_not_eval=False):
    """ Normalizes an action descriptor for the web client: guarantees
    the 'flags' and 'type' keys exist, converts string contexts and
    domains into Python objects, and folds view modes for window actions.

    :param dict action: the action to clean; False is treated as empty
    :param bool do_not_eval: when True, contexts/domains are parsed into
        (possibly non-literal) wrappers instead of being evaluated
    :return: the cleaned action descriptor
    """
    if action is False:
        action = {}
    action.setdefault('flags', {})
    context = req.session.eval_context(req.context)
    eval_ctx = req.session.evaluation_context(context)
    if do_not_eval:
        if 'context' in action:
            action['context'] = parse_context(action['context'], req.session)
        if 'domain' in action:
            action['domain'] = parse_domain(action['domain'], req.session)
    else:
        # values come from the server, we can just eval them
        ctx = action.get('context')
        if ctx and isinstance(ctx, basestring):
            # strip newlines so eval() accepts multi-line expressions
            action['context'] = eval(ctx.replace('\n', ''), eval_ctx) or {}
        dom = action.get('domain')
        if dom and isinstance(dom, basestring):
            action['domain'] = eval(dom, eval_ctx) or []
    action_type = action.setdefault('type', 'ir.actions.act_window_close')
    if action_type == 'ir.actions.act_window':
        return fix_view_modes(action)
    return action
# NOTE: generate_views and fix_view_modes should probably move into the JS ActionManager
def generate_views(action):
    """ Builds the ``views`` key of an action descriptor from its
    ``view_mode`` (and optional ``view_id``) when the server did not
    provide one, e.g. for custom action dictionaries returned by buttons.

    Handled cases:

    * no view_id and one or more view modes
    * a single view_id with a single view mode

    :param dict action: action descriptor to add a ``views`` key to,
        modified in place
    :raises ValueError: when several view modes are combined with a
        view id
    """
    view_id = action.get('view_id') or False
    # the ORM may return a view_id as an (id, name) pair
    if isinstance(view_id, (list, tuple)):
        view_id = view_id[0]
    # providing at least one view mode is a requirement, not an option
    modes = action['view_mode'].split(',')
    if len(modes) > 1:
        if view_id:
            raise ValueError('Non-db action dictionaries should provide '
                             'either multiple view modes or a single view '
                             'mode and an optional view id.\n\n Got view '
                             'modes %r and view id %r for action %r' % (
                                 modes, view_id, action))
        action['views'] = [(False, mode) for mode in modes]
        return
    action['views'] = [(view_id, modes[0])]
def fix_view_modes(action):
    """ Folds the legacy ``view_type`` attribute of window actions into
    the view modes:

    * when ``view_type`` is ``form``, a ``tree`` mode really designates a
      list view, so it is renamed to ``list``
    * a ``page`` mode is inserted right after any ``form`` mode

    When ``view_type`` is not ``form`` (an actual tree view), the view
    modes are left untouched.

    TODO: this should go into the doc, some kind of "peculiarities" section

    :param dict action: an action descriptor, modified in place
    :returns: the modified action
    """
    if not action.get('views'):
        generate_views(action)
    # locate the first form view so it can be duplicated as a 'page' mode
    form_position = form_view_id = None
    for position, (vid, vmode) in enumerate(action['views']):
        if vmode == 'form':
            form_position, form_view_id = position, vid
            break
    if form_position is not None:
        action['views'].insert(form_position + 1, (form_view_id, 'page'))
    if action.pop('view_type', 'form') != 'form':
        # genuine tree view: no renaming needed
        return action
    action['views'] = [
        [vid, 'list' if vmode == 'tree' else vmode]
        for vid, vmode in action['views']
    ]
    return action
class Menu(openerpweb.Controller):
    # Serves the application menu hierarchy (roots and full tree) and the
    # actions bound to menu items.
    _cp_path = "/web/menu"

    @openerpweb.jsonrequest
    def load(self, req):
        # full menu tree, wrapped in a 'data' key for the client
        return {'data': self.do_load(req)}

    @openerpweb.jsonrequest
    def get_user_roots(self, req):
        return self.do_get_user_roots(req)

    def do_get_user_roots(self, req):
        """ Return all root menu ids visible for the session user.

        :param req: A request object, with an OpenERP session attribute
        :type req: < session -> OpenERPSession >
        :return: the root menu ids
        :rtype: list(int)
        """
        s = req.session
        context = s.eval_context(req.context)
        Menus = s.model('ir.ui.menu')
        # If a menu action is defined use its domain to get the root menu items
        user_menu_id = s.model('res.users').read([s._uid], ['menu_id'], context)[0]['menu_id']
        menu_domain = [('parent_id', '=', False)]
        if user_menu_id:
            # user_menu_id is an (id, name) pair; the action's domain is a
            # literal string, parsed safely with literal_eval
            domain_string = s.model('ir.actions.act_window').read([user_menu_id[0]], ['domain'], context)[0]['domain']
            if domain_string:
                menu_domain = ast.literal_eval(domain_string)
        return Menus.search(menu_domain, 0, False, False, context)

    def do_load(self, req):
        """ Loads all menu items (all applications and their sub-menus).

        :param req: A request object, with an OpenERP session attribute
        :type req: < session -> OpenERPSession >
        :return: the menu root
        :rtype: dict('children': menu_nodes)
        """
        context = req.session.eval_context(req.context)
        Menus = req.session.model('ir.ui.menu')
        menu_roots = Menus.read(self.do_get_user_roots(req), ['name', 'sequence', 'parent_id', 'icon', ], context)
        # synthetic root node holding all visible application roots
        menu_root = {'id': False, 'name': 'root', 'parent_id': [-1, ''], 'children' : menu_roots}
        # menus are loaded fully unlike a regular tree view, cause there are a
        # limited number of items (752 when all 6.1 addons are installed)
        menu_ids = Menus.search([], 0, False, False, context)
        menu_items = Menus.read(menu_ids, ['name', 'sequence', 'parent_id', 'icon', ], context)
        # adds roots at the end of the sequence, so that they will overwrite
        # equivalent menu items from full menu read when put into id:item
        # mapping, resulting in children being correctly set on the roots.
        menu_items.extend(menu_roots)
        # make a tree using parent_id
        menu_items_map = dict((menu_item["id"], menu_item) for menu_item in menu_items)
        for menu_item in menu_items:
            if menu_item['parent_id']:
                # parent_id comes back as an (id, display_name) pair
                parent = menu_item['parent_id'][0]
            else:
                parent = False
            if parent in menu_items_map:
                menu_items_map[parent].setdefault(
                    'children', []).append(menu_item)
        # sort by sequence a tree using parent_id
        for menu_item in menu_items:
            menu_item.setdefault('children', []).sort(
                key=operator.itemgetter('sequence'))
        return menu_root

    @openerpweb.jsonrequest
    def action(self, req, menu_id):
        # actions bound to this menu item through ir.values
        actions = load_actions_from_ir_values(req,'action', 'tree_but_open',
                                              [('ir.ui.menu', menu_id)], False)
        return {"action": actions}
class DataSet(openerpweb.Controller):
    """ Generic model endpoints: searching, reading, creating, writing,
    unlinking and calling arbitrary methods on any model. """
    _cp_path = "/web/dataset"

    @openerpweb.jsonrequest
    def fields(self, req, model):
        """ Returns the fields_get() description of ``model``. """
        return {'fields': req.session.model(model).fields_get(False,
                    req.session.eval_context(req.context))}

    @openerpweb.jsonrequest
    def search_read(self, req, model, fields=False, offset=0, limit=False, domain=None, sort=None):
        return self.do_search_read(req, model, fields, offset, limit, domain, sort)

    def do_search_read(self, req, model, fields=False, offset=0, limit=False, domain=None,
                       sort=None):
        """ Performs a search() followed by a read() (if needed) using the
        provided search criteria

        :param req: a JSON-RPC request object
        :type req: openerpweb.JsonRequest
        :param str model: the name of the model to search on
        :param fields: a list of the fields to return in the result records
        :type fields: [str]
        :param int offset: from which index should the results start being returned
        :param int limit: the maximum number of records to return
        :param list domain: the search domain for the query
        :param list sort: sorting directives
        :returns: A structure (dict) with two keys: ids (all the ids matching
                  the (domain, context) pair) and records (paginated records
                  matching fields selection set)
        :rtype: list
        """
        Model = req.session.model(model)
        context, domain = eval_context_and_domain(
            req.session, req.context, domain)
        ids = Model.search(domain, offset or 0, limit or False, sort or False, context)
        if limit and len(ids) == limit:
            # the page is full: there may be more matching records, ask the
            # server for the real count
            length = Model.search_count(domain, context)
        else:
            length = len(ids) + (offset or 0)
        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return {
                'ids': ids,
                'length': length,
                'records': [{'id': id} for id in ids]
            }
        records = Model.read(ids, fields or False, context)
        # read() does not preserve the ordering of search(); restore it.
        # An id -> position map avoids the quadratic ids.index() lookup.
        positions = dict((id, index) for index, id in enumerate(ids))
        records.sort(key=lambda record: positions[record['id']])
        return {
            'ids': ids,
            'length': length,
            'records': records
        }

    @openerpweb.jsonrequest
    def read(self, req, model, ids, fields=False):
        """ Reads the records ``ids`` of ``model``.

        Bug fix: this previously forwarded to
        ``do_search_read(req, model, ids, fields)``, binding ``ids`` to the
        ``fields`` parameter and ``fields`` to ``offset``, which could never
        return the requested records. It now delegates to do_get(), which
        actually reads by ids.
        """
        return self.do_get(req, model, ids, fields)

    @openerpweb.jsonrequest
    def get(self, req, model, ids, fields=False):
        return self.do_get(req, model, ids, fields)

    def do_get(self, req, model, ids, fields=False):
        """ Fetches and returns the records of the model ``model`` whose ids
        are in ``ids``.

        The results are in the same order as the inputs, but elements may be
        missing (if there is no record left for the id)

        :param req: the JSON-RPC2 request object
        :type req: openerpweb.JsonRequest
        :param model: the model to read from
        :type model: str
        :param ids: a list of identifiers
        :type ids: list
        :param fields: a list of fields to fetch, ``False`` or empty to fetch
                       all fields in the model
        :type fields: list | False
        :returns: a list of records, in the same order as the list of ids
        :rtype: list
        """
        Model = req.session.model(model)
        records = Model.read(ids, fields, req.session.eval_context(req.context))
        record_map = dict((record['id'], record) for record in records)
        # preserve input order, dropping ids with no remaining record
        return [record_map[id] for id in ids if record_map.get(id)]

    @openerpweb.jsonrequest
    def load(self, req, model, id, fields):
        """ Reads a single record; returns an empty value when it does not
        exist. """
        m = req.session.model(model)
        value = {}
        r = m.read([id], False, req.session.eval_context(req.context))
        if r:
            value = r[0]
        return {'value': value}

    @openerpweb.jsonrequest
    def create(self, req, model, data):
        """ Creates a record of ``model`` with values ``data``. """
        m = req.session.model(model)
        r = m.create(data, req.session.eval_context(req.context))
        return {'result': r}

    @openerpweb.jsonrequest
    def save(self, req, model, id, data):
        """ Writes values ``data`` on record ``id`` of ``model``. """
        m = req.session.model(model)
        r = m.write([id], data, req.session.eval_context(req.context))
        return {'result': r}

    @openerpweb.jsonrequest
    def unlink(self, req, model, ids=()):
        """ Deletes the records ``ids`` of ``model``. """
        Model = req.session.model(model)
        return Model.unlink(ids, req.session.eval_context(req.context))

    def call_common(self, req, model, method, args, domain_id=None, context_id=None):
        """ Calls ``method`` on ``model``, first evaluating the positional
        argument at index ``domain_id`` as a domain and the one at
        ``context_id`` as a context (when those indexes are provided and
        within range). """
        has_domain = domain_id is not None and domain_id < len(args)
        has_context = context_id is not None and context_id < len(args)
        domain = args[domain_id] if has_domain else []
        context = args[context_id] if has_context else {}
        c, d = eval_context_and_domain(req.session, context, domain)
        if has_domain:
            args[domain_id] = d
        if has_context:
            args[context_id] = c
        return self._call_kw(req, model, method, args, {})

    def _call_kw(self, req, model, method, args, kwargs):
        """ Evaluates any non-literal contexts/domains left in the
        arguments, then dispatches the call to the model method. """
        for i in xrange(len(args)):
            if isinstance(args[i], common.nonliterals.BaseContext):
                args[i] = req.session.eval_context(args[i])
            elif isinstance(args[i], common.nonliterals.BaseDomain):
                args[i] = req.session.eval_domain(args[i])
        for k in kwargs.keys():
            if isinstance(kwargs[k], common.nonliterals.BaseContext):
                kwargs[k] = req.session.eval_context(kwargs[k])
            elif isinstance(kwargs[k], common.nonliterals.BaseDomain):
                kwargs[k] = req.session.eval_domain(kwargs[k])
        return getattr(req.session.model(model), method)(*args, **kwargs)

    @openerpweb.jsonrequest
    def onchange(self, req, model, method, args, context_id=None):
        """ Support method for handling onchange calls: behaves much like call
        with the following differences:

        * Does not take a domain_id
        * Is aware of the return value's structure, and will parse the domains
          if needed in order to return either parsed literal domains (in JSON)
          or non-literal domain instances, allowing those domains to be used
          from JS

        :param req:
        :type req: web.common.http.JsonRequest
        :param str model: object type on which to call the method
        :param str method: name of the onchange handler method
        :param list args: arguments to call the onchange handler with
        :param int context_id: index of the context object in the list of
                               arguments
        :return: result of the onchange call with all domains parsed
        """
        result = self.call_common(req, model, method, args, context_id=context_id)
        if not result or 'domain' not in result:
            return result
        result['domain'] = dict(
            (k, parse_domain(v, req.session))
            for k, v in result['domain'].iteritems())
        return result

    @openerpweb.jsonrequest
    def call(self, req, model, method, args, domain_id=None, context_id=None):
        return self.call_common(req, model, method, args, domain_id, context_id)

    @openerpweb.jsonrequest
    def call_kw(self, req, model, method, args, kwargs):
        return self._call_kw(req, model, method, args, kwargs)

    @openerpweb.jsonrequest
    def call_button(self, req, model, method, args, domain_id=None, context_id=None):
        """ Calls a button method; when it returns an action dictionary,
        the action is cleaned before being handed to the client. """
        action = self.call_common(req, model, method, args, domain_id, context_id)
        if isinstance(action, dict) and action.get('type') != '':
            return {'result': clean_action(req, action)}
        return {'result': False}

    @openerpweb.jsonrequest
    def exec_workflow(self, req, model, id, signal):
        """ Sends workflow signal ``signal`` to record ``id``. """
        r = req.session.exec_workflow(model, id, signal)
        return {'result': r}

    @openerpweb.jsonrequest
    def default_get(self, req, model, fields):
        """ Returns the default values of ``fields`` for ``model``. """
        Model = req.session.model(model)
        return Model.default_get(fields, req.session.eval_context(req.context))

    @openerpweb.jsonrequest
    def name_search(self, req, model, search_str, domain=None, context=None):
        """ name_search with a trailing-wildcard '=ilike' match on
        ``search_str``.

        The mutable default arguments ([] and {}) were replaced with None
        to avoid accidental sharing between calls.
        """
        m = req.session.model(model)
        r = m.name_search(search_str+'%', domain or [], '=ilike', context or {})
        return {'result': r}
class DataGroup(openerpweb.Controller):
    """ Endpoint exposing read_group() for grouped list views. """
    _cp_path = "/web/group"

    @openerpweb.jsonrequest
    def read(self, req, model, fields, group_by_fields, domain=None, sort=None):
        """ Returns the read_group() of ``model`` grouped on
        ``group_by_fields``, after evaluating the request's context and
        domain. """
        proxy = req.session.model(model)
        context, domain = eval_context_and_domain(req.session, req.context, domain)
        # the grouping fields are also pushed into the context so the
        # server performs the grouping on them
        grouping_context = dict(context, group_by=group_by_fields)
        return proxy.read_group(domain or [], fields, group_by_fields,
                                0, False, grouping_context, sort or False)
class View(openerpweb.Controller):
    # Base controller for view loading: fetches fields_view_get results and
    # post-processes the arch (XML -> JSON, domain/context parsing).
    _cp_path = "/web/view"

    def fields_view_get(self, req, model, view_id, view_type,
                        transform=True, toolbar=False, submenu=False):
        """ Fetches a view description via fields_view_get and
        post-processes it for the web client.

        :param transform: when True, domains/contexts embedded in the arch
            are parsed into Python objects while converting the XML
        """
        Model = req.session.model(model)
        context = req.session.eval_context(req.context)
        fvg = Model.fields_view_get(view_id, view_type, context, toolbar, submenu)
        # todo fme?: check that we should pass the evaluated context here
        self.process_view(req.session, fvg, context, transform, (view_type == 'kanban'))
        if toolbar and transform:
            self.process_toolbar(req, fvg['toolbar'])
        return fvg

    def process_view(self, session, fvg, context, transform, preserve_whitespaces=False):
        """ Converts the view's arch from XML to the JSON structure the
        client consumes, and recursively processes embedded sub-views. """
        # depending on how it feels, xmlrpclib.ServerProxy can translate
        # XML-RPC strings to ``str`` or ``unicode``. ElementTree does not
        # enjoy unicode strings which can not be trivially converted to
        # strings, and it blows up during parsing.
        # So ensure we fix this retardation by converting view xml back to
        # bit strings.
        if isinstance(fvg['arch'], unicode):
            arch = fvg['arch'].encode('utf-8')
        else:
            arch = fvg['arch']
        if transform:
            evaluation_context = session.evaluation_context(context or {})
            xml = self.transform_view(arch, session, evaluation_context)
        else:
            xml = ElementTree.fromstring(arch)
        fvg['arch'] = common.xml2json.from_elementtree(xml, preserve_whitespaces)
        if 'id' in fvg['fields']:
            # Special case for id's
            id_field = fvg['fields']['id']
            id_field['original_type'] = id_field['type']
            id_field['type'] = 'id'
        for field in fvg['fields'].itervalues():
            if field.get('views'):
                # inline (o2m/m2m) sub-views are processed recursively
                for view in field["views"].itervalues():
                    self.process_view(session, view, None, transform)
            if field.get('domain'):
                field["domain"] = parse_domain(field["domain"], session)
            if field.get('context'):
                field["context"] = parse_context(field["context"], session)

    def process_toolbar(self, req, toolbar):
        """
        The toolbar is a mapping of section_key: [action_descriptor]

        We need to clean all those actions in order to ensure correct
        round-tripping
        """
        for actions in toolbar.itervalues():
            for action in actions:
                if 'context' in action:
                    action['context'] = parse_context(
                        action['context'], req.session)
                if 'domain' in action:
                    action['domain'] = parse_domain(
                        action['domain'], req.session)

    @openerpweb.jsonrequest
    def add_custom(self, req, view_id, arch):
        # stores a per-user customized arch for the given view
        CustomView = req.session.model('ir.ui.view.custom')
        CustomView.create({
            'user_id': req.session._uid,
            'ref_id': view_id,
            'arch': arch
        }, req.session.eval_context(req.context))
        return {'result': True}

    @openerpweb.jsonrequest
    def undo_custom(self, req, view_id, reset=False):
        # drops either all the user's customizations for the view
        # (reset=True) or only the first one returned by search()
        CustomView = req.session.model('ir.ui.view.custom')
        context = req.session.eval_context(req.context)
        vcustom = CustomView.search([('user_id', '=', req.session._uid), ('ref_id' ,'=', view_id)],
                                    0, False, False, context)
        if vcustom:
            if reset:
                CustomView.unlink(vcustom, context)
            else:
                # NOTE(review): relies on the model's default ordering to make
                # vcustom[0] the most relevant customization — confirm
                CustomView.unlink([vcustom[0]], context)
            return {'result': True}
        return {'result': False}

    def transform_view(self, view_string, session, context=None):
        """ Parses the view arch, converting embedded domains and contexts
        on each node as the tree is built.

        :param view_string: the arch, as a (byte) string
        :returns: the root Element of the parsed tree
        """
        # transform nodes on the fly via iterparse, instead of
        # doing it statically on the parsing result
        parser = ElementTree.iterparse(StringIO(view_string), events=("start",))
        root = None
        for event, elem in parser:
            if event == "start":
                if root is None:
                    root = elem
                self.parse_domains_and_contexts(elem, session)
        return root

    def parse_domains_and_contexts(self, elem, session):
        """ Converts domains and contexts from the view into Python objects,
        either literals if they can be parsed by literal_eval or a special
        placeholder object if the domain or context refers to free variables.

        :param elem: the current node being parsed
        :type param: xml.etree.ElementTree.Element
        :param session: OpenERP session object, used to store and retrieve
                        non-literal objects
        :type session: openerpweb.openerpweb.OpenERPSession
        """
        for el in ['domain', 'filter_domain']:
            domain = elem.get(el, '').strip()
            if domain:
                elem.set(el, parse_domain(domain, session))
                # keep the original string around for the client
                elem.set(el + '_string', domain)
        for el in ['context', 'default_get']:
            context_string = elem.get(el, '').strip()
            if context_string:
                elem.set(el, parse_context(context_string, session))
                elem.set(el + '_string', context_string)

    @openerpweb.jsonrequest
    def load(self, req, model, view_id, view_type, toolbar=False):
        return self.fields_view_get(req, model, view_id, view_type, toolbar=toolbar)
def parse_domain(domain, session):
    """ Parses an arbitrary string containing a domain, transforming it
    into either a literal domain or a :class:`common.nonliterals.Domain`
    placeholder for deferred evaluation.

    :param domain: the domain to parse; non-string values are assumed to
        already be literal domains and are returned unchanged
    :param session: Current OpenERP session
    :type session: openerpweb.openerpweb.OpenERPSession
    """
    if isinstance(domain, basestring):
        try:
            domain = ast.literal_eval(domain)
        except ValueError:
            # refers to free variables: wrap it for later evaluation
            domain = common.nonliterals.Domain(session, domain)
    return domain
def parse_context(context, session):
    """ Parses an arbitrary string containing a context, transforming it
    into either a literal context or a :class:`common.nonliterals.Context`
    placeholder for deferred evaluation.

    :param context: the context to parse; non-string values are assumed
        to already be literal contexts and are returned unchanged
    :param session: Current OpenERP session
    :type session: openerpweb.openerpweb.OpenERPSession
    """
    if isinstance(context, basestring):
        try:
            context = ast.literal_eval(context)
        except ValueError:
            # refers to free variables: wrap it for later evaluation
            context = common.nonliterals.Context(session, context)
    return context
class ListView(View):
    _cp_path = "/web/listview"

    def process_colors(self, view, row, context):
        """ Returns the color to apply to ``row``, based on the ``colors``
        attribute of the view's root element.

        The attribute has the form ``color1:expr1;color2:expr2;...``; each
        expression is evaluated against the row's values merged into
        ``context`` (the arch comes from the server, so eval is trusted
        here).

        :returns: None when no expression matches, the color when exactly
            one matches, 'maroon' when several match
        """
        colors = view['arch']['attrs'].get('colors')
        if not colors:
            return None
        matched = []
        for pair in colors.split(';'):
            # BUG FIX: split on the *first* colon only, so expressions
            # containing a colon (dict literals, slices) are not truncated
            color, expression = pair.split(':', 1)
            if eval(expression, dict(context, **row)):
                matched.append(color)
        if not matched:
            return None
        if len(matched) == 1:
            return matched[0]
        # several colors match: fall back to a fixed one
        return 'maroon'
class TreeView(View):
    _cp_path = "/web/treeview"

    @openerpweb.jsonrequest
    def action(self, req, model, id):
        """ Returns the 'tree_but_open' actions bound to the given
        record through ir.values. """
        bindings = [(model, id)]
        return load_actions_from_ir_values(
            req, 'action', 'tree_but_open', bindings, False)
class SearchView(View):
    # Controller for search views: loading, field metadata, custom filters
    # and "add to dashboard".
    _cp_path = "/web/searchview"

    @openerpweb.jsonrequest
    def load(self, req, model, view_id):
        # fetch the search view description for the client
        fields_view = self.fields_view_get(req, model, view_id, 'search')
        return {'fields_view': fields_view}

    @openerpweb.jsonrequest
    def fields_get(self, req, model):
        """ Returns the model's fields, with their domains and contexts
        parsed into Python objects. """
        Model = req.session.model(model)
        fields = Model.fields_get(False, req.session.eval_context(req.context))
        for field in fields.values():
            # shouldn't convert the views too?
            if field.get('domain'):
                field["domain"] = parse_domain(field["domain"], req.session)
            if field.get('context'):
                field["context"] = parse_context(field["context"], req.session)
        return {'fields': fields}

    @openerpweb.jsonrequest
    def get_filters(self, req, model):
        """ Returns the user's custom filters for ``model``. Filters whose
        context or domain cannot be parsed are returned disabled instead of
        breaking the whole list. """
        logger = logging.getLogger(__name__ + '.SearchView.get_filters')
        Model = req.session.model("ir.filters")
        filters = Model.get_filters(model)
        for filter in filters:
            try:
                parsed_context = parse_context(filter["context"], req.session)
                # non-literal contexts/domains are evaluated right away
                filter["context"] = (parsed_context
                        if not isinstance(parsed_context, common.nonliterals.BaseContext)
                        else req.session.eval_context(parsed_context))
                parsed_domain = parse_domain(filter["domain"], req.session)
                filter["domain"] = (parsed_domain
                        if not isinstance(parsed_domain, common.nonliterals.BaseDomain)
                        else req.session.eval_domain(parsed_domain))
            except Exception:
                # one broken filter must not prevent loading the others
                logger.exception("Failed to parse custom filter %s in %s",
                                 filter['name'], model)
                filter['disabled'] = True
                del filter['context']
                del filter['domain']
        return filters

    @openerpweb.jsonrequest
    def save_filter(self, req, model, name, context_to_save, domain):
        """ Creates (or replaces) the custom filter ``name`` for ``model``,
        storing the evaluated context and domain. """
        Model = req.session.model("ir.filters")
        ctx = common.nonliterals.CompoundContext(context_to_save)
        ctx.session = req.session
        ctx = ctx.evaluate()
        domain = common.nonliterals.CompoundDomain(domain)
        domain.session = req.session
        domain = domain.evaluate()
        uid = req.session._uid
        context = req.session.eval_context(req.context)
        to_return = Model.create_or_replace({"context": ctx,
                                             "domain": domain,
                                             "model_id": model,
                                             "name": name,
                                             "user_id": uid
                                             }, context)
        return to_return

    @openerpweb.jsonrequest
    def add_to_dashboard(self, req, menu_id, action_id, context_to_save, domain, view_mode, name=''):
        """ Adds the given action (with its current search context and
        domain) to the user's dashboard by patching the board.board view
        arch. Returns the created ir.ui.view.custom id, or False when the
        menu does not point to a dashboard. """
        to_eval = common.nonliterals.CompoundContext(context_to_save)
        to_eval.session = req.session
        # strip transient keys (search defaults, lang) from the saved context
        ctx = dict((k, v) for k, v in to_eval.evaluate().iteritems()
                   if not k.startswith('search_default_')
                   if k != 'lang')
        ctx['dashboard_merge_domains_contexts'] = False # TODO: replace this 6.1 workaround by attribute on <action/>
        domain = common.nonliterals.CompoundDomain(domain)
        domain.session = req.session
        domain = domain.evaluate()
        dashboard_action = load_actions_from_ir_values(req, 'action', 'tree_but_open',
                                                       [('ir.ui.menu', menu_id)], False)
        if dashboard_action:
            action = dashboard_action[0][2]
            if action['res_model'] == 'board.board' and action['views'][0][1] == 'form':
                # Maybe should check the content instead of model board.board ?
                view_id = action['views'][0][0]
                board = req.session.model(action['res_model']).fields_view_get(view_id, 'form')
                if board and 'arch' in board:
                    xml = ElementTree.fromstring(board['arch'])
                    column = xml.find('./board/column')
                    if column is not None:
                        # prepend the new action element to the first column
                        new_action = ElementTree.Element('action', {
                            'name' : str(action_id),
                            'string' : name,
                            'view_mode' : view_mode,
                            'context' : str(ctx),
                            'domain' : str(domain)
                        })
                        column.insert(0, new_action)
                        arch = ElementTree.tostring(xml, 'utf-8')
                        return req.session.model('ir.ui.view.custom').create({
                            'user_id': req.session._uid,
                            'ref_id': view_id,
                            'arch': arch
                        }, req.session.eval_context(req.context))
        return False
class Binary(openerpweb.Controller):
    # Serves and receives binary field content: record images, file
    # downloads (direct and AJAX) and uploads.
    _cp_path = "/web/binary"

    @openerpweb.httprequest
    def image(self, req, model, id, field, **kw):
        """ Streams the image stored in ``field`` of record ``id`` (or the
        field's default value when no id is given); falls back to a
        placeholder image on error. """
        Model = req.session.model(model)
        context = req.session.eval_context(req.context)
        try:
            if not id:
                res = Model.default_get([field], context).get(field)
            else:
                res = Model.read([int(id)], [field], context)[0].get(field)
            image_data = base64.b64decode(res)
        except (TypeError, xmlrpclib.Fault):
            # missing or unreadable image: serve the placeholder instead
            image_data = self.placeholder(req)
        return req.make_response(image_data, [
            ('Content-Type', 'image/png'), ('Content-Length', len(image_data))])

    def placeholder(self, req):
        # static placeholder image shipped with the web addon
        addons_path = openerpweb.addons_manifest['web']['addons_path']
        return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', 'placeholder.png'), 'rb').read()

    def content_disposition(self, filename, req, ttype='attachment'):
        """ Builds a Content-Disposition header value for ``filename``,
        working around browser-specific filename encoding quirks. """
        filename = filename.encode('utf8')
        escaped = urllib2.quote(filename)
        browser = req.httprequest.user_agent.browser
        version = int((req.httprequest.user_agent.version or '0').split('.')[0])
        if browser == 'msie' and version < 9:
            # old IE does not understand the RFC 5987 filename* form
            return "%s; filename=%s" % (ttype, escaped)
        elif browser == 'safari':
            # Safari expects the raw (utf-8) filename
            return "%s; filename=%s" % (ttype, filename)
        else:
            return "%s; filename*=UTF-8''%s" % (ttype, escaped)

    @openerpweb.httprequest
    def saveas(self, req, model, field, id=None, filename_field=None, **kw):
        """ Download link for files stored as binary fields.

        If the ``id`` parameter is omitted, fetches the default value for the
        binary field (via ``default_get``), otherwise fetches the field for
        that precise record.

        :param req: OpenERP request
        :type req: :class:`web.common.http.HttpRequest`
        :param str model: name of the model to fetch the binary from
        :param str field: binary field
        :param str id: id of the record from which to fetch the binary
        :param str filename_field: field holding the file's name, if any
        :returns: :class:`werkzeug.wrappers.Response`
        """
        import mimetypes
        import os
        Model = req.session.model(model)
        context = req.session.eval_context(req.context)
        fields = [field]
        if filename_field:
            fields.append(filename_field)
        if id:
            res = Model.read([int(id)], fields, context)[0]
        else:
            res = Model.default_get(fields, context)
        filecontent = base64.b64decode(res.get(field) or '')
        if not filecontent:
            return req.not_found()
        else:
            filename = '%s_%s' % (model.replace('.', '_'), id)
            if filename_field:
                filename = res.get(filename_field, '') or filename
            if kw.get('no_download'):
                fname, fextension = os.path.splitext(filename)
                fextension = fextension.lower()
                # NOTE(review): '%s_%s' yields "name_.ext" (underscore before
                # the extension) — looks unintended, confirm
                full_fname = '%s_%s' % (fname, fextension)
                # NOTE(review): types_map[...] raises KeyError for unknown
                # extensions — consider .get(..., 'application/octet-stream')
                return req.make_response(filecontent,
                    [('Content-Type', mimetypes.types_map[fextension]),
                     ('Content-Disposition', self.content_disposition(full_fname, req, 'inline'))])
            return req.make_response(filecontent,
                [('Content-Type', 'application/octet-stream'),
                 ('Content-Disposition', self.content_disposition(filename, req))])

    @openerpweb.httprequest
    def saveas_ajax(self, req, data, token):
        """ AJAX variant of saveas(): parameters arrive JSON-encoded in
        ``data`` and a ``fileToken`` cookie is set so the client can detect
        the end of the download. """
        jdata = simplejson.loads(data)
        model = jdata['model']
        field = jdata['field']
        id = jdata.get('id', None)
        filename_field = jdata.get('filename_field', None)
        context = jdata.get('context', dict())
        content_type = 'application/octet-stream'
        Model = req.session.model(model)
        context = req.session.eval_context(context)
        fields = [field]
        if filename_field:
            fields.append(filename_field)
        if id:
            # also fetch the stored MIME type when reading a real record
            fields.append('file_type')
            res = Model.read([int(id)], fields, context)[0]
            if res.get('file_type'):
                content_type = res['file_type']
        else:
            res = Model.default_get(fields, context)
        filecontent = base64.b64decode(res.get(field) or '')
        if not filecontent:
            raise ValueError("No content found for field '%s' on '%s:%s'" %
                             (field, model, id))
        else:
            filename = '%s_%s' % (model.replace('.', '_'), id)
            if filename_field:
                filename = res.get(filename_field, '') or filename
            return req.make_response(filecontent,
                headers=[('Content-Type', content_type),
                         ('Content-Disposition', self.content_disposition(filename, req))],
                cookies={'fileToken': int(token)})

    @openerpweb.httprequest
    def upload(self, req, callback, ufile):
        """ Receives a file upload and returns a script snippet invoking
        the client-side callback with the file's metadata (or the error). """
        # TODO: might be useful to have a configuration flag for max-length file uploads
        # Bugfix 2514_2513 in callback row
        try:
            out = """<script language="javascript" type="text/javascript">
                var win = window.top.window,
                    callback = win[%s + '_callback'];
                if (typeof(callback) === 'function') {
                    callback.apply(this, %s);
                } else {
                    win.jQuery('#oe_notification', win.document).notify('create', {
                        title: "Ajax File Upload",
                        text: "Could not find callback"
                    });
                }
            </script>"""
            data = ufile.read()
            args = [len(data), ufile.filename,
                    ufile.content_type, base64.b64encode(data)]
        except Exception, e:
            args = [False, e.message]
        return out % (simplejson.dumps(callback), simplejson.dumps(args))

    @openerpweb.httprequest
    def upload_attachment(self, req, callback, model, id, ufile):
        """ Creates an ir.attachment from an uploaded file, bound to record
        ``id`` of ``model``, and returns a script snippet invoking the
        client-side callback with the new attachment's id (or the error). """
        context = req.session.eval_context(req.context)
        Model = req.session.model('ir.attachment')
        try:
            out = """<script language="javascript" type="text/javascript">
                var win = window.top.window,
                    callback = win[%s];
                if (typeof(callback) === 'function') {
                    callback.call(this, %s);
                }
            </script>"""
            attachment_id = Model.create({
                'name': ufile.filename,
                'datas': base64.encodestring(ufile.read()),
                'datas_fname': ufile.filename,
                'res_model': model,
                'res_id': int(id)
            }, context)
            args = {
                'filename': ufile.filename,
                'id':  attachment_id
            }
        except Exception, e:
            args = { 'error': e.message }
        return out % (simplejson.dumps(callback), simplejson.dumps(args))
class Action(openerpweb.Controller):
    _cp_path = "/web/action"

    # For most actions, the type attribute and the model name are the same,
    # but there are exceptions. This dict remaps action type attributes to
    # the "real" model name when they differ.
    action_mapping = {
        "ir.actions.act_url": "ir.actions.url",
    }

    @openerpweb.jsonrequest
    def load(self, req, action_id, do_not_eval=False):
        """ Reads action ``action_id``, resolving its concrete model
        through ir.actions.actions, and returns it cleaned up for the
        client (False when the action does not exist). """
        Actions = req.session.model('ir.actions.actions')
        value = False
        context = req.session.eval_context(req.context)
        base_action = Actions.read([action_id], ['type'], context)
        if base_action:
            action_type = base_action[0]['type']
            ctx = {}
            if action_type == 'ir.actions.report.xml':
                # only fetch binary sizes, not the binaries themselves
                ctx.update({'bin_size': True})
            ctx.update(context)
            action_model = self.action_mapping.get(action_type, action_type)
            action = req.session.model(action_model).read([action_id], False, ctx)
            if action:
                value = clean_action(req, action[0], do_not_eval)
        return {'result': value}

    @openerpweb.jsonrequest
    def run(self, req, action_id):
        """ Runs the server action and returns its (cleaned) result. """
        context = req.session.eval_context(req.context)
        result = req.session.model('ir.actions.server').run([action_id], context)
        return clean_action(req, result)
class Export(View):
_cp_path = "/web/export"
@openerpweb.jsonrequest
def formats(self, req):
    """ Returns all valid export formats

    :returns: for each export format, a pair of identifier and printable name
    :rtype: [(str, str)]
    """
    fmts = []
    for path, controller in openerpweb.controllers_path.iteritems():
        # export format controllers are mounted under this controller's
        # path and expose a 'fmt' descriptor
        if path.startswith(self._cp_path) and hasattr(controller, 'fmt'):
            fmts.append(controller.fmt)
    fmts.sort(key=operator.itemgetter("label"))
    return fmts
def fields_get(self, req, model):
    """ Returns the full fields_get() description of ``model`` in the
    request's context. """
    context = req.session.eval_context(req.context)
    return req.session.model(model).fields_get(False, context)
@openerpweb.jsonrequest
def get_fields(self, req, model, prefix='', parent_name= '',
               import_compat=True, parent_field_type=None,
               exclude=None):
    """ Returns the exportable fields of ``model`` as records for the
    export field-selection widget.

    :param prefix: '/'-separated path prefix of the parent field, if any
    :param parent_name: label prefix of the parent field, if any
    :param bool import_compat: when True, restrict to import-compatible
        fields (skips read-only and excluded fields, drops 'id')
    :param parent_field_type: type of the field being expanded, if any
    :param exclude: field names to skip in import-compatible mode
    """
    if import_compat and parent_field_type == "many2one":
        # expanding a many2one in import mode only yields its xml id
        fields = {}
    else:
        fields = self.fields_get(req, model)
    if import_compat:
        fields.pop('id', None)
    else:
        # expose the database id as '.id' to distinguish it from xml ids
        fields['.id'] = fields.pop('id', {'string': 'ID'})
    fields_sequence = sorted(fields.iteritems(),
        key=lambda field: field[1].get('string', ''))
    records = []
    for field_name, field in fields_sequence:
        if import_compat:
            if exclude and field_name in exclude:
                continue
            if field.get('readonly'):
                # If none of the field's states unsets readonly, skip the field
                if all(dict(attrs).get('readonly', True)
                       for attrs in field.get('states', {}).values()):
                    continue
        id = prefix + (prefix and '/'or '') + field_name
        name = parent_name + (parent_name and '/' or '') + field['string']
        record = {'id': id, 'string': name,
                  'value': id, 'children': False,
                  'field_type': field.get('type'),
                  'required': field.get('required'),
                  'relation_field': field.get('relation_field')}
        records.append(record)
        # relational fields are expandable, up to 3 levels deep
        if len(name.split('/')) < 3 and 'relation' in field:
            ref = field.pop('relation')
            record['value'] += '/id'
            record['params'] = {'model': ref, 'prefix': id, 'name': name}
            if not import_compat or field['type'] == 'one2many':
                # m2m field in import_compat is childless
                record['children'] = True
    return records
@openerpweb.jsonrequest
def namelist(self,req, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = req.session.model("ir.exports").read([export_id])[0]
export_fields_list = req.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
req, model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, req, model, export_fields):
info = {}
fields = self.fields_get(req, model)
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
# * then, group on: the first field of the path (which is the same for
# a field and for its subfields and the length of splitting on the
# first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
req, fields[base]['relation'], base, fields[base]['string'],
subfields
))
else:
info[base] = fields[base]['string']
return info
def graft_subfields(self, req, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(req, model, export_fields).iteritems())
#noinspection PyPropertyDefinition
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
provided base name (exension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
:params list fields: a list of fields to export
:params list rows: a list of records to export
:returns:
:rtype: bytes
"""
raise NotImplementedError()
@openerpweb.httprequest
def index(self, req, data, token):
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
simplejson.loads(data))
context = req.session.eval_context(req.context)
Model = req.session.model(model)
ids = ids or Model.search(domain, 0, False, False, context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return req.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition', 'attachment; filename="%s"' % self.filename(model)),
('Content-Type', self.content_type)],
cookies={'fileToken': int(token)})
class CSVExport(Export):
    """CSV implementation of the export controller (``/web/export/csv``)."""
    _cp_path = '/web/export/csv'
    fmt = {'tag': 'csv', 'label': 'CSV'}

    @property
    def content_type(self):
        return 'text/csv;charset=utf8'

    def filename(self, base):
        return base + '.csv'

    def from_data(self, fields, rows):
        """Serialize ``rows`` (with ``fields`` as header row) to a CSV byte
        string. All cells are quoted; text cells are UTF-8 encoded because
        the Python 2 ``csv`` module only handles byte strings."""
        fp = StringIO()
        writer = csv.writer(fp, quoting=csv.QUOTE_ALL)

        writer.writerow([name.encode('utf-8') for name in fields])

        for data in rows:
            row = []
            for d in data:
                if isinstance(d, basestring):
                    # Flatten embedded newlines/tabs so each record stays on
                    # one physical CSV line.
                    d = d.replace('\n', ' ').replace('\t', ' ')
                    try:
                        d = d.encode('utf-8')
                    except UnicodeError:
                        pass
                # OpenERP uses False for NULL values; export them empty.
                if d is False: d = None
                row.append(d)
            writer.writerow(row)

        fp.seek(0)
        data = fp.read()
        fp.close()
        return data
class ExcelExport(Export):
    """Excel (.xls via xlwt) implementation of the export controller."""
    _cp_path = '/web/export/xls'
    fmt = {
        'tag': 'xls',
        'label': 'Excel',
        # Advertise the format as unavailable when xlwt is not installed.
        'error': None if xlwt else "XLWT required"
    }

    @property
    def content_type(self):
        return 'application/vnd.ms-excel'

    def filename(self, base):
        return base + '.xls'

    def from_data(self, fields, rows):
        """Serialize ``rows`` into a single-sheet .xls workbook, with
        ``fields`` as the header row, and return the file's bytes."""
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')

        for i, fieldname in enumerate(fields):
            worksheet.write(0, i, fieldname)
            worksheet.col(i).width = 8000 # around 220 pixels

        style = xlwt.easyxf('align: wrap yes')

        for row_index, row in enumerate(rows):
            for cell_index, cell_value in enumerate(row):
                if isinstance(cell_value, basestring):
                    # Bare carriage returns confuse Excel; normalize them.
                    cell_value = re.sub("\r", " ", cell_value)
                # OpenERP uses False for NULL values; export them empty.
                if cell_value is False: cell_value = None
                worksheet.write(row_index + 1, cell_index, cell_value, style)

        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        return data
class Reports(View):
    """Controller that runs a server-side report and streams the result
    back to the browser as a file download (``/web/report``)."""
    _cp_path = "/web/report"
    # Seconds to wait between polls of the report service.
    POLLING_DELAY = 0.25
    # Report output format -> HTTP Content-Type.
    TYPES_MAPPING = {
        'doc': 'application/vnd.ms-word',
        'html': 'text/html',
        'odt': 'application/vnd.oasis.opendocument.text',
        'pdf': 'application/pdf',
        'sxw': 'application/vnd.sun.xml.writer',
        'xls': 'application/vnd.ms-excel',
    }

    @openerpweb.httprequest
    def index(self, req, action, token):
        """Launch the report described by the JSON ``action``, poll the
        report service until it completes, and return the rendered document
        as an attachment. ``token`` is echoed in the ``fileToken`` cookie.
        """
        action = simplejson.loads(action)

        report_srv = req.session.proxy("report")
        context = req.session.eval_context(
            common.nonliterals.CompoundContext(
                req.context or {}, action["context"]))

        report_data = {}
        report_ids = context["active_ids"]
        if 'report_type' in action:
            report_data['report_type'] = action['report_type']
        if 'datas' in action:
            if 'ids' in action['datas']:
                report_ids = action['datas'].pop('ids')
            report_data.update(action['datas'])

        report_id = report_srv.report(
            req.session._db, req.session._uid, req.session._password,
            action["report_name"], report_ids,
            report_data, context)

        # Report generation is asynchronous: poll until the service flags
        # the report as done. NOTE(review): there is no timeout, so a hung
        # report server blocks this request indefinitely.
        report_struct = None
        while True:
            report_struct = report_srv.report_get(
                req.session._db, req.session._uid, req.session._password, report_id)
            if report_struct["state"]:
                break

            time.sleep(self.POLLING_DELAY)

        # Load built-in report name
        file_name = action['report_name']

        # Try to get current object model and their ids from context
        if 'context' in action:
            action_context = action['context']
            if action_context.get('active_model') and action_context['active_ids']:
                # Use built-in ORM method to get data from DB
                m = req.session.model(action_context['active_model'])
                #r = m.name_get(action_context['active_ids'], context)
                res = []
                try:
                    res = m.name_get(action_context['active_ids'], context)
                except xmlrpclib.Fault:
                    #we assume this went wrong because of incorrect/missing
                    #_rec_name. We don't have access to _columns here to do
                    # a proper check
                    pass

                # Parse result to create a better filename
                # NOTE(review): bare ``except`` below also swallows
                # KeyboardInterrupt/SystemExit; a narrower clause would be
                # safer but is preserved here.
                try:
                    item_names = [item[1] or str(item[0]) for item in res]
                except:
                    item_names = [u'']
                if action.get('name'):
                    item_names.insert(0, action['name'])
                #file_name = '-'.join(item_names)
                ## Create safe filename
                #p = re.compile('[/:(")<>|?*]|(\\\)')
                #file_name = p.sub('_', file_name)

                #only change filename if we have something better
                if item_names:
                    file_name = '-'.join(item_names)

        # Create safe filename: strip characters that are invalid in
        # filenames on common platforms.
        p = re.compile('[/:(")<>|?*]|(\\\)')
        file_name = p.sub('_', file_name)

        # The report service returns base64 data, optionally zlib-compressed.
        report = base64.b64decode(report_struct['result'])
        if report_struct.get('code') == 'zlib':
            report = zlib.decompress(report)
        report_mimetype = self.TYPES_MAPPING.get(
            report_struct['format'], 'octet-stream')
        return req.make_response(report,
             headers=[
                 ('Content-Disposition', 'attachment; filename="%s.%s"' % (file_name, report_struct['format'])),
                 ('Content-Type', report_mimetype),
                 ('Content-Length', len(report))],
             cookies={'fileToken': int(token)})
class Import(View):
    """Controller backing the CSV import dialog (``/web/import``).

    Both entry points return an HTML ``<script>`` snippet that invokes a
    JSONP callback in the parent frame, because the requests are submitted
    through a hidden iframe (file upload).
    """
    _cp_path = "/web/import"

    def fields_get(self, req, model):
        """Raw ``fields_get()`` of ``model``, evaluated in the request context."""
        Model = req.session.model(model)
        fields = Model.fields_get(False, req.session.eval_context(req.context))
        return fields

    @openerpweb.httprequest
    def detect_data(self, req, csvfile, csvsep=',', csvdel='"', csvcode='utf-8', jsonp='callback'):
        """Parse the uploaded ``csvfile`` and return (via JSONP) its first
        ten records for preview, or an error payload if parsing or decoding
        with ``csvcode`` fails."""
        try:
            data = list(csv.reader(
                csvfile, quotechar=str(csvdel), delimiter=str(csvsep)))
        except csv.Error, e:
            csvfile.seek(0)
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({'error': {
                    'message': 'Error parsing CSV file: %s' % e,
                    # decodes each byte to a unicode character, which may or
                    # may not be printable, but decoding will succeed.
                    # Otherwise simplejson will try to decode the `str` using
                    # utf-8, which is very likely to blow up on characters out
                    # of the ascii range (in range [128, 256))
                    'preview': csvfile.read(200).decode('iso-8859-1')}}))

        try:
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps(
                    {'records': data[:10]}, encoding=csvcode))
        except UnicodeDecodeError:
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({
                    'message': u"Failed to decode CSV file using encoding %s, "
                               u"try switching to a different encoding" % csvcode
                }))

    @openerpweb.httprequest
    def import_data(self, req, model, csvfile, csvsep, csvdel, csvcode, jsonp,
                    meta):
        """Import the uploaded ``csvfile`` into ``model``.

        ``meta`` is a JSON object carrying ``skip`` (header lines to drop),
        ``indices`` (columns to keep), ``fields`` (target field paths) and
        ``context``. The response is a JSONP ``<script>`` snippet with
        either ``{'success': True}`` or ``{'error': {...}}``.
        """
        skip, indices, fields, context = \
            operator.itemgetter('skip', 'indices', 'fields', 'context')(
                simplejson.loads(meta, object_hook=common.nonliterals.non_literal_decoder))

        # Validate user-supplied parameters before touching the file.
        error = None
        if not (csvdel and len(csvdel) == 1):
            error = u"The CSV delimiter must be a single character"

        if not indices and fields:
            error = u"You must select at least one field to import"

        if error:
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({'error': {'message': error}}))

        # skip ignored records (@skip parameter)
        # then skip empty lines (not valid csv)
        # nb: should these operations be reverted?
        rows_to_import = itertools.ifilter(
            None,
            itertools.islice(
                csv.reader(csvfile, quotechar=str(csvdel), delimiter=str(csvsep)),
                skip, None))

        # if only one index, itemgetter will return an atom rather than a tuple
        if len(indices) == 1: mapper = lambda row: [row[indices[0]]]
        else: mapper = operator.itemgetter(*indices)

        data = None
        error = None
        try:
            # decode each data row
            data = [
                [record.decode(csvcode) for record in row]
                for row in itertools.imap(mapper, rows_to_import)
                # don't insert completely empty rows (can happen due to fields
                # filtering in case of e.g. o2m content rows)
                if any(row)
            ]
        except UnicodeDecodeError, e:
            # decode with iso-8859-1 for error display: always works-ish
            error = u"Failed to decode cell %r using encoding %s: '%s'" % (
                e.object, e.encoding, e.reason)
        except csv.Error, e:
            error = u"Could not process CSV file: %s" % e

        # If the file contains nothing, and the import has not already blown up
        if not (error or data):
            error = u"File to import is empty"
        if error:
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({'error': {'message': error}}))

        try:
            # import_data returns (record_count, failed_record, message, _);
            # a count of -1 signals failure.
            (code, record, message, _nope) = req.session.model(model).import_data(
                fields, data, 'init', '', False,
                req.session.eval_context(context))
        except xmlrpclib.Fault, e:
            error = {"message": u"%s, %s" % (e.faultCode, e.faultString)}
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({'error': error}))

        if code != -1:
            return '<script>window.top.%s(%s);</script>' % (
                jsonp, simplejson.dumps({'success': True}))

        msg = u"Error during import: %s\n\nTrying to import record %r" % (
            message, record)
        return '<script>window.top.%s(%s);</script>' % (
            jsonp, simplejson.dumps({'error': {'message': msg}}))
| iw3hxn/LibrERP | web_client/ea_web-github/addons/web/controllers/main.py | Python | agpl-3.0 | 81,900 | [
"VisIt"
] | acbfcc0b1f15d65bc53d6a200573d0b0a2239addc28e045ecb2ebc1a16f83f14 |
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from os import path
from collections import OrderedDict
from IPython.display import display, clear_output
from matplotlib import pyplot as plt
from matplotlib import transforms
from matplotlib import rcParams
from numpy import linspace
import ipywidgets as widgets
from pysces import ModelMap
from pysces import output_dir as psc_out_dir
import pysces
import gzip
import pickle as pickle
from ..misc import *
from ...latextools import LatexExpr
from ... import modeltools
def save_data2d(data_2dobj, file_name):
    """
    Saves a Data2D object to a gzipped cPickle to a specified file name.

    The live model on ``data_2dobj.mod`` is not picklable, so it is
    temporarily replaced by its model-file name for the duration of the
    dump and restored afterwards — even if pickling fails (the original
    code left the object clobbered on error).
    """
    mod = data_2dobj.mod
    data_2dobj.mod = data_2dobj.mod.ModelFile
    try:
        with gzip.open(file_name, 'wb') as f:
            pickle.dump(data_2dobj, f)
    finally:
        # Restore the live model object unconditionally.
        data_2dobj.mod = mod
def load_data2d(file_name, mod=None, ltxe=None):
    """
    Loads a gzipped cPickle file containing a Data2D object.

    The pickle stores the model-file name in place of the live model; a
    live model is swapped back in here. Passing ``mod`` avoids
    re-instantiating the model (useful when many data files reference the
    same model); likewise a shared ``LatexExpr`` can be supplied as
    ``ltxe`` to save memory.
    """
    with gzip.open(file_name, 'rb') as pickle_file:
        loaded_obj = pickle.load(pickle_file)
    # Replace the stored model-file name with a live model object.
    loaded_obj.mod = mod if mod else pysces.model(loaded_obj.mod)
    if ltxe:
        # Drop the unpickled expression helper and share the provided one.
        del loaded_obj._ltxe
        loaded_obj._ltxe = ltxe
    return loaded_obj
#matplotlib 1.5 breaks set_color_cycle functionality
#now we need cycler
from matplotlib import __version__ as mpl_version

# True when matplotlib >= 1.5, i.e. color cycles must go through ``cycler``.
use_cycler = False

# NOTE(review): ``distutils`` is deprecated (removed in Python 3.12);
# this version comparison should eventually move to ``packaging.version``.
from distutils.version import LooseVersion
if LooseVersion(mpl_version) >= LooseVersion('1.5.0'):
    from cycler import cycler
    use_cycler = True

# Silenced wrapper around PySCeS' labelled-array CSV writer, used by the
# ``save_results`` methods of the data classes below.
exportLAWH = silence_print(pysces.write.exportLabelledArrayWithHeader)

"""
This whole module is fd in the a
"""

# Public API of this module.
__all__ = ['LineData',
           'ScanFig',
           'Data2D',
           'load_data2d',
           'save_data2d',
           'SimpleData2D']
def _add_legend_viewlim(ax, **kwargs):
    """ Reset the legend in ax to only display lines that are
    currently visible in plot area

    Any ``kwargs`` are forwarded to ``ax.legend``. Returns the new
    ``Legend`` when at least one line is visible inside the view limits;
    otherwise hides the existing legend and returns None.
    """
    # THIS FUNCTION COMES FROM
    # http://matplotlib.1069221.n5.nabble.com/
    # Re-Limit-legend-to-visible-data-td18335.html
    label_objs = []
    label_texts = []
    # print "viewLim:", ax.viewLim
    for line in ax.lines:
        line_label = line.get_label()
        # Labels starting with "_" are matplotlib's convention for
        # "exclude from legend".
        cond = line.get_visible() and \
               line_label and not line_label.startswith("_")
        if cond:
            # Only keep the line if its data bounding box intersects the
            # current view limits.
            line_bbox = transforms.Bbox.unit()
            line_bbox.update_from_data_xy(line.get_xydata())
            if ax.viewLim.overlaps(line_bbox):
                # print line_label, line_bbox
                label_objs.append(line)
                label_texts.append(line_label)
    if label_objs:
        return ax.legend(label_objs, label_texts, **kwargs)
    elif ax.get_legend():
        ax.get_legend().set_visible(False)
    else:
        # No legend exists yet: create one purely so it can be hidden,
        # keeping later get_legend() calls consistent.
        ax.legend().set_visible(False)
class LineData(object):
    """
    An object that contains data and metadata used by ``ScanFig`` to draw a
    ``matplotlib`` line with interactivity.

    This object is used to initialise a ``ScanFig`` object together with a
    ``Data2D`` object. Once a ``ScanFig`` instance is initialised, the
    ``LineData`` objects are saved in a list ``_raw_line_data``. Changing
    any values there will have no effect on the output of the ``ScanFig``
    instance. Actual x,y data, ``matplotlib`` line metadata, and ``ScanFig``
    category metadata is stored.

    Parameters
    ----------
    name : str
        The name of the line. Will be used as a label if none is specified.
    x_data : array_like
        The x data.
    y_data : array_like
        The y data.
    categories : list, optional
        A list of categories that a line falls into. This will be used by
        ScanFig to draw buttons that enable/disable the line. Defaults to
        ``[name]``.
    properties : dict, optional
        A dictionary of properties of the line to be drawn. This dictionary
        will be used by the generic ``set()`` function of
        ``matplotlib.Lines.Line2D`` to set the properties of the line.

    See Also
    --------
    ScanFig
    Data2D
    RateChar
    """

    def __init__(self, name, x_data, y_data, categories=None, properties=None):
        super(LineData, self).__init__()
        self.name = name
        self.x = x_data
        self.y = y_data
        # A line always falls at least into its own category.
        if categories:
            self.categories = categories
        else:
            self.categories = [self.name]
        if properties:
            self.properties = properties
        else:
            self.properties = {}

        self._update_attach_properties()

    def _update_attach_properties(self):
        """
        Attaches all properties in ``self.properties`` to the ``self``
        namespace.
        """
        # TODO Figure out why the properties are (or need to be) attached
        # in this way. It seems unnecessary.
        for k, v in self.properties.items():
            setattr(self, k, v)

    def add_property(self, key, value):
        """
        Adds a property to the ``properties`` dictionary of the
        ``LineData`` object.

        The ``properties`` dictionary of ``LineData`` will be used by the
        generic ``set()`` function of ``matplotlib.Lines.Line2D``
        to set the properties of the line.

        Parameters
        ----------
        key : str
            The name of the ``matplotlib.Lines.Line2D`` property to be set.
        value : sting, int, bool
            The value of the property to be set. The type depends on the
            property.
        """
        # BUGFIX: the original called ``update({key, value})`` with a SET
        # literal, which raises ValueError instead of storing the pair.
        self.properties.update({key: value})
        self._update_attach_properties()
class SimpleData2D(object):
    """Minimal wrapper around 2D scan/simulation results.

    Like ``Data2D`` but without model-derived categorisation: the first
    column of ``data_array`` is the scan input, the remaining columns are
    outputs. Provides ``plot()`` (returns a ``ScanFig``) and
    ``save_results()``.
    """

    def __init__(self, column_names, data_array, mod=None):
        super(SimpleData2D, self).__init__()
        self.mod = mod
        # A LaTeX expression formatter is only available when a model is
        # supplied; otherwise raw column names are used as labels.
        if self.mod:
            self._ltxe = LatexExpr(mod)
        else:
            self._ltxe = None

        # Column 0 is the scan input; the rest are scan outputs.
        self.scan_results = DotDict()
        self.scan_results['scan_in'] = column_names[0]
        self.scan_results['scan_out'] = column_names[1:]
        self.scan_results['scan_range'] = data_array[:, 0]
        self.scan_results['scan_results'] = data_array[:, 1:]
        self.scan_results['scan_points'] = len(self.scan_results.scan_range)
        self._column_names = column_names
        self._scan_results = data_array
        self._setup_lines()

    def _setup_lines(self):
        """
        Sets up ``LineData`` objects that will be used to populate ``ScanFig``
        objects created by the ``plot`` method of ``Data2D``. These objects
        are stored in a list: ``self._lines``

        ``ScanFig`` takes a list of ``LineData`` objects as an argument and
        this method sets up that list. The ``self._column_categories``
        dictionary is used here.
        """
        lines = []
        for i, each in enumerate(self.scan_results.scan_out):
            if self._ltxe:
                label = self._ltxe.expression_to_latex(each)
            else:
                label = each
            # Each output column becomes one line, in its own category.
            line = LineData(name=each,
                            x_data=self.scan_results.scan_range,
                            y_data=self.scan_results.scan_results[:, i],
                            categories=[each],
                            properties={'label': '$%s$' % (label),
                                        'linewidth': 1.6})
            lines.append(line)

        self._lines = lines

    def plot(self):
        """
        Creates a ``ScanFig`` object using the data stored in the current
        instance of ``Data2D``

        Returns
        -------
        ``ScanFig``
            A `ScanFig`` object that is used to visualise results.
        """
        base_name = 'scan_fig'

        scan_fig = ScanFig(self._lines,
                           base_name=base_name,
                           ax_properties={'xlabel':
                                          self.scan_results.scan_in})
        return scan_fig

    def save_results(self, file_name=None, separator=',', fmt='%f'):
        """
        Saves data stores in current instance of ``Data2D`` as a comma
        separated file.

        Parameters
        ----------
        file_name : str, Optional (Default : None)
            The file name, extension and path under which data should be saved.
            If None the name will default to scan_data.csv and will be saved
            either under the directory specified under the directory specified
            in ``folder``.
        separator : str, Optional (Default : ',')
            The symbol which should be used to separate values in the output
            file.
        fmt : str, Optional (Default : '%f')
            Format for the data.
        """
        file_name = modeltools.get_file_path(working_dir=None,
                                             internal_filename='scan_fig',
                                             fmt='csv',
                                             fixed=self.scan_results.scan_in,
                                             file_name=file_name)
        scan_results = self._scan_results
        column_names = self._column_names
        try:
            exportLAWH(scan_results,
                       names=None,
                       header=column_names,
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            # Best effort: report the failure without raising.
            print(e.strerror)
class Data2D(object):
    """
    An object that wraps results from a PySCeS parameter scan.

    Results from parameter scan or timecourse are used to initialise this
    object which in turn is used to create a ``ScanFig`` object. Here results
    can easily be accessed and saved to disk.

    The ``Data2D`` is also responsible for setting up a ``ScanFig`` object from
    analysis results and therefore contains optional parameters for setting
    up this object.

    Parameters
    ----------
    mod : PysMod
        The model for which the parameter scan was performed.
    column_names : list of str
        The names of each column in the data_array. Columns should be arranged
        with the input values (scan_in, time) in the first column and the
        output values (scan_out) in the columns that follow.
    data_array : ndarray
        An array containing results from a parameter scan or tome simulation.
        Arranged as described above.
    ltxe : LatexExpr, optional (Default : None)
        A LatexExpr object that is used to convert PySCeS compatible
        expressions to LaTeX math. If None is supplied a new LatexExpr object
        will be instantiated. Sharing a single instance saves memory.
    analysis_method : str, Optional (Default : None)
        A string that indicates the name of the analysis method used to
        generate the results that populate ``Data2D``. This will determine
        where results are saved by ``Data2D`` as well as any ``ScanFig``
        objects that are produced by it.
    ax_properties : dict, Optional (Default : None)
        A dictionary of properties that will be used by ``ScanFig`` to adjust
        the appearance of plots. These properties should compatible with
        ``matplotlib.axes.AxesSubplot'' object in a way that its ``set``
        method can be used to change its properties. If none, a default
        ``ScanFig`` object is produced by the ``plot`` method.
    file_name : str, Optional (Default : None)
        The name that should be prepended to files produced any ``ScanFig``
        objects produced by ``Data2D``. If None, defaults to 'scan_fig'.
    additional_cat_classes : dict, Optional (Default : None)
        A dictionary containing additional line class categories for
        ``ScanFig`` construction. (k = category class; v = line categories)
    additional_cats : dict, Optional (Default : None)
        A dictionary that defines additional result categories as well as the
        lines that fall into these categories. (k = line category, v =
        lines in category).
    num_of_groups : int, Optional (Default : None)
        A number that defines the number of groups of lines. Used to ensure
        that the lines that are closely related (e.g. elasticities for one
        reaction) have colors assigned to them that are easily differentiable.
    working_dir : str, Optional (Default : None)
        This string sets the working directory directly and if provided
        supersedes ``analysis_method``.
    category_manifest : dict, Optional (Default : None)
        Categories to toggle on/off on any ``ScanFig`` produced by ``plot``.
    axvline : bool, Optional (Default : True)
        Draw a vertical marker at the model's current value of the scanned
        parameter.

    See Also
    --------
    ScanFig
    Data2D
    RateChar
    """

    def __init__(self,
                 mod,
                 column_names,
                 data_array,
                 ltxe=None,
                 analysis_method=None,
                 ax_properties=None,
                 file_name=None,
                 additional_cat_classes=None,
                 additional_cats=None,
                 num_of_groups=None,
                 working_dir=None,
                 category_manifest=None,
                 axvline=True):
        # Column 0 is the scan input; the rest are scan outputs.
        self.scan_results = DotDict()
        self.scan_results['scan_in'] = column_names[0]
        self.scan_results['scan_out'] = column_names[1:]
        self.scan_results['scan_range'] = data_array[:, 0]
        self.scan_results['scan_results'] = data_array[:, 1:]
        self.scan_results['scan_points'] = len(self.scan_results.scan_range)
        self._column_names = column_names
        self._scan_results = data_array
        if not category_manifest:
            category_manifest = {}
        self._category_manifest = category_manifest

        self.mod = mod
        scan_in = self.scan_results.scan_in
        # Infer the analysis method from the scanned quantity when it is
        # not given explicitly.
        if not analysis_method:
            if scan_in.lower() == 'time':
                analysis_method = 'simulation'
            elif hasattr(self.mod, scan_in):
                analysis_method = 'parameter_scan'
            else:
                analysis_method = 'custom'
        self._analysis_method = analysis_method

        if scan_in.lower() != 'time':
            # Best-effort: make sure response coefficients are available
            # for categorisation; failure here is non-fatal.
            try:
                self.mod.doMcaRC()
            except Exception:
                pass

        # BUGFIX: always define ``_vline_val`` — the original only assigned
        # it when ``axvline`` was truthy, so ``plot()`` raised
        # AttributeError for ``Data2D(..., axvline=False)``.
        self._vline_val = None
        if axvline and scan_in.lower() != 'time' and hasattr(self.mod, scan_in):
            self._vline_val = getattr(self.mod, scan_in)

        if not ltxe:
            ltxe = LatexExpr(mod)
        self._ltxe = ltxe

        #TODO check if this is even needed
        self._fname_specified = False
        if not file_name:
            self._fname = 'scan_data'
        else:
            self._fname = file_name
            self._fname_specified = True

        #This is here specifically for the do_mca_scan method of pysces.
        if not working_dir:
            working_dir = modeltools.make_path(mod=self.mod,
                                               analysis_method=self._analysis_method)
        self._working_dir = working_dir

        self._ax_properties_ = ax_properties

        # ScanFig arranges its toggle buttons in two levels: each line falls
        # into one or more *categories* (e.g. 'Control Coefficients', or the
        # line's own name) and categories are grouped under *category
        # classes* (e.g. 'All Coefficients'). ``_category_classes`` maps
        # class -> categories; ``_scan_types`` maps category -> lines, and
        # is merged into ``_category_classes`` so every line also gets its
        # own button.
        if not additional_cat_classes:
            additional_cat_classes = {}
        self._additional_cat_classes = additional_cat_classes

        if not additional_cats:
            additional_cats = {}
        self._additional_cats = additional_cats

        self._setup_lines()
        if num_of_groups:
            # Reorder lines so related lines get distinguishable colors.
            self._lines = group_sort(self._lines, num_of_groups)

    @property
    def _category_classes(self):
        """Mapping of ScanFig button-group label -> list of categories,
        merged with any user-supplied ``additional_cat_classes``."""
        category_classes = OrderedDict([('All Coefficients',
                                        ['Elasticity Coefficients',
                                         'Control Coefficients',
                                         'Response Coefficients',
                                         'Partial Response Coefficients',
                                         'Control Patterns']),
                                       ('All Fluxes/Reactions/Species/Parameters',
                                        ['Flux Rates',
                                         'Reaction Rates',
                                         'Species Concentrations',
                                         'Steady-State Species Concentrations',
                                         'Parameters'])])

        additional_cat_classes = self._additional_cat_classes
        for k, v in additional_cat_classes.items():
            if k in category_classes:
                # Merge without duplicating existing categories.
                lst = category_classes[k]
                new_lst = list(set(lst + v))
                category_classes[k] = new_lst
            else:
                category_classes[k] = v
        # Give each individual category its own button as well.
        category_classes.update(self._scan_types)
        return category_classes

    @property
    def _scan_types(self):
        """Mapping of result category -> list of line names belonging to it,
        derived from the model, merged with ``additional_cats``."""
        scan_types = OrderedDict([
            ('Flux Rates', ['J_' + reaction for reaction in self.mod.reactions]),
            ('Reaction Rates', [reaction for reaction in self.mod.reactions]),
            ('Species Concentrations', self.mod.species + self.mod.fixed_species),
            ('Steady-State Species Concentrations', [sp + '_ss' for sp in self.mod.species]),
            ('Elasticity Coefficients', ec_list(self.mod)),
            ('Control Coefficients', cc_list(self.mod)),
            ('Response Coefficients', rc_list(self.mod)),
            ('Partial Response Coefficients', prc_list(self.mod)),
            # Control patterns are numbered CP001, CP002, ...
            ('Control Patterns', ['CP{:3}'.format(n).replace(' ', '0')
                                  for n in range(1, len(self._column_names))]),
            ('Parameters', self.mod.parameters)])

        additional_cats = self._additional_cats
        if additional_cats:
            for k, v in additional_cats.items():
                if k in scan_types:
                    lst = scan_types[k]
                    new_lst = list(set(lst + v))
                    scan_types[k] = new_lst
                else:
                    scan_types[k] = v
        return scan_types

    @property
    def _column_categories(self):
        """
        This method sets up the categories for each data column stored by this
        object.

        Each line falls into its own category as well as another category
        depending on what type of data it represents. So 'Species1' will
        fall into the category 'Species1' as well as 'Species Concentrations'.
        Both corresponding ``ScanFig`` buttons must be toggled on for the
        line to be visible.
        """
        scan_types = self._scan_types
        column_categories = {}
        for column in self.scan_results.scan_out:
            column_categories[column] = [column]
            for k, v in scan_types.items():
                if column in v:
                    # Only the first matching scan type is attached to a
                    # column; ordering of ``scan_types`` decides ties.
                    column_categories[column].append(k)
                    break
        return column_categories

    def _setup_lines(self):
        """
        Sets up ``LineData`` objects that will be used to populate ``ScanFig``
        objects created by the ``plot`` method of ``Data2D``. These objects
        are stored in a list: ``self._lines``
        """
        _column_categories = self._column_categories
        lines = []
        for i, each in enumerate(self.scan_results.scan_out):
            line = LineData(name=each,
                            x_data=self.scan_results.scan_range,
                            y_data=self.scan_results.scan_results[:, i],
                            categories=_column_categories[each],
                            properties={'label':
                                        '$%s$' %
                                        (self._ltxe.expression_to_latex(
                                            each)),
                                        'linewidth': 1.6})
            lines.append(line)

        self._lines = lines

    @property
    def _ax_properties(self):
        """
        Internal property of ``Data2D``. If no ``ax_properties`` argument is
        specified in __init__ this property defines the xlabel of the
        ``ScanFig`` object depending on the value of ``self.scan_in``.
        """
        if not self._ax_properties_:
            self._ax_properties_ = {'xlabel': self._x_name}
        return self._ax_properties_

    @property
    def _x_name(self):
        """Human-readable x-axis label derived from the scanned quantity."""
        mm = ModelMap(self.mod)
        species = mm.hasSpecies()
        x_name = ''
        # TODO Enable lower case "time" as well as making generic for minutes/hours
        if self.scan_results.scan_in.lower() == 'time':
            x_name = 'Time'
        elif self.scan_results.scan_in in species:
            # Species are conventionally labelled as concentrations.
            x_name = '[%s]' % self.scan_results.scan_in
        elif self.scan_results.scan_in in self.mod.parameters:
            x_name = self.scan_results.scan_in
        return x_name

    def plot(self):
        """
        Creates a ``ScanFig`` object using the data stored in the current
        instance of ``Data2D``

        Returns
        -------
        ``ScanFig``
            A `ScanFig`` object that is used to visualise results.
        """
        if self._fname_specified:
            base_name = self._fname
        else:
            base_name = 'scan_fig'

        scan_fig = ScanFig(self._lines,
                           category_classes=self._category_classes,
                           ax_properties=self._ax_properties,
                           working_dir=path.join(self._working_dir,
                                                 self.scan_results.scan_in, ),
                           base_name=base_name, )
        # Apply any requested category visibility toggles.
        for k, v in self._category_manifest.items():
            scan_fig.toggle_category(k, v)
        if self._vline_val:
            # Mark the model's current value of the scanned parameter.
            scan_fig.ax.axvline(self._vline_val, ls=':', color='gray')
        return scan_fig

    def save_results(self, file_name=None, separator=',', fmt='%f'):
        """
        Saves data stores in current instance of ``Data2D`` as a comma
        separated file.

        Parameters
        ----------
        file_name : str, Optional (Default : None)
            The file name, extension and path under which data should be saved.
            If None the name will default to scan_data.csv and will be saved
            under the working directory of this object.
        separator : str, Optional (Default : ',')
            The symbol which should be used to separate values in the output
            file.
        fmt : str, Optional (Default : '%f')
            Format for the data.
        """
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename=self._fname,
                                             fmt='csv',
                                             fixed=self.scan_results.scan_in,
                                             file_name=file_name)
        scan_results = self._scan_results
        column_names = self._column_names
        try:
            exportLAWH(scan_results,
                       names=None,
                       header=column_names,
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            # Best effort: report the failure without raising.
            print(e.strerror)
class ScanFig(object):
    """
    Uses data in the form of a list of LineData objects to display interactive
    plots.

    Interactive plots can be customised in terms of which data is visible at
    any one time by simply clicking a button to toggle a line. Matplotlib
    figures are used internally, therefore ScanFig figures can be altered
    by changing the properties of the internal figure.

    Parameters
    ----------
    line_data_list : list of LineData objects
        A LineData object contains the information needed to draw a single
        curve on a matplotlib figure. Here a list of these objects are used
        to populate the internal matplotlib figure with the various curves
        that represent the results of a parameter scan or simulation.
    category_classes : dict, Optional (Default : None)
        Each line on a ScanFig plot falls into a different category. Each of
        these categories in turn fall into a different class. Each category
        represents a button which toggles the lines which fall into the
        category while the button is arranged under a label which is
        represented by a category class. Each key in this dict is a category
        class and the value is a list of categories that fall into this class.
        If None all categories will fall into the same class.
    fig_properties : dict, Optional (Default : None)
        A dictionary of properties that will be used to adjust the appearance
        of the figure. These properties should be compatible with a
        ``matplotlib.figure.Figure`` object in a way that its ``set``
        method can be used to change its properties. If None, default
        matplotlib figure properties will be used.
    ax_properties : dict, Optional (Default : None)
        A dictionary of properties that will be used to adjust the appearance
        of plot axes. These properties should be compatible with a
        ``matplotlib.axes.AxesSubplot`` object in a way that its ``set``
        method can be used to change its properties. If None default matplotlib
        axes properties will be used.
    base_name : str, Optional (Default : None)
        Base name that will be used when an image is saved by ``ScanFig``. If
        None, then ``scan_fig`` will be used.
    working_dir : str, Optional (Default : None)
        The directory in which files figures will be saved. If None, then it
        will default to the directory specified in ``pysces.output_dir``.

    See Also
    --------
    LineData
    Data2D
    """

    def __init__(self, line_data_list,
                 category_classes=None,
                 fig_properties=None,
                 ax_properties=None,
                 base_name=None,
                 working_dir=None):
        super(ScanFig, self).__init__()
        # Backing fields for the lazily-built properties below
        # (_categories, categories_status, _lines, _widgets,
        # _figure_widgets). Each is populated on first access.
        self._categories_ = None
        self._categories_status = None
        self._lines_ = None
        self._widgets_ = None
        self._figure_widgets_ = None
        self._raw_line_data = line_data_list
        # Separate ipywidgets Output areas for the figure, the line-toggle
        # buttons and the axis-adjustment controls.
        self._figure_output = widgets.Output()
        self._widget_output = widgets.Output()
        self._figure_widget_output = widgets.Output()
        # figure setup
        plt.ioff()
        # inline displays figures smaller than nbAgg for some reason
        if 'backend_inline' in rcParams['backend']:
            self.fig = plt.figure(figsize=(10, 5.7))
            rcParams.update({'font.size': 16})
        else:
            self.fig = plt.figure(figsize=(7, 5))
        if fig_properties:
            self.fig.set(**fig_properties)
        # axis setup
        self.ax = self.fig.add_subplot(111)
        if ax_properties:
            self.ax.set(**ax_properties)
        # colourmap_setup
        # at the moment this is very basic and could be expanded
        # it would be useful to set it up based on category somehow
        cmap = plt.get_cmap('Set1')(
            linspace(0, 1.0, len(line_data_list)))
        if use_cycler:
            col_cycler = cycler('color',cmap)
            self.ax.set_prop_cycle(col_cycler)
        else:
            # fallback for older matplotlib versions without set_prop_cycle
            self.ax.set_color_cycle(cmap)
        if category_classes:
            # Keep only the categories that actually occur in the data and
            # group them under their user-supplied class labels.
            new_cat_classes = OrderedDict()
            for k, v in category_classes.items():
                for each in self._categories.keys():
                    if each in v:
                        if not k in new_cat_classes:
                            new_cat_classes[k] = []
                        new_cat_classes[k].append(each)
            self._category_classes = new_cat_classes
        else:
            # no classes supplied: put every category under one blank label
            self._category_classes = {'': [k for k in self._categories]}
        if base_name:
            self._base_name = base_name
        else:
            self._base_name = 'scan_fig'
        if working_dir:
            self._working_dir = working_dir
        else:
            self._working_dir = psc_out_dir
        # Touch the lazy property so that all lines are drawn (invisible)
        # on the axes before the figure is potentially closed below.
        self._lines
        if 'backend_inline' in rcParams['backend']:
            plt.close()
        self._save_button_ = None

    @property
    def _save_button(self):
        # Lazily-created "Save" button that writes the figure to disk.
        if not self._save_button_:
            def save(clicked):
                self.save()
            self._save_button_ = widgets.Button()
            self._save_button_.description = 'Save'
            self._save_button_.on_click(save)
        return self._save_button_

    def show(self):
        """
        Displays the figure.

        Depending on the matplotlib backend this function will either display
        the figure inline if running in an ``IPython`` notebook with the
        ``--pylab=inline`` switch or with the %matplotlib inline IPython line
        magic, alternately it will display the figure as determined by the
        ``rcParams['backend']`` option of ``matplotlib``. Either the inline or
        nbAgg backends are recommended.

        See Also
        --------
        interact
        adjust_figure
        """
        display(self._figure_output)
        self._redraw()

    def _redraw(self):
        # Rebuild the legend (limited to lines inside the current view
        # limits) and push the figure into its Output widget.
        _add_legend_viewlim(
            self.ax,
            bbox_to_anchor=(0, -0.17),
            ncol=5,
            loc=2,
            borderaxespad=0.)
        if not 'backend_inline' in rcParams['backend']:
            self.fig.tight_layout()  # need to rescale, nbAgg does not provide extra space for legend
        self._figure_output.clear_output(wait=True)
        with self._figure_output:
            display(self.fig)

    def save(self, file_name=None, dpi=None, fmt=None, include_legend=True):
        """
        Saves the figure in it's current configuration.

        Parameters
        ----------
        file_name : str, Optional (Default : None)
            The file name to be used. If None is provided the file will be saved
            to ``working_dir/base_name.fmt``
        dpi : int, Optional (Default : None)
            The dpi to use. Defaults to 180.
        fmt : str, Optional (Default : None)
            The image format to use. Defaults to ``svg``. If ``file_name``
            contains a valid extension it will supersede ``fmt``.
        include_legend : bool, Optional (Default : True)
            Whether the legend should be included in the saved image.
        """
        if not fmt:
            fmt = 'svg'
        if not dpi:
            dpi = 180
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename=self._base_name,
                                             fmt=fmt,
                                             file_name=file_name)
        # the extension of the resolved path wins over the fmt argument
        fmt = modeltools.get_fmt(file_name)
        if include_legend:
            self.fig.savefig(file_name,
                             format=fmt,
                             dpi=dpi,
                             bbox_extra_artists=(self.ax.get_legend(),),
                             bbox_inches='tight')
        else:
            # temporarily detach the legend so it is not rendered
            leg = self.ax.legend_
            self.ax.legend_ = None
            self.fig.savefig(file_name,
                             format=fmt,
                             dpi=dpi,)
            self.ax.legend_ = leg

    @property
    def _widgets(self):
        # Lazily builds one ToggleButton per category, grouped into HBox
        # containers per category class. Buttons are sorted alphabetically.
        if not self._widgets_:
            widget_classes = OrderedDict()
            for k in self._category_classes.keys():
                box = widgets.HBox()
                box.layout.display = 'flex-flow'
                widget_classes[k] = box

            def oc(cat):
                # factory so each callback closes over its own category
                def on_change(value):
                    self.toggle_category(cat, value['new'])
                    self._redraw()
                return on_change
            width = self._find_button_width()
            for each in self._categories:
                w = widgets.ToggleButton()
                w.description = each
                w.width = width
                w.value = self.categories_status[each]
                on_change = oc(each)
                w.observe(on_change, 'value')
                for k, v in self._category_classes.items():
                    if each in v:
                        widget_classes[k].children += (w),
            # this is needed to sort widgets according to alphabetical order
            for k, v in widget_classes.items():
                children_list = list(v.children)
                names = [getattr(widg, 'description')
                         for widg in children_list]
                names.sort()
                new_children_list = []
                for name in names:
                    for child in children_list:
                        if child.description == name:
                            new_children_list.append(child)
                v.children = tuple(new_children_list)
            self._widgets_ = widget_classes
        return self._widgets_

    @property
    def _figure_widgets(self):
        """
        Instantiates the widgets that will be used to adjust the figure.

        At the moment widgets for manipulating the following paramers
        are available:

            minimum and maximum x values on the x axis
            minimum and maximum y values on the y axis
            the scale of the x and y axis i.e. log vs linear

        The following are possible TODOs:

            figure size
            y label
            x label
            figure title
        """
        def convert_scale(val):
            """
            Converts between str and bool for the strings 'log' and 'linear'

            The string 'log' returns True, while True returns 'log'.
            The string 'linear' returns False, while False returns 'linear'

            Parameters
            ----------
            val : str, bool
                The value to convert.

            Returns
            -------
            value : str, bool
                The conversion of the parameter ``val``

            Examples
            --------
            >>> convert_scale('log')
            True
            >>> convert_scale(False)
            'linear'
            """
            if type(val) == bool:
                if val is True:
                    return 'log'
                elif val is False:
                    return 'linear'
            else:
                if val == 'log':
                    return True
                elif val == 'linear':
                    return False

        def c_v(val):
            # clamp non-positive limits so a log scale stays valid
            if val <= 0:
                return 0.001
            else:
                return val
        if not self._figure_widgets_:
            min_x = widgets.FloatText()
            max_x = widgets.FloatText()
            min_x.value, max_x.value = self.ax.get_xlim()
            min_x.description = 'min'
            max_x.description = 'max'
            min_y = widgets.FloatText()
            max_y = widgets.FloatText()
            min_y.value, max_y.value = self.ax.get_ylim()
            min_y.description = 'min'
            max_y.description = 'max'
            log_x = widgets.Checkbox()
            log_y = widgets.Checkbox()
            log_x.value = convert_scale(self.ax.get_xscale())
            log_y.value = convert_scale(self.ax.get_yscale())
            log_x.description = 'x_log'
            log_y.description = 'y_log'
            apply_btn = widgets.Button()
            apply_btn.description = 'Apply'

            def set_values(clicked):
                # push widget values onto the axes, then redraw
                if log_x.value is True:
                    min_x.value = c_v(min_x.value)
                    max_x.value = c_v(max_x.value)
                self.ax.set_xlim([min_x.value, max_x.value])
                if log_y.value is True:
                    min_y.value = c_v(min_y.value)
                    max_y.value = c_v(max_y.value)
                self.ax.set_ylim([min_y.value, max_y.value])
                self.ax.set_xscale(convert_scale(log_x.value))
                self.ax.set_yscale(convert_scale(log_y.value))
                self._redraw()
            apply_btn.on_click(set_values)
            x_lims = widgets.HBox(children=[min_x, max_x])
            y_lims = widgets.HBox(children=[min_y, max_y])
            lin_log = widgets.HBox(children=[log_x, log_y])
            apply_con = widgets.HBox(children=[apply_btn, self._save_button])
            _figure_widgets_ = OrderedDict()
            _figure_widgets_['X axis limits'] = x_lims
            _figure_widgets_['Y axis limits'] = y_lims
            _figure_widgets_['Axis scale'] = lin_log
            _figure_widgets_[' '] = apply_con
            self._figure_widgets_ = _figure_widgets_
        return self._figure_widgets_

    @property
    def _categories(self):
        # Maps each category name to the list of matplotlib Line2D objects
        # that belong to it (a line may appear under several categories).
        if not self._categories_:
            main_cats = []
            cats = []
            for each in self._raw_line_data:
                cats += each.categories
                main_cats.append(each.categories[0])
            cats = list(set(cats))
            cat_dict = {}
            for each in cats:
                cat_dict[each] = []
            for each in self._raw_line_data:
                line = self._lines[each.name]
                for cat in each.categories:
                    cat_dict[cat].append(line)
            self._categories_ = cat_dict
        return self._categories_

    @property
    def category_names(self):
        # Names of all line categories known to this figure.
        return list(self._categories.keys())

    @property
    def categories_status(self):
        # Visibility flag per category; everything starts hidden (False).
        if not self._categories_status:
            cat_stat_dict = {}
            for each in self._categories:
                cat_stat_dict[each] = False
            self._categories_status = cat_stat_dict
        return self._categories_status

    @property
    def _lines(self):
        # Lazily plots every LineData entry onto the axes (initially
        # invisible) and maps line name -> Line2D object.
        if not self._lines_:
            lines = {}
            for i, each in enumerate(self._raw_line_data):
                line, = self.ax.plot(each.x, each.y)
                # set width to a default width of 2
                # bc the default value of one is too low
                line.set_linewidth(2)
                if each.properties:
                    line.set(**each.properties)
                else:
                    line.set_label(each.name)
                line.set_visible(False)
                lines[each.name] = line
            self._lines_ = lines
        return self._lines_

    @property
    def line_names(self):
        # Sorted names of all plotted lines.
        lines = list(self._lines.keys())
        lines.sort()
        return lines

    def toggle_line(self, name, value):
        """
        Changes the visibility of a certain line.

        When used a specific line's visibility is changed according to the
        ``value`` provided.

        Parameters
        ----------
        name: str
            The name of the line to change.
        value: bool
            The visibility status to change the line to (True for visible,
            False for invisible).

        See Also
        --------
        toggle_category
        """
        self._lines[name].set_visible(value)

    def toggle_category(self, cat, value):
        """
        Changes the visibility of all the lines in a certain line category.

        When used all lines in the provided category's visibility is changed
        according to the ``value`` provided. A line that also belongs to
        other categories is only shown when every one of its categories is
        toggled on.

        Parameters
        ----------
        cat: str
            The name of the category to change.
        value: bool
            The visibility status to change the lines to (True for visible,
            False for invisible).

        See Also
        --------
        toggle_line
        """
        # get the visibility status of the category eg. True/False
        self.categories_status[cat] = value
        # get all the other categories
        other_cats = list(self._categories.keys())
        other_cats.pop(other_cats.index(cat))
        # self.categories is a dict with categories as keys
        # and list of lines that fall within a category
        # as a value. So for each line that falls in a cat
        for line in self._categories[cat]:
            # The visibility for a line has not changed at the start of
            # the loop
            in_other_cats = False
            # A line can also fall within another category
            other_cat_stats = []
            for each in other_cats:
                if line in self._categories[each]:
                    other_cat_stats.append(self.categories_status[each])
                    in_other_cats = True
            # If a line is never in any other categories
            # just set its visibility as it is dictated by
            # its category status.
            if in_other_cats:
                visibility = all([value] + other_cat_stats)
                line.set_visible(visibility)
            else:
                line.set_visible(value)

    def interact(self):
        """
        Displays the figure in a IPython/Jupyter notebook together with buttons
        to toggle the visibility of certain lines.

        See Also
        --------
        show
        adjust_figure
        """
        display(self._figure_output)
        self._redraw()
        display(self._widget_output)
        self._widget_output.clear_output()
        with self._widget_output:
            # one labelled row of toggle buttons per category class
            for k, v in self._widgets.items():
                if len(v.children) > 0:
                    head = widgets.Label(value=k)
                    display(head)
                    display(v)
                    v._css = [(None, 'flex-wrap', 'wrap'), ]
                    # v.remove_class('vbox')
                    # v.add_class('hbox')
                    # v.set_css({'flex-wrap': 'wrap'})
            # display(widgets.Label(value='$~$'))
            display(self._save_button)
        # sync the button states with the recorded category visibility
        for boxes in self._widgets.values():
            for button in boxes.children:
                button.value = self.categories_status[button.description]
        # self._save_button.remove_class('vbox')
        # self._save_button.add_class('hbox')

    def adjust_figure(self):
        """
        Provides widgets to set the limits and scale (log/linear) of the figure.

        As with ``interact``, the plot is displayed in the notebook. Here
        no widgets are provided the change the visibility of the data
        displayed on the plot, rather controls to set the limits and scale are
        provided.

        See Also
        --------
        show
        interact
        """
        display(self._figure_widget_output)
        with self._figure_widget_output:
            for k, v in self._figure_widgets.items():
                if len(v.children) > 0:
                    head = widgets.Label(value=k)
                    display(head)
                    display(v)
                    # v.remove_class('vbox')
                    # v.add_class('hbox')
                    v._css = [(None, 'flex-wrap', 'wrap'), ]
            # display(widgets.Label(value='$~$'))
            # display(self._save_button)
        self.show()
        # self._save_button.remove_class('vbox')
        # self._save_button.add_class('hbox')

    def _find_button_width(self):
        # Scale the toggle-button width with the longest category name so
        # long labels are not clipped; 145px fits up to 14 characters.
        longest = sorted([len(each) for each in self._categories])[-1]
        if longest > 14:
            width_px = (longest - 14) * 5 + 145
            width = str(width_px) + 'px'
        else:
            width = '145px'
        return width
| PySCeS/PyscesToolbox | psctb/utils/plotting/_plotting.py | Python | bsd-3-clause | 45,441 | [
"PySCeS"
] | a118ac5e63bd420e4d4b2d26abbe63e06913ba61a67c28cb6297d751ac3d8f1d |
#!/usr/bin/env python
# Copyright (C) 2017,2018
#     Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
#                                                                              #
# ESPResSo++ Python script for a quantum/classical path integral-based AdResS simulation #
#                                                                              #
################################################################################
import time as pytime
import espressopp
import mpi4py.MPI as MPI
from espressopp import Real3D
from espressopp.tools import decomp
# Performs a path integral-based quantum/classical adaptive resolution simulation
# following the methodology proposed in J. Chem. Phys 147, 244104 (2017)
# The example system is liquid water, modeled using the force field proposed
# in J. Chem. Theory Comput. 10, 816 (2014). In the classical region a WCA potential
# between the oxygens is used.The simulation setup is similar to those used in the JCP paper.
print('Performing an ESPResSo++ path integral-based quantum/classical adaptive resolution simulation.\n')
########################################################
# 1. specification of the system setup and simulation parameters #
########################################################
print('Specifying simulation parameters...')
steps =1000 # integration steps (outer steps of the multiple time stepping integrator)
intervals = 100 # intervals
timestep_short = 0.002/40.0 # the shortest timestep (interaction between Trotter beads on the ring polymers)
multiplier_short_to_medium = 10 # timestep_short * multiplier_short_to_medium gives the medium timestep (bonded interactions between atoms)
multiplier_medium_to_long = 4 # timestep_short * multiplier_short_to_medium * multiplier_medium_to_long gives the long timestep (non-bonded interactions between atoms)
interaction_cutoff = 0.84 # interaction cutoff for verletlist construction. Note: the verletlist is constucted using the ring polymers' centroids while the interaction takes place between the beads. Hence, this needs to be slightly larger (at least twice the ring polymers' radius of gyration) than the true potential cutoff
potential_cutoff = 0.78 # potential cutoff
skin = 0.1 # Verlet list skin
gamma = 2.0 # thermostat gamma
temp = 2.50266751 # thermostat temperature in reduced units
ex_size = 1.0 # redius of path integral region
hy_size = 1.5 # width of hybrid region
nTrotter = 32 # Trotter number
clmassmultiplier = 100.0 # classical mass multiplier with respect to real mass
constkinmass = False # speedup in classical region for interatomic interactions (centroids instead of all beads)?
PILE = True # Path Integral Langevin Equation thermostating (J. Chem. Phys. 133, 124104 (2010))? Only applicable when using real kinetic masses
PILElambda = 0.5 # PILE lambda parameter
realkinmass = True # real kinetic masses (for TRPMD)?
centroidthermostat = True # thermostat also the centroid (for CMD)?
CMDparameter = 1.0 # adiabatic decoupling gamma^2 for CMD
KTI = False # Kirkwood Thermodynamic Integration flag
speedupInterAtom = True # speedup in classical region for interatomic interactions (use centroids instead of all beads)?
speedupFreezeRings = False # speedup in classical region by freezing rings (skipping internal ring motion integration)? Note that, if set to True, the temperature will not be correct anymore, because freezing the rings in the CL region effectively reduces the number of degrees of freedom in the system
######################
# 2. read in coordinates #
######################
print('Reading in coordinates...')
# read equilibrated configuration file
pid, types, x, y, z, vx, vy, vz, Lx, Ly, Lz = espressopp.tools.readxyz("input.xyz")
# Make masses (type 1 = oxygen, otherwise hydrogen; values in g/mol)
masses = []
for item in types:
    if item == 1:
        masses.append(15.9994)
    else:
        masses.append(1.008)
# Tables for Free Energy Correction and Thermodynamic Force on hydrogen and oxygen atoms
tabFEC_H = "FEC_H.dat"
tabFEC_O = "FEC_O.dat"
tabTHDF_H = "ThdForce_H.dat"
tabTHDF_O = "ThdForce_O.dat"
# Tables for potentials (see J. Chem. Theory Comput. 10, 816 (2014))
tabAngle = "POTS/tableESP_angle.dat"
tabBondHH = "POTS/tableESP_bondHH.dat"
tabBondOH = "POTS/tableESP_bondOH.dat"
tabHW_HW = "POTS/tableESP_HW_HW.dat"
tabHW_OW = "POTS/tableESP_HW_OW.dat"
tabOW_OW = "POTS/tableESP_OW_OW.dat"
num_Trotter_beads = len(x) # total number of Trotter beads in the system
num_atoms = len(x)//nTrotter # total number of atoms in the system
size = (Lx, Ly, Lz) # size
#####################
# 3. set up the system #
#####################
print('Setting up system...')
# System, boundary conditions, skin, communicator, node & cell grids
system = espressopp.System()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, interaction_cutoff, skin)
# Random number generator (fixed seed for reproducibility)
rng = espressopp.esutil.RNG()
rng.seed(42)
system.rng = rng
# AdResS domain decomposition
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
##########################
# 4. add particles to system #
##########################
print('Adding particles and tuples...')
props = ['id', 'pos', 'v', 'f', 'pib', 'type', 'mass', 'adrat']
allParticlesAT = []
allParticles = []
tuples = []
# prepare trotter beads (add here these particles just temporarily)
for pid_trotter in range(num_Trotter_beads):
    allParticlesAT.append([pid_trotter + 1,
                           Real3D(x[pid_trotter], y[pid_trotter], z[pid_trotter]), # position
                           Real3D(vx[pid_trotter], vy[pid_trotter], vz[pid_trotter]), # velocity
                           Real3D(0, 0, 0), # force
                           pid_trotter%nTrotter + 1, types[pid_trotter], masses[pid_trotter], 1]) # pib, type, mass, is AT particle
# create atoms
for pid_atom in range(num_atoms):
    # Preparation of tuples (tuples define, which atoms/trotter beads belong to which CG molecules/atoms)
    tmptuple = [pid_atom+num_Trotter_beads+1]
    for pid_trotter in range(int(nTrotter)):
        pid = pid_atom*nTrotter+pid_trotter
        tmptuple.append((allParticlesAT[pid])[0])
    # atom position/velocity are initialised from its first Trotter bead
    firstParticleId=tmptuple[1]
    cmp=allParticlesAT[firstParticleId-1][1]
    cmv=allParticlesAT[firstParticleId-1][2]
    # append atomistic particles
    allParticles.append([pid_atom+num_Trotter_beads+1,
                         Real3D(cmp[0], cmp[1], cmp[2]), # pos
                         Real3D(cmv[0], cmv[1], cmv[2]), # vel
                         Real3D(0, 0, 0), # force
                         0, types[pid_atom*nTrotter], masses[pid_atom*nTrotter], 0]) # pib, type, mass, is not AT particle
    # append Trotter beads
    for pid_trotter in range(int(nTrotter)):
        pid = pid_atom*nTrotter+pid_trotter
        allParticles.append([(allParticlesAT[pid])[0],
                             (allParticlesAT[pid])[1], # pos
                             (allParticlesAT[pid])[2], # vel
                             (allParticlesAT[pid])[3], # force
                             (allParticlesAT[pid])[4], # pib
                             (allParticlesAT[pid])[5], # type
                             (allParticlesAT[pid])[6], # mass
                             (allParticlesAT[pid])[7]]) # is AT particle
    # append tuple to tuplelist
    tuples.append(tmptuple)
# add particles to system
system.storage.addParticles(allParticles, *props)
# create FixedTupleList object and add the tuples
ftpl = espressopp.FixedTupleListAdress(system.storage)
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
# decompose to distribute the particles correctly before adding interactions
system.storage.decompose()
###########################################
# 4. set up structure, interactions, and force field #
###########################################
print('Setting up interactions and force field...')
# create bond lists between atoms (each water: O followed by two H atoms)
bondsOH = []
bondsHH = []
for part in range(num_atoms//3):
    bondsOH.append((num_Trotter_beads + 1 + 3*part, num_Trotter_beads + 1 + 3*part+1))
    bondsOH.append((num_Trotter_beads + 1 + 3*part, num_Trotter_beads + 1 + 3*part+2))
    bondsHH.append((num_Trotter_beads + 1 + 3*part+1, num_Trotter_beads + 1 + 3*part+2))
# add bonds between atoms
fplOH = espressopp.FixedPairList(system.storage)
fplHH = espressopp.FixedPairList(system.storage)
fplOH.addBonds(bondsOH)
fplHH.addBonds(bondsHH)
# create the adaptive resolution verlet list
vl = espressopp.VerletListAdress(system, cutoff=interaction_cutoff, adrcut=interaction_cutoff,
                                 dEx=ex_size, dHy=hy_size,
                                 adrCenter=[Lx/2, Ly/2, Lz/2], exclusionlist=bondsOH+bondsHH)
# create angle list between atoms (H-O-H)
angles = []
for part in range(num_atoms//3):
    angles.append((num_Trotter_beads + 1 + 3*part+1, num_Trotter_beads + 1 + 3*part, num_Trotter_beads + 1 + 3*part+2))
# add angles between atoms
ftl = espressopp.FixedTripleList(system.storage)
ftl.addTriples(angles)
# non-bonded potentials
# inteaction first (we need specific PI AdResS interaction type)
interNB = espressopp.interaction.VerletListPIadressTabulatedLJ(vl, ftpl, nTrotter, speedupInterAtom)
# QM potential
potOOqm = espressopp.interaction.Tabulated(itype=3, filename=tabOW_OW, cutoff=potential_cutoff)
potHOqm = espressopp.interaction.Tabulated(itype=3, filename=tabHW_OW, cutoff=potential_cutoff)
potHHqm = espressopp.interaction.Tabulated(itype=3, filename=tabHW_HW, cutoff=potential_cutoff)
interNB.setPotentialQM(type1=1, type2=1, potential=potOOqm)
interNB.setPotentialQM(type1=1, type2=0, potential=potHOqm)
interNB.setPotentialQM(type1=0, type2=0, potential=potHHqm)
# WCA potential (purely repulsive LJ, cut at 2^(1/6)*sigma)
potOOcl = espressopp.interaction.LennardJones(epsilon=temp, sigma=0.25, shift='auto', cutoff=1.122462048309373*0.25)
interNB.setPotentialCL(type1=1, type2=1, potential=potOOcl)
# add interaction to system
system.addInteraction(interNB)
# bonded potentials
potBondHH = espressopp.interaction.Tabulated(itype=3, filename=tabBondHH)
potBondOH = espressopp.interaction.Tabulated(itype=3, filename=tabBondOH)
interBondedHH = espressopp.interaction.FixedPairListPIadressTabulated(system, fplHH, ftpl, potBondHH, nTrotter, speedupInterAtom)
interBondedOH = espressopp.interaction.FixedPairListPIadressTabulated(system, fplOH, ftpl, potBondOH, nTrotter, speedupInterAtom)
system.addInteraction(interBondedHH)
system.addInteraction(interBondedOH)
# angle potentials
potAngle = espressopp.interaction.TabulatedAngular(itype=3, filename=tabAngle)
interAngle = espressopp.interaction.FixedTripleListPIadressTabulatedAngular(system, ftl, ftpl, potAngle, nTrotter, speedupInterAtom)
system.addInteraction(interAngle)
#########################################
# 5. set up integration scheme and corrections #
#########################################
print('Setting up integration scheme (path integral-based adaptive resolution multiple time stepping integrator)...')
# path integral-based adaptive resolution multiple time stepping integrator
integrator = espressopp.integrator.PIAdressIntegrator(system=system, verletlist=vl, timestep=timestep_short, sSteps=multiplier_short_to_medium, mSteps=multiplier_medium_to_long, nTrotter=nTrotter, realKinMass=realkinmass, constKinMass=constkinmass, temperature=temp, gamma=gamma, centroidThermostat=centroidthermostat, CMDparameter=CMDparameter, PILE=PILE, PILElambda=PILElambda, CLmassmultiplier=clmassmultiplier, speedup=speedupFreezeRings, KTI=KTI)
# add Free Energy Correction for oxygen and hydrogen atoms
fec = espressopp.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2], ntrotter=nTrotter)
fec.addForce(itype=3, filename=tabFEC_O, type=1)
fec.addForce(itype=3, filename=tabFEC_H, type=0)
integrator.addExtension(fec)
# add Thermodynamic Force for oxygen and hydrogen atoms
thdf = espressopp.integrator.TDforce(system, vl)
thdf.addForce(itype=3, filename=tabTHDF_O, type=1)
thdf.addForce(itype=3, filename=tabTHDF_H, type=0)
integrator.addExtension(thdf)
# distribute path integral beads and atoms according to AdResS domain decomposition, place AT/CG particles into the ring polymers' centers of mass
espressopp.tools.AdressDecomp(system, integrator)
##########################
# 6. set up analysis routines #
##########################
print('Setting up analysis routines...')
# radius of gyration profiles
gyration_array_total_H = []
gyration_array_total_O = []
gyrationprofile = espressopp.analysis.RadGyrXProfilePI(system)
gyrationprofilegrid = 40
gyrationAdds_H = [0 for i in range(int(gyrationprofilegrid))]
gyrationAdds_O = [0 for i in range(int(gyrationprofilegrid))]
# OO rdf
rdf_array_total_OO = []
Adds_OO = 0.0
rdf_OO = espressopp.analysis.RDFatomistic(system, 1, 1, 0.75)
# OH rdf
rdf_array_total_OH = []
Adds_OH = 0.0
rdf_OH = espressopp.analysis.RDFatomistic(system, 1, 0, 0.75)
# HH rdf
rdf_array_total_HH = []
Adds_HH = 0.0
rdf_HH = espressopp.analysis.RDFatomistic(system, 0, 0, 0.75)
# grids and formatting for analysis routines
rdfgrid = 400
fmt_rdf = ' %12.8f %12.8f\n'
dr_rdf = Ly / (2.0*float(rdfgrid))
fmt_gyr = ' %12.8f %12.8f\n'
dr_gyr = Lx / float(gyrationprofilegrid)
# output log
outfile = open("esp.dat", "w")
###########################
# 7. print system information #
###########################
print('')
print("System setup done, information:")
print('')
print('PI-AdResS Center =', [Lx/2, Ly/2, Lz/2])
print('Size of high resolution full path integral region', ex_size)
print('Size of hybrid region', hy_size)
print('Trotter number =', integrator.getNtrotter())
print('Total number of Trotter beads =', num_Trotter_beads)
print('Total number of atoms =', num_atoms)
print('Atomistic density = %.4f' % (num_atoms / (Lx * Ly * Lz)))
print('Interaction cutoff =', interaction_cutoff)
print('Potential cutoff =', potential_cutoff)
print('Skin =', system.skin)
print('Short timestep =', integrator.getTimeStep())
print('Medium timestep =', integrator.getTimeStep() * integrator.getsStep())
print('Long timestep =', integrator.getTimeStep() * integrator.getmStep() * integrator.getsStep())
print('Outer steps =', steps)
print('Intervals =', intervals)
print('NodeGrid = %s' % (nodeGrid,))
print('CellGrid = %s' % (cellGrid,))
print('Temperature =', integrator.getTemperature())
print('Gamma =', integrator.getGamma())
print('Classical Mass Multiplier =', integrator.getClmassmultiplier())
print('Constant kinetic mass?', integrator.getConstKinMass())
print('Using (adaptive or constant) real kinetic masses?', integrator.getRealKinMass())
print('Path Integration Langevin Equation thermostating?', integrator.getPILE())
print('Path Integration Langevin Equation thermostating lambda =', integrator.getPILElambda())
print('Thermostating the centroid?', integrator.getCentroidThermostat())
print('CMD adiabadicity parameter =', integrator.getCMDparameter())
print('Running Kirkwood Thermodynamic Integration?', integrator.getKTI())
print('Using centers of mass in classical region for force calculations?', speedupInterAtom)
print('Freezing internal ring vibrations in classical region?', integrator.getSpeedup())
print('')
##################
# 8. run simulation #
##################
# timer, steps
nsteps = steps // intervals
start_time = pytime.process_time()
# output format for screen and file
print('Starting the integration loop...')
print('')
print('step, time (ps), temperature, E_bonds, E_angles, E_ringpolymer, E_nonbonded, E_kin, E_correction, E_total')
fmt = '%8d %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g'
# NOTE(review): fmt has no trailing newline, so successive outfile.write()
# calls below run together on one line in esp.dat — confirm this is intended.
# initial configuration analysis
Eb = interBondedOH.computeEnergy() + interBondedHH.computeEnergy()
EAng = interAngle.computeEnergy()
ELj= interNB.computeEnergy()
Ek = integrator.computeKineticEnergy()
# instantaneous temperature from equipartition (kB in kJ/(mol K))
T = Ek *2.0 / (0.00831451 * 3.0 * num_Trotter_beads)
EPI = integrator.computeRingEnergy()
Ecorr = fec.computeCompEnergy() + thdf.computeTDEnergy()
Etotal = Ek+Eb+EAng+ELj+EPI+Ecorr
outfile.write(fmt%(0, 0, T, Eb, EAng, EPI, ELj, Ek, Ecorr, Etotal))
print ((fmt%(0, 0, T, Eb, EAng, EPI, ELj, Ek, Ecorr, Etotal)))
# integration and on the fly analysis
for s in range(1, intervals + 1):
    integrator.run(nsteps)
    step = nsteps * s
    time = step * timestep_short * multiplier_medium_to_long * multiplier_short_to_medium
    Eb = interBondedOH.computeEnergy() + interBondedHH.computeEnergy()
    EAng = interAngle.computeEnergy()
    ELj= interNB.computeEnergy()
    Ek = integrator.computeKineticEnergy()
    T = Ek *2.0 / (0.00831451 * 3.0 * num_Trotter_beads)
    EPI = integrator.computeRingEnergy()
    Ecorr = fec.computeCompEnergy() + thdf.computeTDEnergy()
    Etotal = Ek+Eb+EAng+ELj+EPI+Ecorr
    outfile.write(fmt%(step, time, T, Eb, EAng, EPI, ELj, Ek, Ecorr, Etotal))
    print((fmt%(step, time, T, Eb, EAng, EPI, ELj, Ek, Ecorr, Etotal)))
    # accumulate rdfs and gyration profiles every 10th interval
    if s%10==0:
        rdf_array_OO = rdf_OO.computePathIntegral(rdfgrid)
        for i in range(len(rdf_array_OO)):
            if(i>=len(rdf_array_total_OO)):
                rdf_array_total_OO.append(rdf_array_OO[i])
            else:
                rdf_array_total_OO[i] += rdf_array_OO[i]
        Adds_OO += 1.0
        rdf_array_OH = rdf_OH.computePathIntegral(rdfgrid)
        for i in range(len(rdf_array_OH)):
            if(i>=len(rdf_array_total_OH)):
                rdf_array_total_OH.append(rdf_array_OH[i])
            else:
                rdf_array_total_OH[i] += rdf_array_OH[i]
        Adds_OH += 1.0
        rdf_array_HH = rdf_HH.computePathIntegral(rdfgrid)
        for i in range(len(rdf_array_HH)):
            if(i>=len(rdf_array_total_HH)):
                rdf_array_total_HH.append(rdf_array_HH[i])
            else:
                rdf_array_total_HH[i] += rdf_array_HH[i]
        Adds_HH += 1.0
        gyration_array_H = gyrationprofile.compute(gyrationprofilegrid, nTrotter, 0)
        for i in range(len(gyration_array_H)):
            if(i>=len(gyration_array_total_H)):
                gyration_array_total_H.append(gyration_array_H[i])
            else:
                gyration_array_total_H[i] += gyration_array_H[i]
            # count contributions per bin so empty bins don't skew the mean
            if(gyration_array_H[i] != 0.0):
                gyrationAdds_H[i] += 1.0
        gyration_array_O = gyrationprofile.compute(gyrationprofilegrid, nTrotter, 1)
        for i in range(len(gyration_array_O)):
            if(i>=len(gyration_array_total_O)):
                gyration_array_total_O.append(gyration_array_O[i])
            else:
                gyration_array_total_O[i] += gyration_array_O[i]
            if(gyration_array_O[i] != 0.0):
                gyrationAdds_O[i] += 1.0
# close output file
outfile.close()
###################################################
# 8. postprocess gathered analysis outputs and print to file #
###################################################
# print O-O rdf to file
for i in range(len(rdf_array_total_OO)):
    rdf_array_total_OO[i] /= Adds_OO
rdf_OO_file = open('rdf_profile_OO.dat', 'w')
for i in range(len(rdf_array_total_OO)):
    rdf_OO_file.write(fmt_rdf % ( (i+0.5)*dr_rdf, rdf_array_total_OO[i] ))
rdf_OO_file.close()
# print O-H rdf to file
for i in range(len(rdf_array_total_OH)):
    rdf_array_total_OH[i] /= Adds_OH
rdf_OH_file = open('rdf_profile_OH.dat', 'w')
for i in range(len(rdf_array_total_OH)):
    rdf_OH_file.write(fmt_rdf % ( (i+0.5)*dr_rdf, rdf_array_total_OH[i] ))
rdf_OH_file.close()
# print H-H rdf to file
for i in range(len(rdf_array_total_HH)):
    rdf_array_total_HH[i] /= Adds_HH
rdf_HH_file = open('rdf_profile_HH.dat', 'w')
for i in range(len(rdf_array_total_HH)):
    rdf_HH_file.write(fmt_rdf % ( (i+0.5)*dr_rdf, rdf_array_total_HH[i] ))
rdf_HH_file.close()
# print hydrogen radius of gyration profile to file
for i in range(len(gyration_array_total_H)):
    if(gyrationAdds_H[i] > 0.0):
        gyration_array_total_H[i] /= gyrationAdds_H[i]
rgyr_H_file = open('radgyr_profile_H.dat', 'w')
for i in range(len(gyration_array_total_H)):
    rgyr_H_file.write(fmt_gyr % ( (i+0.5)*dr_gyr, gyration_array_total_H[i] ))
rgyr_H_file.close()
# print oxygen radius of gyration profile to file
for i in range(len(gyration_array_total_O)):
    if(gyrationAdds_O[i] > 0.0):
        gyration_array_total_O[i] /= gyrationAdds_O[i]
rgyr_O_file = open('radgyr_profile_O.dat', 'w')
for i in range(len(gyration_array_total_O)):
    rgyr_O_file.write(fmt_gyr % ( (i+0.5)*dr_gyr, gyration_array_total_O[i] ))
rgyr_O_file.close()
###########
# 9. Done #
###########
end_time = pytime.process_time()
print('Successfully finished simulation.')
print('Run time = %.1f seconds' % (end_time - start_time))
| espressopp/espressopp | examples/adress/piadress_water/AdaptivePIwater.py | Python | gpl-3.0 | 21,464 | [
"ESPResSo"
] | 90ee1482de389c15a690ff9518a201347ed9a920b4345bc157fabacaaf7d0326 |
##########################################################################################################
#Visualization of RNA ensembles
#getMap
#getMapDB
#getRef
#getRefDB
#main
##########################################################################################################
import sys
import argparse
import random
from ensemblerna.ErrorCheck import *
from ensemblerna.DBStructs import *
from ensemblerna.DBAnalysis import *
from ensemblerna.PlotVis import *
from ensemblerna.PlotInter import *
##########################################################################################################
#Function to create map of conformational space
#Input: fasta sequence, file header, output directory, map size, ignore flag
#Output: map positions, map 2D matrix, map sequences, map dot bracket structures
##########################################################################################################
def _getMap(fasta, header, dir, size, plotm, rg, rsp, thmax, ignore):
    '''Build the map of conformational space from a fasta sequence.

    Samples map sequences, predicts and encodes their structures, plots
    the map, and returns a dict describing the map (positions, nesting,
    sequences, dot-bracket structures, 2D encoding, frequency array).
    '''
    # Strip any leading path components from the header
    header = re.split('/', header)[-1]
    # Sample sequences for the map and predict their structures
    sampled = getMapSeqs(dir + '/', fasta, size, rg, rsp, thmax)
    getMapStruct(dir, sampled['mapinds'], header + '_map.db')
    # Encode the predicted structures as a normalized 2D matrix
    encoded = encodeStructsNested(dir + header + '_map', rg, ignore)
    map2d = encoded['norm2d_']
    mapdb = encoded['db_']
    # Collapse to unique structures with their frequencies
    freqinfo = getNestFreq(map2d)
    # Plot the map and capture the plotted positions
    mappos = plotMap(freqinfo['arr'], freqinfo['freq'], freqinfo['nest'],
                     freqinfo['seqs'], mapdb, map2d, dir + header + '_map', plotm)
    return {'mappos': mappos, 'mapnest': freqinfo['nest'],
            'mapseqs': freqinfo['seqs'], 'mapdb': mapdb,
            'map2d': map2d, 'maparr': freqinfo['arr']}
##########################################################################################################
#Function to create map of conformational space from dot-bracket
#Input: dot-bracket file, file header, output directory, map size
#Output: map positions, map 2D matrix, map sequences, map dot bracket structures
##########################################################################################################
def _getMapDB(mapdb, header, dir, size, plotm, rg, ignore):
    '''Build the map of conformational space from a dot-bracket file.

    Copies the supplied dot-bracket structures into place, encodes and
    plots them, and returns the same map dict as _getMap.
    '''
    # Strip any leading path components from the header
    header = re.split('/', header)[-1]
    # Copy the user-supplied dot-bracket structures into the map file
    subprocess.check_output('cat ' + mapdb + ' > ' + dir + header + '_map.db',
                            shell=True)
    # Encode the structures as a normalized 2D matrix
    encoded = encodeStructsNested(dir + header + '_map', rg, ignore)
    map2d = encoded['norm2d_']
    mapdb = encoded['db_']
    # Collapse to unique structures with their frequencies
    freqinfo = getNestFreq(map2d)
    # Plot the map and capture the plotted positions
    mappos = plotMap(freqinfo['arr'], freqinfo['freq'], freqinfo['nest'],
                     freqinfo['seqs'], mapdb, map2d, dir + header + '_map', plotm)
    return {'mappos': mappos, 'mapnest': freqinfo['nest'],
            'mapseqs': freqinfo['seqs'], 'mapdb': mapdb,
            'map2d': map2d, 'maparr': freqinfo['arr']}
##########################################################################################################
#Function to create reference visualization
#Input: map object, fasta sequence, file header,output directory, ignore flag, number of samples, shape data
#Output: csv, db, pdf, png
##########################################################################################################
def _getRef(map, fasta, header, dir, rg, plotint, ignore, numsamp, shape=None):
    '''Create the reference ensemble visualization from a fasta sequence.

    Boltzmann-samples the reference structures (optionally with SHAPE
    restraints), encodes them, and plots them projected onto the map.
    Outputs csv, db, pdf, png files; returns True.
    '''
    # Strip any leading path components from the header
    header = re.split('/', header)[-1]
    # Sample reference structures, with SHAPE restraints if requested
    if shape is None:
        getStruct(dir, header + '.db', numsamp)
    else:
        getStructSHAPE(dir, header + '.db', numsamp)
    # Encode the sampled structures as a normalized 2D matrix
    encoded = encodeStructsNested(dir + header, rg, ignore)
    refdb = encoded['db_']
    freqinfo = getNestFreq(encoded['norm2d_'])
    # Project the reference ensemble onto the map and plot it
    ref = plotRef(freqinfo['freq'], freqinfo['nest'], freqinfo['arr'],
                  map['mappos'], map['mapnest'], map['mapseqs'], map['mapdb'],
                  map['maparr'], map['map2d'], dir + header, refdb,
                  freqinfo['seqs'], rg)
    # Optionally write the interactive visualization
    if plotint == 'T':
        plotInteractive(dir, header, map['mappos'], ref['freq'], ref['structs'],
                        ref['diversity'], map['maparr'], rg)
    return True
##########################################################################################################
#Function to create reference visualization with dot bracket
#Input: map object, dot bracket, file header, ignore flag, output directory
#Output: csv, db, pdf, png
##########################################################################################################
def _getRefDB(map, db, header, dir, rg, plotint, ignore, md=None):
    '''Create the reference ensemble visualization from a dot-bracket file.

    Copies the supplied reference structures into place, encodes them,
    and plots them projected onto the map. Outputs csv, db, pdf, png
    files; returns True.
    '''
    # Strip any leading path components from the header
    header = re.split('/', header)[-1]
    # Remove the map temporary files unless a pre-built map (md) was supplied
    if md is None:
        subprocess.check_output('rm ' + dir + '/temp*', shell=True)
    # Copy the user-supplied reference structures into place
    subprocess.check_output('cat ' + db + ' > ' + dir + header + '.db',
                            shell=True)
    # Encode the structures as a normalized 2D matrix
    encoded = encodeStructsNested(dir + header, rg, ignore)
    refdb = encoded['db_']
    freqinfo = getNestFreq(encoded['norm2d_'])
    # Project the reference ensemble onto the map and plot it
    ref = plotRef(freqinfo['freq'], freqinfo['nest'], freqinfo['arr'],
                  map['mappos'], map['mapnest'], map['mapseqs'], map['mapdb'],
                  map['maparr'], map['map2d'], dir + header, refdb,
                  freqinfo['seqs'], rg)
    # Optionally write the interactive visualization
    if plotint == 'T':
        plotInteractive(dir, header, map['mappos'], ref['freq'], ref['structs'],
                        ref['diversity'], map['maparr'], rg)
    return True
#runs main part of script
def main():
    '''Parse the command line, build the map of conformational space and
    visualize the reference ensemble projected onto that map.
    '''
    #check programs loaded
    checkRNAStructCMD()
    #set seed so map sequence sampling is reproducible
    random.seed(113)
    #parse command line
    parser = argparse.ArgumentParser(prog='EnsembleRNA', usage='ensemblerna <fasta file> <output directory> [options]', description='Visualize the structural ensemble for a given RNA. For more information please see the README file, the Documentation file, or visit http://ribosnitch-ensemblerna.rhcloud.com')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('input', metavar='fasta', type=str, help='Reference fasta file. Maximum sequence length is 2500 nucleotides (Required)')
    parser.add_argument('output', metavar='outdir', type=str, help='Output directory (Required)')
    parser.add_argument('-sh', metavar='--shape', type=str, dest='shape_file', default=None, help='Includes shape data in the reference ensemble prediction. Ignored if -d flag is used (Default is None)')
    parser.add_argument('-d', metavar='--db', type=str, dest='db_file', default=None, help='Dot-bracket structures for reference ensemble (Default is None)')
    parser.add_argument('-m', metavar='--map', type=str, dest='map_file', default=None, help='Sequence to create the map of conformational space. Ignored if -md flag is used (Default is reference fasta file)')
    parser.add_argument('-md', metavar='--mapdb', type=str, dest='map_dbfile', default=None, help='Dot-bracket structures for the map of conformational space. A previously created map can be used to project new ensembles onto the same space. (Default is None)')
    parser.add_argument('-s', metavar='--size', dest='size', type=int, default=10, help='Number of sequences for the map of conformational space. Higher numbers increase structural diversity. Ignored if -md flag is used (Default is 10)')
    parser.add_argument('-p', metavar='--plotmap', dest='plotm', type=str, choices=['T', 'F'], default='T', help='Plot the map T/F (Default is T)')
    parser.add_argument('-r', metavar='--range', dest='nucrg', type=int, nargs=2, default=None, help='Range of nucleotides to visualize. Predicted structures will still include the full length of the input RNA, but only the given range will be plotted (Default is 1 to sequence length)')
    parser.add_argument('-maxd', metavar='--maxdistance', dest='maxd', type=int, default=None, help='Maximum number of bases between the two nucleotides in a pair (Default is no restriction)')
    parser.add_argument('-t', metavar='--temperature', dest='tempcalc', type=int, default=None, help='Temperature at which the calculation takes place in Kelvin (Default is 310.15 K)')
    parser.add_argument('-si', metavar='--SHAPEintercept', dest='sint', type=int, default=None, help='Intercept used with SHAPE restraints. Ignored if -d flag is used (Default is -0.6 kcal/mol)')
    parser.add_argument('-sm', metavar='--SHAPEslope', dest='slope', type=int, default=None, help='Slope used with SHAPE restraints. Ignored if -d flag is used (Default is 1.8 kcal/mol)')
    parser.add_argument('-pi', metavar='--plotinteractive', dest='plotint', type=str, choices=['T', 'F'], default='T', help='Plot the interactive file T/F (Default is T)')
    parser.add_argument('-th', metavar='--threadmax', dest='thmax', type=int, default=1, help='Maximum number of threads for multi-threading. (Default is 1)')
    parser.add_argument('-i', metavar='--ignorestems', dest='ignore', type=int, default=3, help='Ignore stems with fewer than i base pairs. (Default is 3)')
    parser.add_argument('-n', metavar='--num', dest='numsamp', type=int, default=1000, help='Number of Boltzmann sampled structures to produce for the visualization. (Default is 1000)')
    args = parser.parse_args()
    fasta_file = args.input
    outdir = args.output
    shape_file = args.shape_file
    map_file = args.map_file
    size = args.size
    db_file = args.db_file
    map_dbfile = args.map_dbfile
    plotm = args.plotm
    nucrg = args.nucrg
    maxd = args.maxd
    sint = args.sint
    slope = args.slope
    tempcalc = args.tempcalc
    plotint = args.plotint
    thmax = args.thmax
    ignore = args.ignore
    numsamp = args.numsamp
    #initialize variables
    header = fasta_file.split('/')[-1]
    header = header.split('.')[0]
    #edit outdir
    if(outdir[-1] == '/'):
        outdir = outdir[:-1]
    #read fasta file
    fasta = checkFasta(fasta_file)
    #check inputs for errors
    outdir = checkDir(outdir)
    if nucrg is None:
        rg = range(0,len(fasta),1)
    else:
        rg = checkRange(nucrg, len(fasta))
    checkSize(size, len(fasta), rg)
    checkIgnore(ignore, len(fasta), rg)
    checkThmax(thmax)
    checkNumsamp(numsamp)
    #assemble RNAstructure (rsp) and SHAPE (ssp) parameter strings
    rsp = ''
    ssp = ''
    if maxd is not None:
        rsp = rsp+' -md '+str(maxd)
    if tempcalc is not None:
        rsp = rsp+' -t '+str(tempcalc)
    if sint is not None:
        ssp = ssp+' -si '+str(sint)
    #BUGFIX: the -sm option was previously gated on sint instead of slope
    if slope is not None:
        ssp = ssp+' -sm '+str(slope)
    #check reference
    if db_file is None:
        #choose shape function (shape or none)
        if shape_file is None:
            shape = None
            checkRNAStruct(fasta, outdir, rsp)
        else:
            shape = 1
            checkSHAPE(shape_file, outdir)
            checkRNAStructSHAPE(fasta, outdir, rsp, ssp)
    else:
        #check dot bracket
        checkDB(db_file, outdir, len(fasta))
    #choose map function (db or fasta)
    if map_dbfile is not None:
        #check dot bracket
        checkDB(map_dbfile, outdir, len(fasta))
        #get map (BUGFIX: the required ignore argument was previously omitted,
        #which raised a TypeError when -md was used)
        map = _getMapDB(map_dbfile, header, outdir, size, plotm, rg, ignore)
    else:
        #read map file
        if map_file is not None:
            mfasta = checkFasta(map_file, len(fasta))
        else:
            mfasta = fasta
        #get map
        map = _getMap(mfasta, header, outdir, size, plotm, rg, rsp, thmax, ignore)
    #choose reference function (db or fasta)
    if db_file is not None:
        #visualize reference
        if map_dbfile is None:
            #BUGFIX: numsamp was previously passed positionally into the md
            #parameter of _getRefDB, which suppressed temp-file cleanup
            ref = _getRefDB(map, db_file, header, outdir, rg, plotint, ignore)
        else:
            ref = _getRefDB(map, db_file, header, outdir, rg, plotint, ignore, md=1)
    else:
        #visualize reference
        ref = _getRef(map, fasta, header, outdir, rg, plotint, ignore, numsamp, shape)
| cbtolson/ensemblerna_package | ensemblerna/main.py | Python | gpl-3.0 | 13,070 | [
"VisIt"
] | 9d7ca5beb1755ca28ef041bd83d8353390ca2ada42062a430ad253d38a49d004 |
import os
import re
import logging
import urllib2
import requests
import pandas as pd
import pydap.client
import pydap.exceptions
import xray
from bs4 import BeautifulSoup
from collections import namedtuple
from contextlib import closing
from datetime import datetime
from requests.exceptions import ConnectionError
from shutil import move
# Support Python 2.7 and 3.x
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
from exceptions import RequiredVariableNotPresent
class ArgoData(object):
    '''Collection of methods for working with Argo profiling float data.
    '''
    # Jupyter Notebook defines a root logger, use that if it exists
    if logging.getLogger().handlers:
        _notebook_handler = logging.getLogger().handlers[0]
        logger = logging.getLogger()
    else:
        logger = logging.getLogger(__name__)
        _handler = logging.StreamHandler()
        _formatter = logging.Formatter('%(levelname)s %(asctime)s %(filename)s '
                                '%(funcName)s():%(lineno)d %(message)s')
        _handler.setFormatter(_formatter)
        logger.addHandler(_handler)
    # Index maps verbosity 0-3 to a logging level (see set_verbosity())
    _log_levels = (logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG)
    # Literals for groups stored in local HDF file cache
    _STATUS = 'status'
    _GLOBAL_META = 'global_meta'
    _BIO_PROFILE_INDEX = 'bio_global_index'
    _ALL_WMO_DF = 'all_wmo_df'
    _OXY_COUNT_DF = 'oxy_count_df'
    # Coordinate variables required in every profile NetCDF file
    _coordinates = {'PRES_ADJUSTED', 'LATITUDE', 'LONGITUDE', 'JULD'}
    # Names and search patterns for cache file naming/parsing
    # Make private and ignore pylint's complaints
    # No other names in this class can end in 'RE'
    _fixed_cache_base = 'biofloat_fixed_cache'
    _ageRE = 'age([0-9]+)'
    _profilesRE = 'profiles([0-9]+)'
    _pressureRE = 'pressure([0-9]+)'
    _wmoRE = 'wmo([0-9-]+)'
    _variablesRE = 'var([0-9-]+)'
    # Sentinel meaning "no limit" for max_profiles / max_pressure
    _MAX_VALUE = 10000000000
    # zlib parameters for the (currently disabled) compressed table storage
    _compparms = dict(complib='zlib', complevel=9)
    # PyTables: Use non-empty minimal df to minimize HDF file size
    # NOTE(review): pd.np was removed in pandas 2.0 -- this requires
    # pandas < 2; consider importing numpy and using np.nan directly
    _blank_df = pd.DataFrame([pd.np.nan])
    def __init__(self, verbosity=0, cache_file=None, bio_list=('DOXY_ADJUSTED',),
            status_url='http://argo.jcommops.org/FTPRoot/Argo/Status/argo_all.txt',
            global_url='ftp://ftp.ifremer.fr/ifremer/argo/ar_index_global_meta.txt',
            thredds_url='http://tds0.ifremer.fr/thredds/catalog/CORIOLIS-ARGO-GDAC-OBS',
            variables=('TEMP_ADJUSTED', 'PSAL_ADJUSTED', 'DOXY_ADJUSTED')):
        '''Initialize ArgoData object.
        Args:
            verbosity (int): range(4), default=0
            cache_file (str): Defaults to biofloat_default_cache.hdf in users
                              home directory
            bio_list (list): List of required bio variables, e.g.:
                             ['DOXY_ADJUSTED', 'CHLA', 'BBP700', 'CDOM', 'NITRATE']
            status_url (str): Source URL for Argo status data, defaults to
                              http://argo.jcommops.org/FTPRoot/Argo/Status/argo_all.txt
            global_url (str): Source URL for DAC locations, defaults to
                              ftp://ftp.ifremer.fr/ifremer/argo/ar_index_global_meta.txt
            thredds_url (str): Base URL for THREDDS Data Server, defaults to
                               http://tds0.ifremer.fr/thredds/catalog/CORIOLIS-ARGO-GDAC-OBS
            variables (list): Variables to extract from NetCDF files and put
                              into the Pandas DataFrame
        cache_file (str):
            There are 3 kinds of cache files:
            1. The default file named biofloat_cache.hdf that is automatically
               placed in the user's home directory. It will cache whatever
               data is requested via call to get_float_dataframe().
            2. Specially named cache_files produced by the load_biofloat_cache.py
               script. These files are built with constraints and are fixed.
               Once built they can be used in a read-only fashion to work on
               only the data they contain. Calls to get_float_dataframe().
               will not add more data to these "fixed" cache files.
            3. Custom cache file names. These operate just like the default cache
               file, but can be named whatever the user wants.
        '''
        self.status_url = status_url
        self.global_url = global_url
        self.thredds_url = thredds_url
        # Set of variables gives fast membership tests and de-duplication
        self.variables = set(variables)
        self.logger.setLevel(self._log_levels[verbosity])
        self._bio_list = bio_list
        if cache_file:
            # A "fixed" cache file encodes its constraints in its name;
            # parse them so later requests can be clamped to them
            self.cache_file_parms = self._get_cache_file_parms(cache_file)
            if self.cache_file_parms:
                self.logger.info('Using fixed cache file parms: %s',
                                  self.cache_file_parms)
            self.cache_file = cache_file
        else:
            # Write default cache to users home directory
            self.cache_file = os.path.abspath(os.path.join(
                                os.path.expanduser('~'),
                                'biofloat_default_cache.hdf'))
        self.logger.info('Using cache_file %s', self.cache_file)
def _put_df(self, df, name, metadata=None):
'''Save Pandas DataFrame to local HDF file with optional metadata dict.
'''
store = pd.HDFStore(self.cache_file)
self.logger.debug('Saving DataFrame to name "%s" in file %s',
name, self.cache_file)
if df.dropna().empty:
store.put(name, df, format='fixed')
else:
##store.append(name, df, format='table', **self._compparms)
store.put(name, df, format='fixed')
if metadata and store.get_storer(name):
store.get_storer(name).attrs.metadata = metadata
self.logger.debug('store.close()')
store.close()
    def _get_df(self, name):
        '''Return tuple of Pandas DataFrame and metadata dictionary.

        Raises IOError or KeyError if the cache file or the named group
        cannot be read; the store is always closed via finally.
        '''
        store = pd.HDFStore(self.cache_file)
        try:
            self.logger.debug('Getting "%s" from %s', name, self.cache_file)
            df = store[name]
            try:
                metadata = store.get_storer(name).attrs.metadata
            except AttributeError:
                # Group was stored without a metadata attribute
                metadata = None
        except (IOError, KeyError):
            raise
        finally:
            self.logger.debug('store.close()')
            store.close()
        return df, metadata
def _remove_df(self, name):
'''Remove name from cache file
'''
with pd.HDFStore(self.cache_file) as store:
self.logger.debug('Removing "%s" from %s', name, self.cache_file)
store.remove(name)
    def _status_to_df(self):
        '''Read the data at status_url link and return it as a Pandas DataFrame.

        The Argo status page is served as UTF-16LE encoded CSV text.
        '''
        self.logger.info('Reading data from %s', self.status_url)
        req = requests.get(self.status_url)
        req.encoding = 'UTF-16LE'
        # Had to tell requests the encoding, StringIO makes the text
        # look like a file object. Skip over leading BOM bytes.
        df = pd.read_csv(StringIO(req.text[1:]))
        return df
def _ftp_csv_to_df(self, url, date_columns=[]):
'''Read the data at url link and return it as a Pandas DataFrame.
'''
self.logger.info('Reading data from %s', url)
with closing(urllib2.urlopen(url)) as r:
df = pd.read_csv(r, comment='#', parse_dates=date_columns)
return df
def _get_pressures(self, ds, max_pressure, nprof=0):
'''From xray ds return tuple of pressures list and pres_indices list.
'''
pressures = []
pres_indices = []
for i, p in enumerate(ds['PRES_ADJUSTED'].values[nprof]):
if p >= max_pressure:
break
pressures.append(p)
pres_indices.append(i)
if not pressures:
self.logger.warn('No PRES_ADJUSTED values in netCDF file')
return pressures, pres_indices
    def _multi_indices(self, wmo, ds, max_pressure, profile, nprof=0):
        '''Return pd.MultiIndex for a hierarchical (wmo, time, lon, lat,
        profile, pressure) index and the list of indices into the pressure
        variable that were kept (those shallower than max_pressure).
        '''
        pressures = []
        pres_indices = []
        try:
            pressures, pres_indices = self._get_pressures(ds, max_pressure, nprof)
        except pydap.exceptions.ServerError as e:
            self.logger.error(e)
        except IndexError:
            # nprof selects the N_PROF column; [1] may not exist
            self.logger.warn('Profile [%s] does not exist', nprof)
        # Build tuples for a hierarchical index for better efficiency;
        # coordinate values are taken from the same N_PROF column (nprof)
        # as the pressures
        tuples = [(wmo, ds['JULD'].values[nprof], ds['LONGITUDE'].values[nprof],
                   ds['LATITUDE'].values[nprof], profile, round(pres, 2))
                   for pres in pressures]
        indices = pd.MultiIndex.from_tuples(tuples,
                        names=['wmo', 'time', 'lon', 'lat', 'profile', 'pressure'])
        return indices, pres_indices
    def _build_profile_dataframe(self, wmo, url, ds, max_pressure, profile, nprof):
        '''Return DataFrame containing the variables from N_PROF column in
        url specified by nprof integer (0,1).

        Variables missing from ds are logged and skipped; server errors
        are logged and leave the variable out of the returned DataFrame.
        '''
        df = pd.DataFrame()
        # Add only non-coordinate variables to the DataFrame
        for v in self.variables:
            try:
                # NOTE(review): _multi_indices() does not depend on v, so this
                # call is loop-invariant; it is kept inside the try so that a
                # failure is handled per-variable exactly as before
                indices, pres_indices = self._multi_indices(wmo, ds,
                                            max_pressure, profile, nprof)
                s = pd.Series(ds[v].values[nprof][pres_indices], index=indices)
                self.logger.debug('Added %s to DataFrame', v)
                df[v] = s
            except (KeyError, TypeError):
                self.logger.warn('%s not in %s', v, url)
            except pydap.exceptions.ServerError as e:
                self.logger.error(e)
        return df
    def _profile_to_dataframe(self, wmo, url, key, max_pressure):
        '''Return a Pandas DataFrame of profiling float data from data at url.
        Examine data at url for variables in self._bio_list that may be in
        the lower vertical resolution [1] N_PROF array.

        Raises RequiredVariableNotPresent if any coordinate or requested
        variable is missing from the dataset.
        '''
        df = self._blank_df
        try:
            self.logger.debug('Opening %s', url)
            ds = xray.open_dataset(url)
        except pydap.exceptions.ServerError:
            self.logger.error('ServerError opening %s', url)
            return df
        except Exception as e:
            self.logger.error('Error opening %s: %s', url, str(e))
            return df
        self.logger.debug('Checking %s for our desired variables', url)
        for v in self._coordinates.union(self.variables):
            if v not in ds.keys():
                raise RequiredVariableNotPresent('{} not in {}'.format(v, url))
        # key looks like /WMO_<wmo>/P<profile>; extract the profile number
        profile = int(key.split('P')[1])
        df = self._build_profile_dataframe(wmo, url, ds, max_pressure,
                                           profile, nprof=0)
        # Check for required bio variables - should only the low resolution
        # data be returned or should the high resolution T/S data be
        # concatenated with the lower vertical resolution variables?
        for var in self._bio_list:
            if df[var].dropna().empty:
                self.logger.warn('%s: N_PROF [0] empty, trying [1]', var)
                df = self._build_profile_dataframe(wmo, url, ds, max_pressure,
                                                   profile, nprof=1)
        return df
def _get_update_datetime(self, url):
'''Return python datetime of DATE_UPDATE variable from NetCDF file at url
'''
dt = None
try:
self.logger.debug('Opening %s', url)
ds = xray.open_dataset(url)
except pydap.exceptions.ServerError:
self.logger.error('ServerError opening %s', url)
return dt
dt = datetime.strptime(ds['DATE_UPDATE'].values, '%Y%m%d%H%M%S')
return dt
def _float_profile_key(self, url):
'''Return last part of url as key that serves as a PyTables/HDF
group name: WMO_<wmo>/P<profilenumber>. The parent group WMO_<wmo>
must be created before this key can be used to put data.
'''
regex = re.compile(r"([a-zA-Z]+)(\d+_\d+).nc$")
m = regex.search(url)
key = '/WMO_{:s}'.format(m.group(2).replace('_', '/P'))
code = m.group(1)
return key, code
def set_verbosity(self, verbosity):
'''Change loglevel. 0: ERROR, 1: WARN, 2: INFO, 3:DEBUG.
'''
self.logger.setLevel(self._log_levels[verbosity])
def get_oxy_floats_from_status(self, age_gte=340):
'''Return a Pandas Series of floats that are identified to have oxygen,
are not greylisted, and have an age greater or equal to age_gte.
Args:
age_gte (int): Restrict to floats with data >= age, defaults to 340
'''
try:
df, _ = self._get_df(self._STATUS)
except (IOError, KeyError):
self.logger.debug('Could not read status from cache, loading it.')
self._put_df(self._status_to_df(), self._STATUS)
df, _ = self._get_df(self._STATUS)
odf = df.query('(OXYGEN == 1) & (GREYLIST == 0) & (AGE != 0) & '
'(AGE >= {:d})'.format(age_gte))
return odf['WMO'].tolist()
    def get_dac_urls(self, wmo_list):
        '''Return dictionary of Data Assembly Centers keyed by wmo number.
        Args:
            wmo_list (list[str]): List of strings of float numbers
        '''
        try:
            df, _ = self._get_df(self._GLOBAL_META)
        except KeyError:
            self.logger.debug('Could not read global_meta, putting it into cache.')
            self._put_df(self._ftp_csv_to_df(self.global_url,
                            date_columns=['date_update']), self._GLOBAL_META)
            df, _ = self._get_df(self._GLOBAL_META)
        dac_urls = {}
        for _, row in df.loc[:,['file']].iterrows():
            # 'file' paths look like <dac>/<wmo>/...; the wmo is element [1]
            wmo = row['file'].split('/')[1]
            if wmo in wmo_list:
                # Build the TDS catalog URL for this float's profiles directory
                url = self.thredds_url
                url += '/'.join(row['file'].split('/')[:2])
                url += "/profiles/catalog.xml"
                dac_urls[wmo] = url
        self.logger.debug('Found %s dac_urls', len(dac_urls))
        return dac_urls
def get_bio_profile_index(self,
url='ftp://ftp.ifremer.fr/ifremer/argo/argo_bio-profile_index.txt'):
'''Return Pandas DataFrame of data at url
'''
try:
df, _ = self._get_df(self._BIO_PROFILE_INDEX)
except KeyError:
self.logger.debug('Adding %s to cache', self._BIO_PROFILE_INDEX)
self._put_df(self._ftp_csv_to_df(url, date_columns=['date', 'date_update']),
self._BIO_PROFILE_INDEX)
df, _ = self._get_df(self._BIO_PROFILE_INDEX)
return df
def _sort_opendap_urls(self, urls):
'''Organize list of Argo OpenDAP URLs so that 'D' Delayed Mode or
urls that contain 'D' appear before 'R' Realtime ones.
'''
durls = []
hasdurls = []
mrurls = []
rurls = []
for url in urls:
regex = re.compile(r"([a-zA-Z]+)\d+_\d+.nc$")
try:
code = regex.search(url).group(1).upper()
except AttributeError:
continue
if 'D' == code:
durls.append(url)
elif 'D' in code:
hasdurls.append(url)
elif 'MR' == code:
mrurls.append(url)
else:
rurls.append(url)
# Return each group in reverse order, as they appear on the TDS
return sorted(durls, reverse=True) + sorted(hasdurls, reverse=True
) + sorted(mrurls, reverse=True) + sorted(rurls, reverse=True)
    def get_profile_opendap_urls(self, catalog_url):
        '''Returns list of opendap urls for the profiles in catalog. The
        list is ordered with Delayed mode versions before Realtime ones.
        The `catalog_url` is the .xml link for a directory on a THREDDS Data
        Server.

        Returns an empty list if the catalog cannot be fetched.
        '''
        urls = []
        try:
            self.logger.info("Checking for updates at %s", catalog_url)
            req = requests.get(catalog_url)
        except ConnectionError as e:
            self.logger.error('Cannot open catalog_url = %s', catalog_url)
            self.logger.exception(e)
            return urls
        soup = BeautifulSoup(req.text, 'html.parser')
        # Expect that this is a standard TDS with dodsC used for OpenDAP
        base_url = '/'.join(catalog_url.split('/')[:4]) + '/dodsC/'
        # Pull out <dataset ... urlPath='...nc'> attributes from the XML
        for e in soup.findAll('dataset', attrs={'urlpath': re.compile("nc$")}):
            urls.append(base_url + e['urlpath'])
        return self._sort_opendap_urls(urls)
def _get_cache_file_parms(self, cache_file):
'''Return dictionary of constraint parameters from name of fixed cache file.
'''
parm_dict = {}
if self._fixed_cache_base in cache_file:
for regex in [a for a in dir(self) if not callable(a) and
a.endswith("RE")]:
try:
p = re.compile(self.__getattribute__(regex))
m = p.search(cache_file)
parm_dict[regex[1:-2]] = m.group(1)
except AttributeError:
pass
return parm_dict
    def _validate_cache_file_parm(self, parm, value):
        '''Return adjusted parm value so as not to exceed fixed cache file value.

        For integer parms (profiles, pressure, age) the requested value is
        clamped to the fixed cache file's value; a missing value becomes
        _MAX_VALUE ("no limit"). For the 'wmo' parm (a list) the request is
        replaced by the cache file's list when it is not a subset of it.
        '''
        adjusted_value = value
        cache_file_value = None
        try:
            cache_file_value = self.cache_file_parms[parm]
        except KeyError:
            # Fixed cache file exists but doesn't constrain this parm
            if isinstance(value, int) and not adjusted_value:
                # Return a ridiculously large integer to force reading all data
                adjusted_value = self._MAX_VALUE
        except AttributeError:
            # No cache_file sepcified
            pass
        if value and cache_file_value:
            if isinstance(value, int):
                if value > cache_file_value:
                    self.logger.warn("Requested %s %s exceeds cache file's parameter: %s",
                            parm, value, cache_file_value)
                    self.logger.info("Setting %s to %s", parm, cache_file_value)
                    adjusted_value = int(cache_file_value)
            else:
                # List-valued parm: requested items must all be in the file
                if not set(value) <= set(cache_file_value.split('-')):
                    self.logger.warn("Requested item(s) %s %s not in fixed cache file: %s",
                            parm, set(value), cache_file_value)
                    adjusted_value = cache_file_value.split('-')
        elif not value and cache_file_value:
            # Nothing requested: fall back to what the fixed file contains
            self.logger.info("Using fixed cache file's %s value of %s", parm,
                    cache_file_value)
            if parm == 'wmo':
                adjusted_value = cache_file_value.split('-')
            else:
                adjusted_value = int(cache_file_value)
        if not adjusted_value:
            # Final check for value = None and not set by cache_file
            if not isinstance(value, (list, tuple)):
                adjusted_value = self._MAX_VALUE
        return adjusted_value
def _validate_oxygen(self, df, url, var_name='DOXY_ADJUSTED'):
'''Return blank DataFrame if no valid oxygen otherwise return df.
'''
if df[var_name].dropna().empty:
self.logger.warn('Oxygen is all NaNs in %s', url)
df = self._blank_df
return df
    def _save_profile(self, url, count, opendap_urls, wmo, key, code,
                      max_pressure, float_msg, max_profiles):
        '''Put profile data into the local HDF cache.

        Reads the profile at url, validates required oxygen data when
        requested, stores the result (or a blank placeholder) under key
        with url/dateloaded metadata, and returns the DataFrame.
        '''
        m_t = '{}, Profile {} of {}, key = {}, code = {}'
        m_t_mp = '{}, Profile {} of {}({}), key = {}, code = {}'
        msg = m_t.format(float_msg, count + 1, len(opendap_urls), key, code)
        try:
            # Include the max_profiles limit in the message when one is set
            if max_profiles != self._MAX_VALUE:
                msg = m_t_mp.format(float_msg, count + 1, len(opendap_urls),
                                    max_profiles, key, code)
        except NameError:
            # NOTE(review): all names above are bound parameters, so this
            # handler looks unreachable -- likely left from earlier code
            pass
        try:
            self.logger.info(msg)
            df = self._profile_to_dataframe(wmo, url, key, max_pressure).dropna()
            if df.empty:
                df = self._blank_df
            else:
                # Enforce the oxygen requirement when one is configured
                if 'DOXY_ADJUSTED' in self._bio_list:
                    df = self._validate_oxygen(df, url, 'DOXY_ADJUSTED')
                elif 'DOXY' in self._bio_list:
                    df = self._validate_oxygen(df, url, 'DOXY')
        except RequiredVariableNotPresent as e:
            self.logger.warn(str(e))
            df = self._blank_df
        except KeyError as e:
            self.logger.error(str(e))
            df = self._blank_df
        # Cache even blank results so the url is not re-queried next time
        self._put_df(df, key, dict(url=url, dateloaded=datetime.utcnow()))
        return df
    def _get_data_from_argo(self, wmo_list, max_profiles=None, max_pressure=None,
                            append_df=True, update_delayed_mode=False):
        '''Query Argo web resources for all the profile data for floats in
        wmo_list. Return DataFrame.

        Profiles already in the cache are reused; delayed-mode profiles may
        be refreshed when update_delayed_mode is True.
        '''
        # Clamp requests to the fixed cache file's constraints (if any)
        max_profiles = self._validate_cache_file_parm('profiles', max_profiles)
        max_pressure = self._validate_cache_file_parm('pressure', max_pressure)
        max_wmo_list = self._validate_cache_file_parm('wmo', wmo_list)
        float_df = pd.DataFrame()
        for f, (wmo, dac_url) in enumerate(self.get_dac_urls(max_wmo_list).iteritems()):
            float_msg = 'WMO_{}: Float {} of {}'. format(wmo, f+1, len(max_wmo_list))
            opendap_urls = self.get_profile_opendap_urls(dac_url)
            for i, url in enumerate(opendap_urls):
                if i >= max_profiles:
                    self.logger.info('Stopping at max_profiles = %s', max_profiles)
                    break
                try:
                    key, code = self._float_profile_key(url)
                except AttributeError:
                    # url doesn't name a recognizable profile file
                    continue
                try:
                    df, m = self._get_df(key)
                    self.logger.debug(m['url'])
                    if 'D' in code.upper() and update_delayed_mode:
                        DATE_UPDATED = self._get_update_datetime(url)
                        if DATE_UPDATED:
                            if m['dateloaded'] < DATE_UPDATED:
                                self.logger.info(
                                    'Replacing %s as dateloaded time of %s'
                                    ' is before DATE_UPDATED time of %s',
                                    key, m['dateloaded'], DATE_UPDATED)
                                self._remove_df(key)
                                # Fall through to the cache-miss path below
                                raise KeyError
                except KeyError:
                    # Not in cache (or just evicted): fetch and store it
                    df = self._save_profile(url, i, opendap_urls, wmo, key, code,
                                            max_pressure, float_msg, max_profiles)
                self.logger.debug(df.head())
                if append_df and not df.dropna().empty:
                    float_df = float_df.append(df)
        return float_df
    def _get_data_from_cache(self, wmo_list, wmo_df, max_profiles=None):
        '''Return DataFrame of data in the cache file without querying Argo

        Args:
            wmo_list (list): Floats to read
            wmo_df (DataFrame): Profile metadata from get_profile_metadata()
            max_profiles (int): Per-float limit on profiles read
        '''
        max_profiles = self._validate_cache_file_parm('profiles', max_profiles)
        # TODO: Make sure all in wmo_list is in max_wmo_list
        ##max_wmo_list = self._validate_cache_file_parm('wmo', wmo_list)
        float_df = pd.DataFrame()
        for f, wmo in enumerate(wmo_list):
            # All cached profiles belonging to this float
            rows = wmo_df.loc[wmo_df['wmo'] == wmo, :]
            for i, (_, row) in enumerate(rows.iterrows()):
                if i >= max_profiles:
                    self.logger.info('%s stopping at max_profiles = %s', wmo, max_profiles)
                    break
                try:
                    key, code = self._float_profile_key(row['url'])
                except AttributeError:
                    # url doesn't name a recognizable profile file
                    continue
                self.logger.debug('Float %s of %s, Profile %s of %s: %s',
                        f+1, len(wmo_list), i+1, len(rows), key)
                df, _ = self._get_df(key)
                if not df.dropna().empty:
                    float_df = float_df.append(df)
        return float_df
def get_float_dataframe(self, wmo_list, max_profiles=None, max_pressure=None,
append_df=True, update_delayed_mode=False,
update_cache=True):
'''Returns Pandas DataFrame for all the profile data from wmo_list.
Uses cached data if present, populates cache if not present. If
max_profiles limits the number of profiles returned per float,
this is useful for testing or for getting just
the most recent profiles from the float. To load only surface data
set a max_pressure value. Set append_df to False if calling simply
to load cache_file (reduces memory requirements). Set update_delayed_mode
to True to reload into the cache updated delayed mode data. If
update_cache is True then each DAC will be queried for new profile
data, which can take some time; for reading just data from the cache
set update_cache=False.
'''
if update_cache:
df = self._get_data_from_argo(wmo_list, max_profiles, max_pressure,
append_df, update_delayed_mode)
else:
wmo_df = self.get_profile_metadata(flush=False)
df = self._get_data_from_cache(wmo_list, wmo_df, max_profiles)
return df
    def _build_profile_metadata_df(self, wmo_dict):
        '''Read metadata from .hdf file to return a DataFrame of the metadata
        for each profile name in wmo_dict.

        wmo_dict maps HDF group names to wmo numbers. Raises IndexError if
        wmo_dict is empty (profiles[0] below).
        '''
        profiles = []
        url_hash = {}
        Profile = namedtuple('profile', 'wmo name url code dateloaded')
        with pd.HDFStore(self.cache_file, mode='r+') as f:
            self.logger.debug('Building wmo_df by scanning %s', self.cache_file)
            # NOTE(review): .iteritems() is Python 2 only - this method
            # breaks under Python 3; confirm the supported interpreter.
            for name, wmo in wmo_dict.iteritems():
                # Metadata was attached to each group when it was cached
                m = f.get_storer(name).attrs.metadata
                _, code = self._float_profile_key(m['url'])
                profiles.append(Profile(wmo, name, m['url'], code, m['dateloaded']))
                url_hash[m['url']] = Profile(wmo, name, m['url'], code, m['dateloaded'])
        # Sort profiles in code order: D, MR, and the rest
        sorted_profiles = []
        for url in self._sort_opendap_urls(url_hash.keys()):
            sorted_profiles.append(url_hash[url])
        df = pd.DataFrame.from_records(sorted_profiles, columns=profiles[0]._fields)
        return df
def get_profile_metadata(self, flush=False):
'''Return DataFrame of all profile metadata in the cache file
'''
if flush:
try:
with pd.HDFStore(self.cache_file, mode='r+') as s:
s.remove(self._ALL_WMO_DF)
except KeyError:
pass
try:
with pd.HDFStore(self.cache_file, mode='r+') as s:
wmo_df = s[self._ALL_WMO_DF]
self.logger.debug('Read %s from cache', self._ALL_WMO_DF)
except (KeyError, TypeError):
self.logger.debug('Building float_dict by scanning %s', self.cache_file)
with pd.HDFStore(self.cache_file, mode='r+') as f:
float_dict = {g: g.split('/')[1].split('_')[1]
for g in sorted(f.keys()) if g.startswith('/WMO')}
wmo_df = self._build_profile_metadata_df(float_dict)
self.logger.info('Putting %s into cache', self._ALL_WMO_DF)
with pd.HDFStore(self.cache_file, mode='r+') as s:
s.put(self._ALL_WMO_DF, wmo_df, format='fixed')
return wmo_df
def get_cache_file_all_wmo_list(self, flush=False):
'''Return wmo numbers of all the floats in the cache file. Has side
effect of storing DataFrame of all profile urls indexed by wmo number.
'''
wmo_df = self.get_profile_metadata(flush)
return wmo_df['wmo'].unique().tolist()
    def get_cache_file_oxy_count_df(self, max_profiles=None, flush=False):
        '''Return DataFrame of profile and measurement counts for each float
        that contains oxygen data in the cache file. Limit loading additional
        profiles by setting max_profiles.
        '''
        oxy_count_df = pd.DataFrame()
        if flush:
            # Discard any previously saved count table so it is rebuilt
            try:
                with pd.HDFStore(self.cache_file, mode='r+') as s:
                    s.remove(self._OXY_COUNT_DF)
            except KeyError:
                pass
        try:
            with pd.HDFStore(self.cache_file, mode='r+') as s:
                oxy_count_df = s[self._OXY_COUNT_DF]
                self.logger.info('Read %s from cache', self._OXY_COUNT_DF)
        except KeyError:
            # Table not cached yet: build it float by float
            oxy_hash = {}
            for wmo in self.get_cache_file_all_wmo_list(flush=False):
                self.logger.info('Getting %s from cache', wmo)
                df = self.get_float_dataframe([wmo], max_profiles, update_cache=False)
                try:
                    if not df['DOXY_ADJUSTED'].dropna().empty:
                        odf = df.dropna().xs(wmo, level='wmo')
                        oxy_hash[wmo] = (
                            len(odf.index.get_level_values('time').unique()),
                            len(odf))
                except (KeyError, AttributeError):
                    pass
                # NOTE(review): when both DOXY_ADJUSTED and DOXY exist this
                # second block silently overwrites the counts computed above
                # - confirm that preferring DOXY is intentional.
                try:
                    if not df['DOXY'].dropna().empty:
                        odf = df.dropna().xs(wmo, level='wmo')
                        oxy_hash[wmo] = (
                            len(odf.index.get_level_values('time').unique()),
                            len(odf))
                except (KeyError, AttributeError):
                    pass
            num_profiles = pd.Series([v[0] for v in oxy_hash.values()])
            num_measurements = pd.Series([v[1] for v in oxy_hash.values()])
            # NOTE(review): pd.Series(oxy_hash.keys()) assumes Python 2
            # (a list); under Python 3 a dict view needs list() first.
            oxy_count_df = pd.DataFrame(dict(wmo = pd.Series(oxy_hash.keys()),
                                             num_profiles = num_profiles,
                                             num_measurements = num_measurements))
            self.logger.info('Putting %s into cache', self._OXY_COUNT_DF)
            with pd.HDFStore(self.cache_file, mode='r+') as s:
                s.put(self._OXY_COUNT_DF, oxy_count_df, format='fixed')
        return oxy_count_df
| biofloat/biofloat | biofloat/ArgoData.py | Python | mit | 31,176 | [
"NetCDF"
] | 64e0f56463e23df0b93aaf55d00ed86a57e49a256bd114d78b0c482b981534c0 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"Find unused objects and remove with the user's permission."
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from __future__ import with_statement
from gramps.gen.ggettext import gettext as _
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".RemoveUnused")
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gen.errors import WindowActiveError
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class RemoveUnused(tool.Tool, ManagedWindow, UpdateCallback):
    """Tool window that finds database objects that nothing references
    (events, sources, places, media, repositories, notes), shows them in
    a list and removes those the user marks."""

    # Column indices of the results list model
    MARK_COL = 0
    OBJ_ID_COL = 1
    OBJ_NAME_COL = 2
    OBJ_TYPE_COL = 3
    OBJ_HANDLE_COL = 4

    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        self.title = _('Unused Objects')
        tool.Tool.__init__(self, dbstate, options_class, name)
        if self.db.readonly:
            # Nothing can be removed from a read-only database
            return
        ManagedWindow.__init__(self, uistate,[], self.__class__)
        UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
        self.dbstate = dbstate
        self.uistate = uistate
        # Per object type: accessor, remover, optional text summarizer,
        # editor class name, icon id, and the fallback index of the name
        # field in the raw data tuple (used when get_text is None).
        self.tables = {
            'events'  : {'get_func': self.db.get_event_from_handle,
                         'remove'  : self.db.remove_event,
                         'get_text': self.get_event_text,
                         'editor'  : 'EditEvent',
                         'stock'   : 'gramps-event',
                         'name_ix' : 4},
            'sources' : {'get_func': self.db.get_source_from_handle,
                         'remove'  : self.db.remove_source,
                         'get_text': None,
                         'editor'  : 'EditSource',
                         'stock'   : 'gramps-source',
                         'name_ix' : 2},
            'places'  : {'get_func': self.db.get_place_from_handle,
                         'remove'  : self.db.remove_place,
                         'get_text': None,
                         'editor'  : 'EditPlace',
                         'stock'   : 'gramps-place',
                         'name_ix' : 2},
            'media'   : {'get_func': self.db.get_object_from_handle,
                         'remove'  : self.db.remove_object,
                         'get_text': None,
                         'editor'  : 'EditMedia',
                         'stock'   : 'gramps-media',
                         'name_ix' : 4},
            'repos'   : {'get_func': self.db.get_repository_from_handle,
                         'remove'  : self.db.remove_repository,
                         'get_text': None,
                         'editor'  : 'EditRepository',
                         'stock'   : 'gramps-repository',
                         'name_ix' : 3},
            'notes'   : {'get_func': self.db.get_note_from_handle,
                         'remove'  : self.db.remove_note,
                         'get_text': self.get_note_text,
                         'editor'  : 'EditNote',
                         'stock'   : 'gramps-notes',
                         'name_ix' : 2},
            }
        self.init_gui()

    def init_gui(self):
        """Build the dialog from its Glade description and wire up signals."""
        self.top = Glade()
        window = self.top.toplevel
        self.set_window(window, self.top.get_object('title'), self.title)

        # Check boxes selecting which object types to scan
        self.events_box = self.top.get_object('events_box')
        self.sources_box = self.top.get_object('sources_box')
        self.places_box = self.top.get_object('places_box')
        self.media_box = self.top.get_object('media_box')
        self.repos_box = self.top.get_object('repos_box')
        self.notes_box = self.top.get_object('notes_box')
        self.find_button = self.top.get_object('find_button')
        self.remove_button = self.top.get_object('remove_button')

        # Initialize the check boxes from the saved options
        self.events_box.set_active(self.options.handler.options_dict['events'])
        self.sources_box.set_active(
            self.options.handler.options_dict['sources'])
        self.places_box.set_active(
            self.options.handler.options_dict['places'])
        self.media_box.set_active(self.options.handler.options_dict['media'])
        self.repos_box.set_active(self.options.handler.options_dict['repos'])
        self.notes_box.set_active(self.options.handler.options_dict['notes'])

        self.warn_tree = self.top.get_object('warn_tree')
        self.warn_tree.connect('button_press_event', self.double_click)
        self.selection = self.warn_tree.get_selection()

        self.mark_button = self.top.get_object('mark_button')
        self.mark_button.connect('clicked', self.mark_clicked)
        self.unmark_button = self.top.get_object('unmark_button')
        self.unmark_button.connect('clicked', self.unmark_clicked)
        self.invert_button = self.top.get_object('invert_button')
        self.invert_button.connect('clicked', self.invert_clicked)

        # Backing model (mark flag, gramps id, name, type key, handle);
        # viewed through a sortable wrapper model.
        self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING)
        self.sort_model = self.real_model.sort_new_with_model()
        self.warn_tree.set_model(self.sort_model)

        self.renderer = Gtk.CellRendererText()
        self.img_renderer = Gtk.CellRendererPixbuf()
        self.bool_renderer = Gtk.CellRendererToggle()
        self.bool_renderer.connect('toggled', self.selection_toggled)

        # Add mark column
        mark_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
                                           active=RemoveUnused.MARK_COL)
        mark_column.set_sort_column_id(RemoveUnused.MARK_COL)
        self.warn_tree.append_column(mark_column)

        # Add image column
        img_column = Gtk.TreeViewColumn(None, self.img_renderer )
        img_column.set_cell_data_func(self.img_renderer, self.get_image)
        self.warn_tree.append_column(img_column)

        # Add column with object gramps_id
        id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
                                       text=RemoveUnused.OBJ_ID_COL)
        id_column.set_sort_column_id(RemoveUnused.OBJ_ID_COL)
        self.warn_tree.append_column(id_column)

        # Add column with object name
        name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
                                         text=RemoveUnused.OBJ_NAME_COL)
        name_column.set_sort_column_id(RemoveUnused.OBJ_NAME_COL)
        self.warn_tree.append_column(name_column)

        self.top.connect_signals({
            "destroy_passed_object"   : self.close,
            "on_remove_button_clicked": self.do_remove,
            "on_find_button_clicked"  : self.find,
            "on_delete_event"         : self.close,
            })

        self.dc_label = self.top.get_object('dc_label')

        # Widgets that only make sense once a search has been run
        self.sensitive_list = [self.warn_tree, self.mark_button,
                               self.unmark_button, self.invert_button,
                               self.dc_label, self.remove_button]
        for item in self.sensitive_list:
            item.set_sensitive(False)

        self.show()

    def build_menu_names(self, obj):
        """Return the (window title, menu label) pair for ManagedWindow."""
        return (self.title, None)

    def find(self, obj):
        """Scan the selected tables for unreferenced objects and fill the
        results list; saves the check-box state as tool options."""
        self.options.handler.options_dict.update(
            events = self.events_box.get_active(),
            sources = self.sources_box.get_active(),
            places = self.places_box.get_active(),
            media = self.media_box.get_active(),
            repos = self.repos_box.get_active(),
            notes = self.notes_box.get_active(),
            )
        for item in self.sensitive_list:
            item.set_sensitive(True)
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))

        self.real_model.clear()
        self.collect_unused()

        self.uistate.progress.hide()
        self.uistate.set_busy_cursor(False)
        self.window.get_window().set_cursor(None)
        self.reset()

        # Save options
        self.options.handler.save_options()

    def collect_unused(self):
        """Run through all requested tables and check every object for
        being referenced some place; add_results on those that are not."""
        db = self.db
        tables = (
            ('events', db.get_event_cursor, db.get_number_of_events),
            ('sources', db.get_source_cursor, db.get_number_of_sources),
            ('places', db.get_place_cursor, db.get_number_of_places),
            ('media', db.get_media_cursor, db.get_number_of_media_objects),
            ('repos', db.get_repository_cursor, db.get_number_of_repositories),
            ('notes', db.get_note_cursor, db.get_number_of_notes),
            )
        for (the_type, cursor_func, total_func) in tables:
            if not self.options.handler.options_dict[the_type]:
                # This table was not requested. Skip it.
                continue

            with cursor_func() as cursor:
                self.set_total(total_func())
                fbh = db.find_backlink_handles
                for handle, data in cursor:
                    if not any(h for h in fbh(handle)):
                        self.add_results((the_type, handle, data))
                    self.update()
            self.reset()

    def do_remove(self, obj):
        """Remove every marked object from the database in one transaction."""
        with DbTxn(_("Remove unused objects"), self.db, batch=False) as trans:
            self.db.disable_signals()

            # Iterate backwards so removing rows does not shift the
            # indices of rows not yet visited.
            for row_num in range(len(self.real_model)-1, -1, -1):
                path = (row_num,)
                row = self.real_model[path]
                if not row[RemoveUnused.MARK_COL]:
                    continue
                the_type = row[RemoveUnused.OBJ_TYPE_COL]
                handle = row[RemoveUnused.OBJ_HANDLE_COL]
                remove_func = self.tables[the_type]['remove']
                remove_func(handle, trans)
                self.real_model.remove(row.iter)
        self.db.enable_signals()
        self.db.request_rebuild()

    def selection_toggled(self, cell, path_string):
        """Flip the mark flag of the toggled row (mapping the view's sort
        path back to the underlying model path)."""
        sort_path = tuple(map(int, path_string.split(':')))
        real_path = self.sort_model.convert_path_to_child_path(sort_path)
        row = self.real_model[real_path]
        row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]
        self.real_model.row_changed(real_path, row.iter)

    def mark_clicked(self, mark_button):
        """Mark every row for removal."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = True

    def unmark_clicked(self, unmark_button):
        """Clear the mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = False

    def invert_clicked(self, invert_button):
        """Invert the mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]

    def double_click(self, obj, event):
        """On double-click, open the editor for the selected object."""
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            (model, node) = self.selection.get_selected()
            if not node:
                return
            sort_path = self.sort_model.get_path(node)
            real_path = self.sort_model.convert_path_to_child_path(sort_path)
            row = self.real_model[real_path]
            the_type = row[RemoveUnused.OBJ_TYPE_COL]
            handle = row[RemoveUnused.OBJ_HANDLE_COL]
            self.call_editor(the_type, handle)

    def call_editor(self, the_type, handle):
        """Open the appropriate object editor for the given handle.

        Uses importlib instead of the previous exec()-based import: under
        Python 3, exec() cannot create local variables, so
        exec('from ... import X as editor') followed by a reference to
        ``editor`` raises NameError. importlib works on both Python 2
        and 3 and avoids exec entirely.
        """
        try:
            obj = self.tables[the_type]['get_func'](handle)
            from importlib import import_module
            editors = import_module('gramps.gui.editors')
            editor = getattr(editors, self.tables[the_type]['editor'])
            editor(self.dbstate, self.uistate, [], obj)
        except WindowActiveError:
            pass

    def get_image(self, column, cell, model, iter, user_data=None):
        """Cell data func: show the stock icon matching the row's type."""
        the_type = model.get_value(iter, RemoveUnused.OBJ_TYPE_COL)
        the_stock = self.tables[the_type]['stock']
        cell.set_property('stock-id', the_stock)

    def add_results(self, results):
        """Append one (type, handle, raw data) result to the list model."""
        (the_type, handle, data) = results
        gramps_id = data[1]

        # if we have a function that will return to us some type
        # of text summary, then we should use it; otherwise we'll
        # use the generic field index provided in the tables above
        if self.tables[the_type]['get_text']:
            text = self.tables[the_type]['get_text'](the_type, handle, data)
        else:
            # grab the text field index we know about, and hope
            # it represents something useful to the user
            name_ix = self.tables[the_type]['name_ix']
            text = data[name_ix]

        # insert a new row into the table
        self.real_model.append(row=[False, gramps_id, text, the_type, handle])

    def get_event_text(self, the_type, handle, data):
        """
        Come up with a short line of text that we can use as
        a summary to represent this event.
        """
        # get the event:
        event = self.tables[the_type]['get_func'](handle)

        # first check to see if the event has a descriptive name
        text = event.get_description()  # (this is rarely set for events)

        # if we don't have a description...
        if text == '':
            # ... then we merge together several fields

            # get the event type (marriage, birth, death, etc.)
            text = str(event.get_type())

            # see if there is a date
            date = _dd.display(event.get_date_object())
            if date != '':
                text += '; %s' % date

            # see if there is a place
            place_handle = event.get_place_handle()
            if place_handle:
                place = self.db.get_place_from_handle(place_handle)
                text += '; %s' % place.get_title()

        return text

    def get_note_text(self, the_type, handle, data):
        """
        We need just the first few words of a note as a summary.
        """
        # get the note object
        note = self.tables[the_type]['get_func'](handle)

        # get the note text; this ignores (discards) formatting
        text = note.get()

        # convert whitespace to a single space
        text = u" ".join(text.split())

        # if the note is too long, truncate it
        if len(text) > 80:
            text = text[:80] + "..."

        return text
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class CheckOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)

        # Options specific for this report
        # Each flag enables scanning one object table; all on by default.
        self.options_dict = {
            'events'  : 1,
            'sources' : 1,
            'places'  : 1,
            'media'   : 1,
            'repos'   : 1,
            'notes'   : 1,
        }
        # Command-line help: (value syntax, description,
        # per-value descriptions, is_boolean)
        self.options_help = {
            'events'  : ("=0/1","Whether to use check for unused events",
                         ["Do not check events","Check events"],
                         True),
            'sources' : ("=0/1","Whether to use check for unused sources",
                         ["Do not check sources","Check sources"],
                         True),
            'places'  : ("=0/1","Whether to use check for unused places",
                         ["Do not check places","Check places"],
                         True),
            'media'   : ("=0/1","Whether to use check for unused media",
                         ["Do not check media","Check media"],
                         True),
            'repos'   : ("=0/1","Whether to use check for unused repositories",
                         ["Do not check repositories","Check repositories"],
                         True),
            'notes'   : ("=0/1","Whether to use check for unused notes",
                         ["Do not check notes","Check notes"],
                         True),
        }
| arunkgupta/gramps | gramps/plugins/tool/removeunused.py | Python | gpl-2.0 | 18,389 | [
"Brian"
] | 2522da598729f59467b9ecfd7489ba3e8ecee08080b11090f98d554ebec06a34 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# NOTE: this is a cookiecutter template - the {%- if ... %} blocks are
# rendered away at project generation time.
urlpatterns = [
    # Static pages rendered directly from templates
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    {%- if cookiecutter.use_custom_user_model.lower() == 'y' -%}
    # User management
    url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
    {%- endif %}
    {%- if cookiecutter.use_allauth == 'y' -%}
    url(r'^accounts/', include('allauth.urls')),
    {%- endif %}
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
| stepanovsh/project_template | {{cookiecutter.repo_name}}/config/urls.py | Python | bsd-3-clause | 1,464 | [
"VisIt"
] | 70b656926ab43b7eea63efd53d3a3f0639e113605fa4ec14d9983e05f53e559b |
#!/usr/bin/env python
from numpy import *
import numpy as np
from scipy import interpolate
from matplotlib.pyplot import *
import os
def finddt(epmin, mumin, dx, dy, dz):
    """Return the maximum stable FDTD time step (3-D Courant limit with a
    6/7 safety factor) for the given minimum relative permittivity and
    permeability and the cell sizes dx, dy, dz. Python 2 syntax."""
    epmin = epmin*ep0
    mumin = mumin*mu0
    dtmax = 6.0/7.0*sqrt(epmin*mumin/(1.0/dx**2 + 1.0/dy**2 + 1.0/dz**2))
    print "dt max = ", dtmax
    return dtmax
def finddx(epmax, mumax, fmax):
    """Return the maximum grid spacing: one wavelength at fmax in the
    densest medium (epmax, mumax are relative values)."""
    epmax = epmax*ep0
    mumax = mumax*mu0
    wlmin = 1/(fmax*sqrt(epmax*mumax))
    dxmax = wlmin
    print "max dx = ", dxmax
    return dxmax
def blackharrispulse(fmax, dt):
    """Return the normalized first difference of a 4-term Blackman-Harris
    window sampled at dt (a common GPR source pulse); also saves a plot."""
    # 4-term Blackman-Harris window coefficients
    a = [0.35322222, -0.488, 0.145, -0.010222222]
    T = 1.14/fmax
    t = arange(0, T, dt)
    window = zeros(size(t))
    for n in range(4):
        window = window + a[n]*cos(2*n*pi*t/T)
    window[t >= T] = 0
    p = window  # NOTE(review): dead assignment - overwritten just below
    # Discrete derivative of the window, normalized to unit peak amplitude
    p = window[:] - append(0,window[:-1])
    p = p/max(abs(p))
    plot(p)
    savefig('blackharrispulse.png')
    return p
def gaussian(dt, t0, nt):
    """Return a Gaussian pulse (amplitude 1e5, centred at 3*t0) sampled on
    nt points over [0, 10*t0]. Side effects: rebinds the module-level
    time vector ``t`` and saves a plot."""
    global t
    T = 10*t0
    t = linspace(0, T, nt)
    p = 10**5 * exp(-((t-3*t0)/t0)**2)
    plot(t, p)
    savefig('gaussian.png')
    return p
def ricker(f, length, dt):
    """Return a Ricker (Mexican-hat) wavelet of centre frequency f over a
    window of the given length, sampled at dt; also saves a plot."""
    t = np.linspace(-length/2, (length-dt)/2, length/dt)
    y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
    plot(t, y)
    savefig('ricker.png')
    return y
def check_dx(srcpulse):
    """Re-check the grid spacing against the source's actual spectrum:
    take fmax as the highest frequency whose amplitude exceeds 10% of the
    peak and re-run finddx with it. Uses module globals dt/epmax/mumax."""
    # Pad to the next power of two for the FFT
    n = 2**int(ceil(log2(len(srcpulse))))
    freqs = np.linspace(0, 1/dt/2, n/2+1)
    sp = np.fft.rfft(srcpulse, n)/n
    W = abs(sp)
    # Highest frequency with amplitude above 10% of the spectral peak
    fmax2 = max(freqs[W > max(W)/10.0])
    print "!!check dx again:"
    finddx(epmax, mumax, fmax2)
    figure()
    plot(freqs, W)
    title('frequency spectrum of source')
    savefig('spectral_src.png')
def findsurf(model, nn):
    """Return the air/ground interface index for column nn of ``model``.

    NOTE(review): the unconditional ``surf = 30`` below discards the index
    found by the loop, so this function always returns 30 (which happens to
    match the 30-cell air layer built at the bottom of this script).
    Confirm whether this override is intentional before removing it.
    """
    nz, nx = shape(model)
    for k in range(nz):
        if model[k, nn] > 1.1:
            surf = k-1
            break
    surf = 30
    return surf
def src_rec():
    """Write src.in and rec.in: one Ey source at mid-width on the surface
    and a 60 x 25 grid of Ey receivers inside the PML margins. Appends the
    source waveform (srcpulse) after the source header. Rebinds module
    globals ``src`` and ``rec``; copies the files into STD/RTM when
    Dum_RTM is set."""
    global src, rec
    src = []
    nsrc = 1
    nshift = 0
    # Evenly space nsrc sources across the interior (outside the PML)
    nx_src = float(nx-2*npmlx)/(nsrc + 1)
    dumx = npmlx
    for i in range(nsrc):
        dumx += nx_src
        surf = findsurf(eps_image, dumx)
        src.append([dumx, ny/2, surf, 'Ey', 1])
    nsrc = len(src)
    print "nsrc: ", nsrc
    nrecx = 60
    nrecy = 25
    rec = []
    nx_rec = float(nx-2*npmlx)/(nrecx+1)
    ny_rec = float(ny-2*npmly)/(nrecy+1)
    dumx = npmlx
    dumy = npmly
    for i in range(nrecx):
        dumx += nx_rec
        dumy = npmly
        surf = findsurf(eps_image, dumx)
        for j in range(nrecy):
            dumy += ny_rec
            rec.append([dumx+nshift, dumy+nshift, surf, 'Ey'])
    nrec = len(rec)
    print "nrec: ", nrec
    #%%write data
    with file('src.in', 'w') as fsrc:
        fsrc.write("%d %d\n" % (nsrc, nt_src))
        for i in range(nsrc):
            fsrc.write("%d %d %d %s\n" % (
                src[i][0], src[i][1], src[i][2], src[i][3]))
        # One waveform line per source, scaled by its amplitude field
        for i in range(nsrc):
            savetxt(fsrc, array([srcpulse])*src[i][4])
    with file('rec.in', 'w') as frec:
        frec.write("%d\n" % (nrec))
        for i in range(nrec):
            frec.write("%d %d %d %s\n" % (
                rec[i][0], rec[i][1], rec[i][2], rec[i][3]))
    if Dum_RTM == 1:
        os.system("cp ./rec.in ./RTM/rec.in")
        os.system("cp ./src.in ./STD/src.in")
        os.system("cp ./rec.in ./STD/rec.in")
def islice():
    """Write slice.in: one Ey output slice per axis (x, y, z) at fixed
    positions, then copy the file to STD/RTM when Dum_RTM is set."""
    nslicex = 1; nslicey = 1; nslicez = 1
    slicex = []; slicey = []; slicez = []
    slicex.append([nx/2+1, 'Ey'])
    slicey.append([ny/2+1, 'Ey'])
    slicez.append([nz*3/4, 'Ey'])
    print "nslicex,nslicey,nslicez: ", nslicex, nslicey, nslicez
    with file('slice.in', 'w') as fslice:
        fslice.write("%d %d %d\n" % (len(slicex), len(slicey), len(slicez)))
        # NOTE(review): these loops always write element [0], not [i] -
        # harmless with one slice per axis, wrong for more. Confirm.
        for i in range(len(slicex)):
            fslice.write("%d %s\n" % (slicex[0][0], slicex[0][1]))
        for i in range(len(slicey)):
            fslice.write("%d %s\n" % (slicey[0][0], slicey[0][1]))
        for i in range(len(slicez)):
            fslice.write("%d %s\n" % (slicez[0][0], slicez[0][1]))
    if Dum_RTM == 1:
        os.system('cp slice.in ./STD/')
        os.system("cp slice.in ./RTM/")
def distance(x, y, z, x0, y0, z0):
    """Euclidean distance between the points (x, y, z) and (x0, y0, z0)."""
    deltas = (x - x0, y - y0, z - z0)
    return sqrt(sum(d*d for d in deltas))
def load_image(fname):
    """Build the 2-D permittivity/conductivity model from a greyscale
    image: map pixel-value bands to eps/sigma, then resample onto a
    0.04 m grid (50 m x 20 m). Rebinds module globals ``eps_image`` and
    ``sig_image`` and saves diagnostic plots."""
    global eps_image, sig_image
    pic = imread(fname)
    pic = array(pic)
    eps2d = zeros(shape(pic[:, :]))
    sig2d = 0.01*ones(shape(pic[:, :]))
    # Greyscale thresholds -> material bands (last band = air: eps 1, sig 0)
    eps2d[:, :] = 10
    eps2d[pic >= 1] = 12
    eps2d[pic > 71] = 8
    eps2d[pic > 100] = 9
    eps2d[pic > 125] = 7.5
    eps2d[pic > 148] = 8.5
    eps2d[pic > 207] = 1.0
    sig2d[pic > 207] = 0.0
#    imshow(eps)
#    savefig("Model2d.png")
    # Resample the pixel grid onto the physical 0.04 m model grid
    nz, nx = shape(eps2d)
    x = linspace(0, 50, nx)
    z = linspace(0, 20, nz)
    x2 = arange(0, 50, 0.04)
    z2 = arange(0, 20, 0.04)
    fep = interpolate.interp2d(x, z, eps2d, kind='linear')
    fsig = interpolate.interp2d(x, z, sig2d, kind='linear')
    eps_image = fep(x2, z2)
    sig_image = fsig(x2, z2)
#    figure()
    imshow(eps_image)
    savefig("Model2d_eps.png")
    imshow(sig_image)
    savefig("Model2d_sig.png")
def par():
    """Write par.in, the solver's main parameter file (grid, time step,
    PML, and the names of the other input files), from module globals;
    copy it to STD/RTM when Dum_RTM is set."""
    with file('par.in', 'w') as fpar:
        fpar.write("#dx dy dz dt\n")
        fpar.write("%e %e %e %e\n" % (dx, dy, dz, dt))
        fpar.write("#nx ny nz nt\n")
        fpar.write("%d %d %d %d\n" % (nx, ny, nz, nt))
        fpar.write("#nt of src\n")
        fpar.write("%d\n" % (nt_src))
        fpar.write("#output time step and space step of wavefield\n")
        fpar.write("%d %d\n" % (outstep_t_wavefield, outstep_x_wavefield))
        fpar.write("#output step of slice\n")
        fpar.write("%d\n" % (outstep_slice))
        fpar.write("#npml x y z\n")
        fpar.write("%d %d %d\n" % (npmlx, npmly, npmlz))
        fpar.write("#pml m kapxmax kapymax kapzmax alpha\n")
        fpar.write("4 5 5 5 0.0\n")
        fpar.write("#location of src\n")
        fpar.write("src.in\n")
        fpar.write("#location of rec\n")
        fpar.write("rec.in\n")
        fpar.write("#epsilon file\n")
        fpar.write("eps.in\n")
        fpar.write("#mu file\n")
        fpar.write("mu.in\n")
        fpar.write("#sigma file\n")
        fpar.write("sig.in\n")
        fpar.write("#slices file\n")
        fpar.write("slice.in\n")
    if Dum_RTM == 1:
        os.system('cp par.in ./STD/')
        os.system('cp par.in ./RTM/')
def plot_src_rec():
    """Save four diagnostic scatter plots of source and receiver layout:
    XZ and XY projections for each (z axis inverted so depth grows down)."""
    figure()
    hold(True)
    for i in range(len(src)):
        plot(src[i][0], src[i][2], 'vb')
    #axis('equal')
    xlabel("x axis")
    ylabel("z axis")
    xlim(0, nx)
    ylim(0, nz)
    gca().invert_yaxis()
    gca().xaxis.tick_top()
    gca().xaxis.set_label_position('top')
    savefig("src_XZ.png")
    figure()
    hold(True)
    for i in range(len(rec)):
        plot(rec[i][0], rec[i][2], 'vb')
    #axis('equal')
    xlabel("x axis")
    ylabel("z axis")
    xlim(0, nx)
    ylim(0, nz)
    gca().invert_yaxis()
    gca().xaxis.tick_top()
    gca().xaxis.set_label_position('top')
    savefig("rec_XZ.png")
    figure()
#    subplot(2,1,1)
    hold(True)
    for i in range(len(src)):
        plot(src[i][0], src[i][1], 'vb')
    #axis('equal')
    xlabel("x axis")
    ylabel("y axis")
    xlim(0, nx)
    ylim(0, ny)
    savefig("src_XY.png")
    figure()
    hold(True)
    for i in range(len(rec)):
        plot(rec[i][0], rec[i][1], 'vb')
    #axis('equal')
    xlabel("x axis")
    ylabel("y axis")
    xlim(0, nx)
    ylim(0, ny)
    savefig("rec_XY.png")
def X_partition(nx, NUM_OF_PROCESS):
    """Split the x axis into NUM_OF_PROCESS near-equal MPI subdomains.
    Rebinds module globals: ``rangex`` (boundary indices, length
    NUM_OF_PROCESS+1) and ``nxSize`` (per-rank width including the
    2*order ghost-cell overlap).

    NOTE(review): relies on Python 2 integer division (nx/NUM_OF_PROCESS)
    and stores the indices in float numpy arrays - the float values are
    later used for slicing in model_partition2, which newer numpy
    versions reject. Confirm the intended numpy/Python versions.
    """
    global rangex, nxSize
    rangex = zeros(NUM_OF_PROCESS+1)
    nxSize = zeros(NUM_OF_PROCESS)
    rangex[0] = 0
    for i in range(1, NUM_OF_PROCESS):
        # First (nx % NUM_OF_PROCESS) ranks get one extra column
        if i <= (nx % NUM_OF_PROCESS):
            rangex[i] = rangex[i-1] + (nx/NUM_OF_PROCESS + 1)
        else:
            rangex[i] = rangex[i-1] + (nx/NUM_OF_PROCESS)
        nxSize[i-1] = rangex[i] - rangex[i-1] + 2*order
    rangex[NUM_OF_PROCESS] = nx
    nxSize[NUM_OF_PROCESS-1] = rangex[NUM_OF_PROCESS] - rangex[NUM_OF_PROCESS-1] + 2*order
    return 0
def model_partition(NUM_OF_PROCESS):
    """Write per-rank eps/sig/mu input files (cell-by-cell text), cloning
    the edge columns into the ghost cells of the first and last rank.
    With Dum_RTM set, also writes a single STD epsilon model in which
    everything below the surface is replaced by a 9.0 background.
    Superseded in this script by the vectorized model_partition2."""
    if Dum_RTM == 1:
        fep_STD = open('./STD/Input/eps.in', 'w')
    for ii in range(NUM_OF_PROCESS):
        feps = open('./Input/eps.in_'+str(ii).zfill(3), 'w')
        print feps
        for i in arange(nxSize[ii]):
            for j in range(ny):
                for k in range(nz):
                    if ii == 0:
                        # First rank: left ghost cells repeat the first column
                        if i < order:
                            feps.write("%f " % (eps_image[j, rangex[ii]]))
                            if Dum_RTM == 1:
                                if k <= findsurf(eps_image, i):
                                    fep_STD.write("%f " % (eps_image[j, rangex[ii]]))
                                else:
                                    fep_STD.write("%f " % (9.0))
                        else:
                            feps.write("%f " % (
                                eps_image[j, rangex[ii]-order+i]))
                            if Dum_RTM == 1:
                                if k <= findsurf(eps_image, i):
                                    fep_STD.write("%f " % (eps_image[j, rangex[ii]-order+i]))
                                else:
                                    fep_STD.write("%f " % (9.0))
                    elif ii == NUM_OF_PROCESS - 1:
                        # Last rank: right ghost cells repeat the last column
                        if i >= nxSize[ii] - order:
                            feps.write("%f " % (eps_image[j, rangex[ii] - 2*order + nxSize[ii] - 1]))
                            if Dum_RTM == 1:
                                if k <= findsurf(eps_image, i):
                                    fep_STD.write("%f " % (eps_image[j, rangex[ii] - 2*order + nxSize[ii] - 1]))
                                else:
                                    fep_STD.write("%f " % (9.0))
                        else:
                            feps.write("%f " % (eps_image[j, rangex[ii]-order+i]))
                            if Dum_RTM == 1:
                                if k <= findsurf(eps_image, i):
                                    fep_STD.write("%f " % (eps_image[j, rangex[ii]-order+i]))
                                else:
                                    fep_STD.write("%f " % (9.0))
                    else:
                        # Interior rank: plain offset into the global model
                        feps.write("%f " % (eps_image[j, rangex[ii]-order+i]))
                        if Dum_RTM == 1:
                            if k <= findsurf(eps_image, i):
                                fep_STD.write("%f " % (eps_image[j, rangex[ii]-order+i]))
                            else:
                                fep_STD.write("%f " % (9.0))
        feps.close()
#        fep_STD.close()
        # Uniform conductivity and permeability for every cell of the rank
        fsig = open('./Input/sig.in_'+str(ii).zfill(3), 'w')
        for i in arange(nxSize[ii]):
            for j in range(ny):
                for k in range(nz):
                    fsig.write("%e " % (1.0e-3))
        fsig.close()
        fmu = open('./Input/mu.in_'+str(ii).zfill(3), 'w')
        for i in arange(nxSize[ii]):
            for j in range(ny):
                for k in range(nz):
                    fmu.write("%f " % (1.0))
        fmu.close()
    if Dum_RTM == 1:
        fep_STD.close()
        os.system("cp ./STD/Input/eps.in* ./RTM/Input/")
    if Dum_RTM == 1:
        os.system('cp ./Input/mu.in* ./STD/Input/')
        os.system('cp ./Input/mu.in* ./RTM/Input/')
    if Dum_RTM == 1:
        os.system('cp ./Input/sig.in* ./STD/Input/')
        os.system('cp ./Input/sig.in* ./RTM/Input/')
    return 0
def model_partition2(NUM_OF_PROCESS):
    """Vectorized replacement for model_partition: pad the 2-D eps/sig
    models with ``order`` ghost columns on each side, slice out each
    rank's x range, replicate it across the ny dimension and write the
    flattened 3-D arrays with savetxt. mu is written as all ones."""
    # Pad eps with ghost columns that repeat the first/last column
    eps_image_T = eps_image.T
    eps_image_edage_up = zeros((order, nz))
    eps_image_edage_down = zeros((order, nz))
    eps_image_edage_up[:, :] = eps_image_T[0, :]
    eps_image_edage_down[:, :] = eps_image_T[-1, :]
    eps_image_new = vstack((eps_image_edage_up, eps_image_T, eps_image_edage_down))
    # Same padding for the conductivity model
    sig_image_T = sig_image.T
    sig_image_edage_up = zeros((order, nz))
    sig_image_edage_down = zeros((order, nz))
    sig_image_edage_up[:, :] = sig_image_T[0, :]
    sig_image_edage_down[:, :] = sig_image_T[-1, :]
    sig_image_new = vstack((sig_image_edage_up, sig_image_T, sig_image_edage_down))
    for ii in range(NUM_OF_PROCESS):
        feps = open('./Input/eps.in_'+str(ii).zfill(3), 'w')
        print feps
        # Rank's x slab including ghost cells, replicated over ny
        dum_eps = zeros((nxSize[ii], nz))
        dum_eps[:, :] = eps_image_new[rangex[ii]:(rangex[ii+1]+2*order), :]
        dum_eps_3d = zeros((nxSize[ii], 1, nz))
        dum_eps_3d[:, 0, :] = dum_eps[:, :]
        eps_3d = dum_eps_3d.repeat(ny, axis=1)
        eps_out = eps_3d.reshape(nxSize[ii]*ny*nz)
        savetxt(feps, eps_out)
        feps.close()
        fsig = open('./Input/sig.in_'+str(ii).zfill(3), 'w')
        dum_sig = zeros((nxSize[ii], nz))
        dum_sig[:, :] = sig_image_new[rangex[ii]:(rangex[ii+1]+2*order), :]
        dum_sig_3d = zeros((nxSize[ii], 1, nz))
        dum_sig_3d[:, 0, :] = dum_sig[:, :]
        sig_3d = dum_sig_3d.repeat(ny,axis=1)
        sig_out = sig_3d.reshape(nxSize[ii]*ny*nz)
        savetxt(fsig, sig_out)
        fsig.close()
        # Non-magnetic model: relative permeability 1 everywhere
        fmu = open('./Input/mu.in_'+str(ii).zfill(3), 'w')
        dum_mu = ones((nxSize[ii],ny,nz))
        mu_out = dum_mu.reshape(nxSize[ii]*ny*nz)
        savetxt(fmu,mu_out)
        fmu.close()
def plot_partition(NUM_OF_PROCESS):
    """Sanity-check the written model files: reload each rank's eps/sig
    file, plot it, and stitch the interior (ghost-free) slabs back
    together into full-model images."""
    figure()
    image_all_eps = []
    image_all_sig = []
    for ii in range(NUM_OF_PROCESS):
        fname = './Input/eps.in_'+str(ii).zfill(3)
        # Take one y plane of the reloaded 3-D block for plotting
        image = loadtxt(fname).reshape(nxSize[ii], ny, nz)[:, 0, :]
#        image = image.resize()
#        image = list(image)
#        image_all.append(image[order:-order,:])
        if ii == 0:
            image_all_eps = image[order:-order, :]
        else:
            image_all_eps = vstack([image_all_eps, image[order:-order, :]])
        imshow(image)
        savefig('./Input/Fig_partition_eps_'+str(ii).zfill(3)+'.png')
        fname = './Input/sig.in_'+str(ii).zfill(3)
        image = loadtxt(fname).reshape(nxSize[ii], ny, nz)[:, 1, :]
#        image = image.resize()
#        image = list(image)
#        image_all.append(image[order:-order,:])
        if ii == 0:
            image_all_sig = image[order:-order, :]
        else:
            image_all_sig = vstack([image_all_sig, image[order:-order, :]])
        imshow(image)
        savefig('./Input/Fig_partition_sig_'+str(ii).zfill(3)+'.png')
    image_all_sig = array(image_all_sig)
    imshow(image_all_sig.T)
    savefig('Fig_All_sig.png')
    image_all_eps = array(image_all_eps)
    imshow(image_all_eps.T)
    savefig('Fig_All_eps.png')
    return 0
# ---- Main script: build all solver inputs for a GPR FDTD run ----
dir_out = './Output/'
dir_in = './Input/'
for idir in [dir_out, dir_in]:
    if not os.path.exists(idir):
        os.mkdir(idir)
# Physical constants (vacuum permeability / permittivity)
mu0 = 1.2566370614e-6
ep0 = 8.8541878176e-12
# Relative material bounds used for the stability/dispersion estimates
epmin = 1.0
mumin = 1.0
epmax = 15.0
mumax = 1.0
fmax = 300e6  # Hz
# PML thickness in cells (same on all axes)
npmlx = 12
npmly = npmlx
npmlz = npmlx
# Grid spacing: computed bound is printed, then overridden to 0.02 m
dx = finddx(epmax, mumax, fmax)
dx = 0.02
dy = dx
dz = dx
print "dx dy dz: ", dx, dy, dz
NUM_OF_PROCESS = 10
order = 2
nx = 250
#nx = 500
ny = 100
nz = 200
print "nx ny nz: ", nx, ny, nz
# Time step: computed CFL bound is printed, then overridden to 3e-11 s
dt = finddt(epmin, mumin, dx, dy, dz)
dt = 3e-11
print "dt: ", dt
nt = 2000
srcpulse = blackharrispulse(fmax, dt)
#srcpulse = ricker(fmax,4*1/fmax,dt)
nt_src = len(srcpulse)
print "nt, nt_src: ", nt, nt_src
check_dx(srcpulse)
# Output decimation factors for wavefield and slice dumps
outstep_t_wavefield = 10
outstep_x_wavefield = 5
outstep_slice = 5
Dum_RTM = 1
if Dum_RTM == 1:
    for idir in ['./STD/', './STD/Output', 'STD/Input/', './RTM', './RTM/Output', './RTM/Input']:
        if not os.path.exists(idir):
            os.mkdir(idir)
    os.system("cp ./FDTD_MPI ./STD/")
    os.system("cp ./FDTD_MPI ./RTM/")
#load_image('model_grey2.jpg')
# Simple layered model: 30 cells of air over a uniform eps=9 half-space
eps_image = 9.0*ones((nz, nx))
sig_image = 0.00*ones((nz, nx))
eps_image[0:30, :] = 1.0
sig_image[0:30, :] = 0.0
src_rec()
plot_src_rec()
islice()
par()
print "NUM_OF_PROCESS: ", NUM_OF_PROCESS
X_partition(nx, NUM_OF_PROCESS)
model_partition2(NUM_OF_PROCESS)
plot_partition(NUM_OF_PROCESS)
# Optionally clear previous run outputs (interactive confirmation)
if(raw_input('Clear ./Output?(Y)') == 'Y'):
    os.system('rm -f ./Output/*')
if(raw_input('Clear ./STD/Output?(Y)') == 'Y'):
    os.system('rm -f ./STD/Output/*')
if(raw_input('Clear ./RTM/Output?(Y)') == 'Y'):
    os.system('rm -f ./RTM/Output/*')
"""
np.savetxt('ep.in',ep,fmt='%lf')
np.savetxt('mu.in',mu,fmt='%lf')
np.savetxt('sig.in',sig,fmt='%lf')
"""
#show()
| IFDYS/IO_MPI | mkmodel.py | Python | gpl-2.0 | 16,270 | [
"Gaussian"
] | 90bcafe60af24e0484dfc326c261c493ff55f42e2e31463d17893c396dd2049c |
import os
import re
import ssl
import sys
import tempfile
from doctest import testfile
from io import StringIO, TextIOWrapper
from unittest import TestCase
from unittest.mock import DEFAULT, Mock, patch
from urllib3.exceptions import LocationParseError
from crate.client.exceptions import ProgrammingError
from crate.crash.command import (
CrateShell,
_create_shell,
get_parser,
get_stdin,
host_and_port,
main,
noargs_command,
)
from crate.crash.commands import Command
from crate.crash.outputs import _val_len as val_len
from crate.crash.printer import ColorPrinter
from crate.testing.layer import CrateLayer
# Ports the embedded CrateDB test node listens on; chosen high to avoid
# clashing with a locally running instance.
crate_http_port = 44209
crate_transport_port = 44309
# Settings for the throwaway test cluster.  Several tests below assert on
# the literal cluster name and port numbers, so keep these in sync.
crate_settings = {
    'cluster.name': 'Testing44209',
    'node.name': 'crate',
    'psql.port': 45441,
    'lang.js.enabled': True,
    'http.port': crate_http_port,
    'transport.tcp.port': crate_transport_port
}
# Single CrateDB node shared by every test in this module (started in
# setUpModule, stopped in tearDownModule).
node = CrateLayer.from_uri(
    'https://cdn.crate.io/downloads/releases/crate-4.2.0.tar.gz',
    'crate',
    settings=crate_settings
)
def setUpModule():
    # Start the shared CrateDB node once for all tests in this module.
    node.start()
def tearDownModule():
    # Shut the shared CrateDB node down after the last test has run.
    node.stop()
def fake_stdin(data):
    """Build a seekable, file-backed text stream usable as a stdin stand-in.

    The stream is rewound to the start so the first read returns *data*.
    """
    backing_file = tempfile.TemporaryFile()
    stream = TextIOWrapper(backing_file)
    stream.write(data)
    stream.flush()
    stream.seek(0)
    return stream
class RaiseOnceSideEffect:
    """
    A callable intended for use as a mock ``side_effect``.

    The first invocation raises the configured exception; every later
    invocation is forwarded to the wrapped original callable.
    """

    def __init__(self, exception, original):
        self.exception = exception
        self.original = original
        self.raised = False

    def __call__(self, *args, **kwargs):
        # After the one-shot failure, behave exactly like the original.
        if self.raised:
            return self.original(*args, **kwargs)
        self.raised = True
        raise self.exception
class DocumentationTest(TestCase):
    """Drives the doctest files shipped with crash as unit tests.

    Failures inside the files are reported by ``doctest.testfile`` itself.
    """
    def test_output(self):
        testfile('../crate/crash/output.txt')
    def test_connect(self):
        testfile('../crate/crash/connect.txt')
class CommandTest(TestCase):
    def _output_format(self, format, func, query="select name from sys.cluster"):
        """Run *query* through main() with ``--format`` and check via *func*.

        *func* is invoked as ``func(self, system_exit, stdout, stderr)``
        once main() terminates with SystemExit; sys.argv is restored
        afterwards regardless of outcome.
        """
        orig_argv = sys.argv[:]
        try:
            sys.argv = ["testcrash",
                        "-c", query,
                        "--hosts", node.http_url,
                        '--format', format
                        ]
            with patch('sys.stdout', new_callable=StringIO) as output:
                with patch('sys.stderr', new_callable=StringIO) as err:
                    try:
                        main()
                    except SystemExit as e:
                        func(self, e, output, err)
        finally:
            sys.argv = orig_argv
    def test_val_len(self):
        """_val_len measures the rendered width of a value.

        Booleans and None render as TRUE/NULL (4 chars, as seen in the
        table-output tests below); containers render via their string form.
        """
        self.assertEqual(val_len(1), 1)
        self.assertEqual(val_len(12), 2)
        self.assertEqual(val_len('123'), 3)
        self.assertEqual(val_len(True), 4)
        self.assertEqual(val_len(None), 4)
        self.assertEqual(val_len([1, 2, 3]), 9)
        self.assertEqual(val_len({'key': 'val'}), 14)
def test_tabulate_output(self):
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('| name |' in output)
self.assertTrue('| Testing44209 |' in output)
self._output_format('tabular', assert_func)
def test_json_output(self):
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('"name": "Testing44209"' in output)
self._output_format('json', assert_func)
def test_json_row_output(self):
query = "select table_name from information_schema.tables where table_name = 'cluster'"
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('{"table_name": "cluster"}' in output)
self._output_format('json_row', assert_func, query)
def test_csv_obj_output(self):
query = "select name, port from sys.nodes limit 1"
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue("""crate,'{"http": 44209, "psql": 45441, "transport": 44309}'""" in output)
self._output_format('csv', assert_func, query)
def test_csv_array_output(self):
query = "select ['/dev/', 'foo'] as arr"
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('["/dev/' in output)
self._output_format('csv', assert_func, query)
def test_raw_output(self):
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('"duration":' in output)
self.assertTrue('"rowcount":' in output)
self.assertTrue('"rows":' in output)
self.assertTrue('"cols":' in output)
self._output_format('raw', assert_func)
def test_mixed_output(self):
def assert_func(self, e, output, err):
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue("name | Testing44209" in output)
self._output_format('mixed', assert_func)
def test_pprint_duplicate_keys(self):
"Output: table with duplicate keys"
expected = "\n".join(["+------+------+",
"| name | name |",
"+------+------+",
"+------+------+\n"])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint([], ['name', 'name'])
self.assertEqual(expected, output.getvalue())
def test_pprint_dont_guess_type(self):
"Output: table with duplicate keys"
expected = "\n".join(["+---------+",
"| version |",
"+---------+",
"| 0.50 |",
"+---------+\n"])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint([["0.50"]], ['version'])
self.assertEqual(expected, output.getvalue())
@patch('sys.stdin', fake_stdin(u"select 'via-stdin' from sys.cluster"))
def test_stdin_cmd(self):
"Test passing in SQL via stdin"
try:
orig_argv = sys.argv[:]
tmphistory = tempfile.mkstemp()[1]
sys.argv = ['testcrash',
'--hosts', node.http_url,
'--history', tmphistory]
with patch('sys.stdout', new_callable=StringIO) as output:
try:
main()
except SystemExit as e:
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('via-stdin' in output)
finally:
try:
os.remove(tmphistory)
except IOError:
pass
sys.argv = orig_argv
sys.stdin.close()
@patch('sys.stdin', fake_stdin(u"select 'via-stdin' from sys.cluster"))
def test_cmd_precedence(self):
"""Test precedence of SQL passed in via -c vs. stdin
SQL passed in via --command should take precedence
over stdin
"""
try:
stmt = u"select 'via-command' from information_schema.tables limit 1"
orig_argv = sys.argv[:]
tmphistory = tempfile.mkstemp()[1]
sys.argv = ['testcrash',
"--command", stmt,
'--hosts', node.http_url,
'--history', tmphistory]
with patch('sys.stdout', new_callable=StringIO) as output:
try:
main()
except SystemExit as e:
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
self.assertTrue('via-command' in output)
self.assertFalse('via-stdin' in output)
finally:
try:
os.remove(tmphistory)
except IOError:
pass
sys.argv = orig_argv
sys.stdin.close()
def test_multiple_hosts(self):
orig_argv = sys.argv[:]
try:
tmphistory = tempfile.mkstemp()[1]
sys.argv = ["testcrash",
"-c", "select * from sys.cluster",
"--hosts", node.http_url, "127.0.0.1:1",
'--history', tmphistory,
'--format', 'tabular',
'-v',
]
with patch('sys.stdout', new_callable=StringIO) as output:
try:
main()
except SystemExit as e:
exception_code = e.code
self.assertEqual(exception_code, 0)
output = output.getvalue()
lines = output.split('\n')
self.assertRegex(lines[3], r'^\| http://[\d\.:]+ .*\| NULL .*\| FALSE .*\| Server not available')
self.assertRegex(lines[4], r'^\| http://[\d\.:]+. *\| crate .*\| TRUE .*\| OK')
finally:
try:
os.remove(tmphistory)
except IOError:
pass
sys.argv = orig_argv
def test_cmd_line_sys_info(self):
sys.argv = ["testcrash",
"--hosts", node.http_url,
"--sysinfo"
]
with patch('sys.stdout', new_callable=StringIO):
try:
main()
except SystemExit as e:
self.assertEqual(e.code, 0)
@patch('sys.stdin', fake_stdin('\n'.join(["create table test(",
"d string",
")",
"clustered into 2 shards",
"with (number_of_replicas=0)"])))
def test_multiline_stdin(self):
"""Test pass multiline statement via stdin
Newlines must be replaced with whitespaces
"""
stmt = ''.join(list(get_stdin())).replace('\n', ' ')
expected = ("create table test( d string ) "
"clustered into 2 shards "
"with (number_of_replicas=0)")
try:
self.assertEqual(stmt, expected)
finally:
sys.stdin.close()
@patch('sys.stdin', fake_stdin('\n'.join(["create table test(",
"d string",
")",
"clustered into 2 shards",
"with (number_of_replicas=0);"])))
def test_multiline_stdin_delimiter(self):
"""Test pass multiline statement with delimiter via stdin
Newlines must be replaced with whitespaces
"""
stmt = ''.join(list(get_stdin())).replace('\n', ' ')
expected = ("create table test( d string ) "
"clustered into 2 shards "
"with (number_of_replicas=0);")
try:
self.assertEqual(stmt, expected)
finally:
sys.stdin.close()
def test_tabulate_null_int_column(self):
"""
Create a column with a non-string value and NULL.
"""
rows = [[1], [None]]
expected = "\n".join(['+------+',
'| x |',
'+------+',
'| 1 |',
'| NULL |',
'+------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['x'])
self.assertEqual(expected, output.getvalue())
def test_tabulate_boolean_int_column(self):
"""
Create another column with a non-string value and FALSE.
"""
rows = [['FALSE'], [1]]
expected = "\n".join(['+-------+',
'| x |',
'+-------+',
'| FALSE |',
'| 1 |',
'+-------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['x'])
self.assertEqual(expected, output.getvalue())
def test_multiline_header(self):
"""
Create another column with a non-string value and FALSE.
"""
rows = [['FALSE'], [1]]
expected = "\n".join(['+-------+',
'| x |',
'| y |',
'+-------+',
'| FALSE |',
'| 1 |',
'+-------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['x\ny'])
self.assertEqual(expected, output.getvalue())
def test_multiline_row(self):
"""
Create ta column that holds rows with multiline text.
"""
rows = [['create table foo (\n id integer,\n name string\n)', 'foo\nbar', 1]]
expected = "\n".join(['+-----------------------+-----+---+',
'| show create table foo | a | b |',
'+-----------------------+-----+---+',
'| create table foo ( | foo | 1 |',
'| id integer, | bar | |',
'| name string | | |',
'| ) | | |',
'+-----------------------+-----+---+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['show create table foo', 'a', 'b'])
self.assertEqual(expected, output.getvalue())
def test_tabulate_empty_line(self):
self.maxDiff = None
rows = (
['Aldebaran', 'Star System'],
['Berlin', 'City'],
['Galactic Sector QQ7 Active J Gamma', 'Galaxy'],
['', 'Planet']
)
expected = "\n".join(['+------------------------------------+-------------+',
'| min(name) | kind |',
'+------------------------------------+-------------+',
'| Aldebaran | Star System |',
'| Berlin | City |',
'| Galactic Sector QQ7 Active J Gamma | Galaxy |',
'| | Planet |',
'+------------------------------------+-------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['min(name)', 'kind'])
# assert 0
self.assertEqual(expected, output.getvalue())
def test_empty_line_first_row_first_column(self):
self.maxDiff = None
rows = (
['', 'Planet'],
['Aldebaran', 'Star System'],
['Berlin', 'City'],
['Galactic Sector QQ7 Active J Gamma', 'Galaxy']
)
expected = "\n".join(['+------------------------------------+-------------+',
'| min(name) | kind |',
'+------------------------------------+-------------+',
'| | Planet |',
'| Aldebaran | Star System |',
'| Berlin | City |',
'| Galactic Sector QQ7 Active J Gamma | Galaxy |',
'+------------------------------------+-------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['min(name)', 'kind'])
self.assertEqual(expected, output.getvalue())
def test_empty_first_row(self):
self.maxDiff = None
rows = (
['', ''],
['Aldebaran', 'Aldebaran'],
['Algol', 'Algol'],
['Allosimanius Syneca', 'Allosimanius - Syneca'],
['Alpha Centauri', 'Alpha - Centauri']
)
expected = "\n".join(['+---------------------+-----------------------+',
'| name | replaced |',
'+---------------------+-----------------------+',
'| | |',
'| Aldebaran | Aldebaran |',
'| Algol | Algol |',
'| Allosimanius Syneca | Allosimanius - Syneca |',
'| Alpha Centauri | Alpha - Centauri |',
'+---------------------+-----------------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['name', 'replaced'])
self.assertEqual(expected, output.getvalue())
def test_any_empty(self):
self.maxDiff = None
rows = (
['Features and conformance views', 'FALSE', '', ''],
['Features and conformance views', 'TRUE', 1, 'SQL_FEATURES view'],
['Features and conformance views', 'FALSE', 2, 'SQL_SIZING view'],
['Features and conformance views', 'FALSE', 3, 'SQL_LANGUAGES view']
)
expected = "\n".join(['+--------------------------------+--------------+----------------+--------------------+',
'| feature_name | is_supported | sub_feature_id | sub_feature_name |',
'+--------------------------------+--------------+----------------+--------------------+',
'| Features and conformance views | FALSE | | |',
'| Features and conformance views | TRUE | 1 | SQL_FEATURES view |',
'| Features and conformance views | FALSE | 2 | SQL_SIZING view |',
'| Features and conformance views | FALSE | 3 | SQL_LANGUAGES view |',
'+--------------------------------+--------------+----------------+--------------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['feature_name', 'is_supported', 'sub_feature_id', 'sub_feature_name'])
self.assertEqual(expected, output.getvalue())
def test_first_column_first_row_empty(self):
self.maxDiff = None
rows = (
['', 1.0],
['Aldebaran', 1.0],
['Algol', 1.0],
['Allosimanius Syneca', 1.0],
['Alpha Centauri', 1.0],
['Argabuthon', 1.0],
['Arkintoofle Minor', 1.0],
['Galactic Sector QQ7 Active J Gamma', 1.0],
['North West Ripple', 1.0],
['Outer Eastern Rim', 1.0],
['NULL', 1.0]
)
expected = "\n".join(['+------------------------------------+--------+',
'| name | _score |',
'+------------------------------------+--------+',
'| | 1.0 |',
'| Aldebaran | 1.0 |',
'| Algol | 1.0 |',
'| Allosimanius Syneca | 1.0 |',
'| Alpha Centauri | 1.0 |',
'| Argabuthon | 1.0 |',
'| Arkintoofle Minor | 1.0 |',
'| Galactic Sector QQ7 Active J Gamma | 1.0 |',
'| North West Ripple | 1.0 |',
'| Outer Eastern Rim | 1.0 |',
'| NULL | 1.0 |',
'+------------------------------------+--------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint(rows, cols=['name', '_score'])
self.assertEqual(expected, output.getvalue())
def test_error_exit_code(self):
"""Test returns an error exit code"""
stmt = u"select * from invalid sql statement"
sys.argv = [
"testcrash",
"--command", stmt,
'--hosts', node.http_url
]
try:
main()
except SystemExit as e:
self.assertEqual(e.code, 1)
def test_verbose_with_error_trace(self):
with CrateShell(error_trace=True) as cmd:
cmd.logger = Mock()
cmd.cursor.execute = Mock(side_effect=ProgrammingError(msg="the error message",
error_trace="error trace"))
cmd._exec_and_print("select invalid statement")
cmd.logger.critical.assert_any_call("the error message")
cmd.logger.critical.assert_called_with("\nerror trace")
def test_verbose_no_error_trace(self):
with CrateShell(error_trace=True) as cmd:
cmd.logger = Mock()
cmd.cursor.execute = Mock(side_effect=ProgrammingError(msg="the error message",
error_trace=None))
cmd._exec_and_print("select invalid statement")
# only the message is logged
cmd.logger.critical.assert_called_once_with("the error message")
def test_rendering_object(self):
"""Test rendering an object"""
user = {'name': 'Arthur', 'age': 42}
expected = "\n".join(['+-------------------------------+',
'| user |',
'+-------------------------------+',
'| {"age": 42, "name": "Arthur"} |',
'+-------------------------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint([[user]], ['user'])
self.assertEqual(expected, output.getvalue())
def test_rendering_array(self):
"""Test rendering an array"""
names = ['Arthur', 'Ford']
expected = "\n".join(['+--------------------+',
'| names |',
'+--------------------+',
'| ["Arthur", "Ford"] |',
'+--------------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint([[names]], ['names'])
self.assertEqual(expected, output.getvalue())
def test_rendering_float(self):
"""Test rendering an array"""
expected = "\n".join(['+---------------+',
'| number |',
'+---------------+',
'| 3.1415926535 |',
'| 42.0 |',
'+---------------+\n'])
with CrateShell() as cmd:
with patch('sys.stdout', new_callable=StringIO) as output:
cmd.pprint([[3.1415926535], [42.0]], ['number'])
self.assertEqual(expected, output.getvalue())
def test_help_command(self):
"""Test output of help command"""
command = CrateShell(is_tty=False)
expected = "\n".join([
'\\? print this help',
'\\autocapitalize toggle automatic capitalization of SQL keywords',
'\\autocomplete toggle autocomplete',
'\\c connect to the given server, e.g.: \\connect localhost:4200',
'\\check print failed cluster and/or node checks, e.g. \\check nodes',
'\\connect connect to the given server, e.g.: \\connect localhost:4200',
'\\dt print the existing tables within the \'doc\' schema',
'\\format switch output format',
'\\q quit crash',
'\\r read and execute statements from a file',
'\\sysinfo print system and cluster info',
'\\verbose toggle verbose mode',
])
help_ = command.commands['?']
self.assertTrue(isinstance(help_, Command))
self.assertEqual(expected, help_(command))
with CrateShell(is_tty=False) as cmd:
output = StringIO()
cmd.logger = ColorPrinter(False, stream=output)
text = help_(cmd, 'arg1', 'arg2')
self.assertEqual(None, text)
self.assertEqual('Command does not take any arguments.\n', output.getvalue())
def test_noargs_decorator(self):
"""Test noargs decorator"""
output = StringIO()
class MyCmd:
logger = ColorPrinter(False, stream=output)
@noargs_command
def my_cmd(self, *args):
return 'awesome'
command = MyCmd()
command.my_cmd()
text = command.my_cmd()
self.assertEqual('awesome', text)
text = command.my_cmd('arg1')
self.assertEqual(None, text)
self.assertEqual('Command does not take any arguments.\n', output.getvalue())
def test_wrong_host_format(self):
parser = get_parser()
args = parser.parse_args([
"--hosts", "localhost:12AB"
])
crate_hosts = [host_and_port(h) for h in args.hosts]
with self.assertRaises(LocationParseError):
_create_shell(crate_hosts, False, None, False, args)
def test_command_timeout(self):
with CrateShell(node.http_url) as crash:
crash.process("""
CREATE FUNCTION fib(long)
RETURNS LONG
LANGUAGE javascript AS '
function fib(n) {
if (n < 2) return 1;
return fib(n - 1) + fib(n - 2);
}'
""")
timeout = 0.1
slow_query = "SELECT fib(35)"
# without verbose
with CrateShell(node.http_url,
error_trace=False,
timeout=timeout) as crash:
crash.logger = Mock()
crash.process(slow_query)
crash.logger.warn.assert_any_call("Use \\connect <server> to connect to one or more servers first.")
# with verbose
with CrateShell(node.http_url,
error_trace=True,
timeout=timeout) as crash:
crash.logger = Mock()
crash.process(slow_query)
crash.logger.warn.assert_any_call("No more Servers available, exception from last server: HTTPConnectionPool(host='127.0.0.1', port=44209): Read timed out. (read timeout=0.1)")
crash.logger.warn.assert_any_call("Use \\connect <server> to connect to one or more servers first.")
def test_username_param(self):
with CrateShell(node.http_url,
username='crate') as crash:
self.assertEqual(crash.username, "crate")
self.assertEqual(crash.connection.client.username, "crate")
def test_ssl_params(self):
tmpdirname = tempfile.mkdtemp()
cert_filename = os.path.join(tmpdirname, "cert_file")
key_filename = os.path.join(tmpdirname, "key_file")
ca_cert_filename = os.path.join(tmpdirname, "ca_cert_file")
open(cert_filename, 'a').close()
open(key_filename, 'a').close()
open(ca_cert_filename, 'a').close()
with CrateShell(node.http_url,
verify_ssl=False,
cert_file=cert_filename,
key_file=key_filename,
ca_cert_file=ca_cert_filename) as crash:
self.assertEqual(crash.verify_ssl, False)
self.assertEqual(crash.connection.client._pool_kw['cert_reqs'], ssl.CERT_NONE)
self.assertEqual(crash.cert_file, cert_filename)
self.assertEqual(crash.connection.client._pool_kw['cert_file'], cert_filename)
self.assertEqual(crash.key_file, key_filename)
self.assertEqual(crash.connection.client._pool_kw['key_file'], key_filename)
self.assertEqual(crash.ca_cert_file, ca_cert_filename)
self.assertEqual(crash.connection.client._pool_kw['ca_certs'], ca_cert_filename)
def test_ssl_params_missing_file(self):
argv = [
"--hosts", node.http_url,
"--verify-ssl", "false",
"--key-file", "wrong_file",
"--ca-cert-file", "ca_cert_file"
]
parser = get_parser()
with self.assertRaises(FileNotFoundError):
parser.parse_args(argv)
def test_ssl_params_wrong_permision_file(self):
tmpdirname = tempfile.mkdtemp()
ca_cert_filename = os.path.join(tmpdirname, "ca_cert_file")
open(ca_cert_filename, 'a').close()
os.chmod(ca_cert_filename, 0000)
argv = [
"--hosts", node.http_url,
"--verify-ssl", "false",
"--ca-cert-file", ca_cert_filename
]
parser = get_parser()
with self.assertRaises(PermissionError):
parser.parse_args(argv)
def test_close_shell(self):
crash = CrateShell(node.http_url)
self.assertFalse(crash.is_closed())
self.assertTrue(crash.is_conn_available())
crash.close()
self.assertTrue(crash.is_closed())
self.assertFalse(crash.is_conn_available())
with self.assertRaises(ProgrammingError) as ctx:
crash.close()
self.assertEqual('CrateShell is already closed',
ctx.exception.message)
def test_connect_info(self):
with CrateShell(node.http_url,
username='crate',
schema='test') as crash:
self.assertEqual(crash.connect_info.user, "crate")
self.assertEqual(crash.connect_info.schema, "test")
self.assertEqual(crash.connect_info.cluster, "Testing44209")
with patch.object(
crash.cursor,
"execute",
side_effect=RaiseOnceSideEffect(
ProgrammingError("SQLActionException[UnsupportedFeatureException]"),
crash.cursor.execute,
)
):
crash._fetch_session_info()
self.assertEqual(crash.connect_info.user, None)
self.assertEqual(crash.connect_info.schema, "test")
self.assertEqual(crash.connect_info.cluster, "Testing44209")
with patch.object(
crash.cursor,
"execute",
side_effect=RaiseOnceSideEffect(
ProgrammingError("SQLActionException[SchemaUnknownException]"),
crash.cursor.execute,
)
):
crash._fetch_session_info()
self.assertEqual(crash.connect_info.user, "crate")
self.assertEqual(crash.connect_info.schema, "test")
self.assertEqual(crash.connect_info.cluster, None)
with patch.object(
crash.cursor,
"execute",
side_effect=RaiseOnceSideEffect(
ProgrammingError("SQLActionException"),
crash.cursor.execute,
)
):
crash._fetch_session_info()
self.assertEqual(crash.connect_info.user, None)
self.assertEqual(crash.connect_info.schema, None)
self.assertEqual(crash.connect_info.cluster, None)
@patch.object(CrateShell, "is_conn_available")
def test_connect_info_not_available(self, is_conn_available):
is_conn_available.return_value = False
with CrateShell(node.http_url,
username='crate',
schema='test') as crash:
self.assertEqual(crash.connect_info.user, None)
self.assertEqual(crash.connect_info.schema, None)
| crate/crash | tests/test_integration.py | Python | apache-2.0 | 34,483 | [
"Galaxy"
] | 73a396cbf3abbd2630879884e36da0bd17877a0d3daab55f4a46d9c3b40bbe17 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a person.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import StringOption
#-------------------------------------------------------------------------
#
# PersonOption class
#
#-------------------------------------------------------------------------
class PersonOption(StringOption):
    """
    This class describes an option that allows a person from the
    database to be selected.
    """
    def __init__(self, label):
        """
        :param label: A friendly label to be applied to this option.
            Example: "Center Person"
        :type label: string
        :return: nothing

        The option's value holds the GID of the selected person
        (for example "p11") and starts out as the empty string; it is
        set later through the StringOption API.
        """
        StringOption.__init__(self, label, "")
| sam-m888/gprime | gprime/plug/menu/_person.py | Python | gpl-2.0 | 1,774 | [
"Brian"
] | 3ee744a13ff8860e4295bbb05afc1ae18772d117424365b932b355a4be167a18 |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through Course Builder pages."""
__author__ = 'Sean Lip'
import __builtin__
import copy
import cStringIO
import csv
import datetime
import logging
import os
import re
import shutil
import sys
import time
import urllib
import zipfile
import actions
from actions import assert_contains
from actions import assert_contains_all_of
from actions import assert_does_not_contain
from actions import assert_equals
from controllers_review import PeerReviewControllerTest
from controllers_review import PeerReviewDashboardTest
from review_stats import PeerReviewAnalyticsTest
from webtest.app import AppError
import appengine_config
from common.utils import Namespace
from controllers import lessons
from controllers import sites
from controllers import utils
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import jobs
from models import models
from models import transforms
from models import vfs
from models.courses import Course
import modules.admin.admin
from modules.announcements.announcements import AnnouncementEntity
import modules.oeditor.oeditor
from tools.etl import etl
from tools.etl import etl_lib
from tools.etl import examples
from tools.etl import remote
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# A number of data files in a test course.
COURSE_FILE_COUNT = 70
# Datastore entities that hold parts of course content. Delay-loaded.
COURSE_CONTENT_ENTITY_FILES = [
'QuestionEntity.json', 'QuestionGroupEntity.json']
# There is an expectation in our tests of automatic import of data/*.csv files,
# which is achieved below by selecting an alternative factory method.
courses.Course.create_new_default_course = (
courses.Course.custom_new_default_course_for_test)
def _add_data_entity(app_context, entity_type, data):
    """Persist a new *entity_type* instance carrying *data* in the
    namespace of *app_context*, restoring the prior namespace on exit."""
    previous_ns = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        entity = entity_type()
        entity.data = data
        entity.put()
        return entity
    finally:
        namespace_manager.set_namespace(previous_ns)
def _assert_identical_data_entity_exists(app_context, test_object):
    """Assert that *test_object* is stored unchanged in *app_context*'s
    namespace: same key id and same data payload."""
    previous_ns = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        stored = test_object.__class__().get(test_object.key())
        assert stored
        assert stored.data == test_object.data
        assert stored.key().id() == test_object.key().id()
    finally:
        namespace_manager.set_namespace(previous_ns)
class InfrastructureTest(actions.TestBase):
"""Test core infrastructure classes agnostic to specific user roles."""
    def test_value_cached_in_one_namespace_invisible_in_another(self):
        """Value cached in one namespace is not visible in another.

        Exercises MemcacheManager across explicitly-set namespaces, the
        default namespace and the None namespace, restoring the previous
        namespace after every step.
        """
        # set value and check it's visible in one namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_a')
            models.MemcacheManager.set('foo', 'bar')
            assert 'bar' == models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # check same value is not visible in another namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_b')
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # check same value is not visible in default namespace
        assert not models.MemcacheManager.get('foo')
        # check same value is not visible in None namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(None)
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # set value and check it's visible in default namespace
        models.MemcacheManager.set('foo', 'bar')
        assert 'bar' == models.MemcacheManager.get('foo')
        # check value is not visible in another namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_c')
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
    def test_response_content_type_is_application_json_in_utf_8(self):
        """REST config endpoint must respond as UTF-8 JavaScript content."""
        response = self.testapp.get(
            '/rest/config/item?key=gcb_config_update_interval_sec')
        self.assertEqual(
            'application/javascript; charset=utf-8',
            response.headers['Content-Type'])
    def test_xsrf_token_manager(self):
        """Test XSRF token operations.

        Covers anonymous and signed-in token issuance, rejection of a
        token whose timestamp was forged, and expiry of aged tokens.
        (Uses long(), i.e. this module targets Python 2.)
        """
        # os.environ['AUTH_DOMAIN'] = 'test_domain'
        # os.environ['APPLICATION_ID'] = 'test app'
        # Issues and verify anonymous user token.
        action = 'test-action'
        token = utils.XsrfTokenManager.create_xsrf_token(action)
        assert '/' in token
        assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
        # Impersonate real user.
        os.environ['USER_EMAIL'] = 'test_email'
        os.environ['USER_ID'] = 'test_id'
        # Issues and verify real user token.
        action = 'test-action'
        token = utils.XsrfTokenManager.create_xsrf_token(action)
        assert '/' in token
        assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
        # Check forged time stamp invalidates token.
        parts = token.split('/')
        assert len(parts) == 2
        forgery = '%s/%s' % (long(parts[0]) + 1000, parts[1])
        assert forgery != token
        assert not utils.XsrfTokenManager.is_xsrf_token_valid(forgery, action)
        # Check token properly expires.
        action = 'test-action'
        time_in_the_past = long(
            time.time() - utils.XsrfTokenManager.XSRF_TOKEN_AGE_SECS)
        # pylint: disable=protected-access
        old_token = utils.XsrfTokenManager._create_token(
            action, time_in_the_past)
        assert not utils.XsrfTokenManager.is_xsrf_token_valid(old_token, action)
        # Clean up.
        # del os.environ['APPLICATION_ID']
        # del os.environ['AUTH_DOMAIN']
        del os.environ['USER_EMAIL']
        del os.environ['USER_ID']
def test_import_course(self):
"""Tests importing one course into another."""
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
dst_course_a = courses.Course(None, app_context=dst_app_context_a)
dst_course_b = courses.Course(None, app_context=dst_app_context_b)
src_course = courses.Course(None, app_context=src_app_context)
assert not dst_course_a.get_units()
assert not dst_course_b.get_units()
assert 12 == len(src_course.get_units())
# Import 1.2 course into 1.3.
errors = []
src_course_out, dst_course_out_a = dst_course_a.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course.get_units()) == len(src_course_out.get_units())
assert len(
src_course_out.get_units()) == len(dst_course_out_a.get_units())
# add dependent entities so we can check they make it through the import
dependents = []
for dependent_entity_class in courses.COURSE_CONTENT_ENTITIES:
dependents.append(_add_data_entity(
dst_course_out_a.app_context,
dependent_entity_class, 'Test ' % dependent_entity_class))
assert dependents
# Import 1.3 course into 1.3.
errors = []
src_course_out_a, dst_course_out_b = dst_course_b.import_from(
dst_app_context_a, errors)
if errors:
raise Exception(errors)
assert src_course_out_a.get_units() == dst_course_out_b.get_units()
for dependent in dependents:
_assert_identical_data_entity_exists(
dst_course_out_b.app_context, dependent)
# Test delete.
units_to_delete = dst_course_a.get_units()
deleted_count = 0
for unit in units_to_delete:
assert dst_course_a.delete_unit(unit)
deleted_count += 1
dst_course_a.save()
assert deleted_count == len(units_to_delete)
assert not dst_course_a.get_units()
assert not dst_course_a.app_context.fs.list(os.path.join(
dst_course_a.app_context.get_home(), 'assets/js/'))
# Clean up.
sites.reset_courses()
def test_import_13_assessment(self):
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
all_courses = sites.get_all_courses()
src_app_context = all_courses[0]
dst_app_context = all_courses[1]
src_course = courses.Course(None, app_context=src_app_context)
dst_course = courses.Course(None, app_context=dst_app_context)
# Add an assessment
src_assessment = src_course.add_assessment()
self.assertEqual('A', src_assessment.type)
src_assessment.title = 'Test Assessment'
src_assessment.release_date = '2015-01-01 12:15'
src_assessment.now_available = True
src_assessment.properties = {'key': 'value'}
src_assessment.weight = 3.14
src_assessment.html_content = 'content'
src_assessment.html_check_answers = 'check'
src_assessment.html_review_form = 'review'
src_assessment.workflow_yaml = 'a: 3'
src_course.save()
errors = []
dst_course.import_from(src_app_context, errors)
self.assertEqual(0, len(errors))
dst_assessment = dst_course.find_unit_by_id(src_assessment.unit_id)
self.assertEqual(src_assessment.__dict__, dst_assessment.__dict__)
def test_import_13_lesson(self):
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
all_courses = sites.get_all_courses()
src_app_context = all_courses[0]
dst_app_context = all_courses[1]
src_course = courses.Course(None, app_context=src_app_context)
dst_course = courses.Course(None, app_context=dst_app_context)
# Add a unit
src_unit = src_course.add_unit()
src_lesson = src_course.add_lesson(src_unit)
src_lesson.title = 'Test Lesson'
src_lesson.scored = True
src_lesson.objectives = 'objectives'
src_lesson.video = 'video'
src_lesson.notes = 'notes'
src_lesson.duration = 'duration'
src_lesson.now_available = True
src_lesson.has_activity = True
src_lesson.activity_title = 'activity title'
src_lesson.activity_listed = False
src_lesson.properties = {'key': 'value'}
src_course.save()
errors = []
dst_course.import_from(src_app_context, errors)
self.assertEqual(0, len(errors))
dst_unit = dst_course.find_unit_by_id(src_unit.unit_id)
dst_lesson = dst_course.find_lesson_by_id(
dst_unit, src_lesson.lesson_id)
self.assertEqual(src_lesson.__dict__, dst_lesson.__dict__)
    def test_create_new_course(self):
        """Tests creating a new course.

        Exercises the full unit/lesson lifecycle: add link, unit and
        assessment; update them; add, reorder and move lessons; toggle
        public/private visibility over HTTP; and verify deletes (including
        delayed asset deletion, which only takes effect on save()).
        """
        # Setup courses.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        # Add several units.
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        link = course.add_link()
        unit = course.add_unit()
        assessment = course.add_assessment()
        course.save()
        assert course.find_unit_by_id(link.unit_id)
        assert course.find_unit_by_id(unit.unit_id)
        assert course.find_unit_by_id(assessment.unit_id)
        assert 3 == len(course.get_units())
        # Unit ids are assigned sequentially, so the third item gets id 3.
        assert assessment.unit_id == 3
        # Check unit can be found.
        assert unit == course.find_unit_by_id(unit.unit_id)
        assert not course.find_unit_by_id(999)
        # Update unit.
        unit.title = 'Test Title'
        course.update_unit(unit)
        course.save()
        assert 'Test Title' == course.find_unit_by_id(unit.unit_id).title
        # Update assessment.
        assessment_content = open(os.path.join(
            appengine_config.BUNDLE_ROOT,
            'assets/js/assessment-Pre.js'), 'rb').readlines()
        assessment_content = u''.join(assessment_content)
        errors = []
        course.set_assessment_content(assessment, assessment_content, errors)
        course.save()
        assert not errors
        # The content must round-trip unchanged through the VFS.
        assessment_content_stored = course.app_context.fs.get(os.path.join(
            course.app_context.get_home(),
            course.get_assessment_filename(assessment.unit_id)))
        assert assessment_content == assessment_content_stored
        # Test adding lessons.
        lesson_a = course.add_lesson(unit)
        lesson_b = course.add_lesson(unit)
        lesson_c = course.add_lesson(unit)
        course.save()
        assert [lesson_a, lesson_b, lesson_c] == course.get_lessons(
            unit.unit_id)
        assert lesson_c.lesson_id == 6
        # Reorder lessons.
        new_order = [
            {'id': link.unit_id},
            {
                'id': unit.unit_id,
                'lessons': [
                    {'id': lesson_b.lesson_id},
                    {'id': lesson_a.lesson_id},
                    {'id': lesson_c.lesson_id}]},
            {'id': assessment.unit_id}]
        course.reorder_units(new_order)
        course.save()
        assert [lesson_b, lesson_a, lesson_c] == course.get_lessons(
            unit.unit_id)
        # Move lesson to another unit.
        another_unit = course.add_unit()
        course.move_lesson_to(lesson_b, another_unit)
        course.save()
        assert [lesson_a, lesson_c] == course.get_lessons(unit.unit_id)
        assert [lesson_b] == course.get_lessons(another_unit.unit_id)
        course.delete_unit(another_unit)
        course.save()
        # Make the course available.
        # Monkeypatch get_environ so the course reads as publicly available;
        # restored at the bottom of the test.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Test public/private assessment.
        assessment_url = (
            '/test/' + course.get_assessment_filename(assessment.unit_id))
        assert not assessment.now_available
        response = self.get(assessment_url, expect_errors=True)
        assert_equals(response.status_int, 403)
        assessment = course.find_unit_by_id(assessment.unit_id)
        assessment.now_available = True
        course.update_unit(assessment)
        course.save()
        response = self.get(assessment_url)
        assert_equals(response.status_int, 200)
        # Check delayed assessment deletion.
        # The backing file is only removed when the course is saved.
        course.delete_unit(assessment)
        response = self.get(assessment_url)  # note: file is still available
        assert_equals(response.status_int, 200)
        course.save()
        response = self.get(assessment_url, expect_errors=True)
        assert_equals(response.status_int, 404)
        # Test public/private activity.
        lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
        lesson_a.now_available = False
        lesson_a.has_activity = True
        course.update_lesson(lesson_a)
        errors = []
        course.set_activity_content(lesson_a, u'var activity = []', errors)
        assert not errors
        activity_url = (
            '/test/' + course.get_activity_filename(None, lesson_a.lesson_id))
        response = self.get(activity_url, expect_errors=True)
        assert_equals(response.status_int, 403)
        lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
        lesson_a.now_available = True
        course.update_lesson(lesson_a)
        course.save()
        response = self.get(activity_url)
        assert_equals(response.status_int, 200)
        # Check delayed activity.
        course.delete_lesson(lesson_a)
        response = self.get(activity_url)  # note: file is still available
        assert_equals(response.status_int, 200)
        course.save()
        response = self.get(activity_url, expect_errors=True)
        assert_equals(response.status_int, 404)
        # Test deletes removes all child objects.
        course.delete_unit(link)
        course.delete_unit(unit)
        # The assessment was already deleted above, so deleting again fails.
        assert not course.delete_unit(assessment)
        course.save()
        assert not course.get_units()
        assert not course.app_context.fs.list(os.path.join(
            course.app_context.get_home(), 'assets/js/'))
        # Clean up.
        sites.ApplicationContext.get_environ = get_environ_old
        sites.reset_courses()
    def test_unit_lesson_not_available(self):
        """Tests that unavailable units and lessons behave correctly.

        Builds five units covering every availability combination, then
        walks them twice over HTTP: as a student (unavailable content is
        hidden or redirected) and as an admin (all content visible, private
        lessons flagged with a 'private' marker tag).
        """
        # Setup a new course.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        self.base = '/test'
        config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
        app_context = sites.get_all_courses()[0]
        course = courses.Course(None, app_context=app_context)
        # Add a unit that is not available.
        unit_1 = course.add_unit()
        unit_1.now_available = False
        lesson_1_1 = course.add_lesson(unit_1)
        lesson_1_1.title = 'Lesson 1.1'
        course.update_unit(unit_1)
        # Add a unit with some lessons available and some lessons not available.
        unit_2 = course.add_unit()
        unit_2.now_available = True
        lesson_2_1 = course.add_lesson(unit_2)
        lesson_2_1.title = 'Lesson 2.1'
        lesson_2_1.now_available = False
        lesson_2_2 = course.add_lesson(unit_2)
        lesson_2_2.title = 'Lesson 2.2'
        lesson_2_2.now_available = True
        course.update_unit(unit_2)
        # Add a unit with all lessons not available.
        unit_3 = course.add_unit()
        unit_3.now_available = True
        lesson_3_1 = course.add_lesson(unit_3)
        lesson_3_1.title = 'Lesson 3.1'
        lesson_3_1.now_available = False
        course.update_unit(unit_3)
        # Add a unit that is available.
        unit_4 = course.add_unit()
        unit_4.now_available = True
        lesson_4_1 = course.add_lesson(unit_4)
        lesson_4_1.title = 'Lesson 4.1'
        lesson_4_1.now_available = True
        course.update_unit(unit_4)
        # Add an available unit with no lessons.
        unit_5 = course.add_unit()
        unit_5.now_available = True
        course.update_unit(unit_5)
        course.save()
        assert [lesson_1_1] == course.get_lessons(unit_1.unit_id)
        assert [lesson_2_1, lesson_2_2] == course.get_lessons(unit_2.unit_id)
        assert [lesson_3_1] == course.get_lessons(unit_3.unit_id)
        # Make the course available.
        # Monkeypatch get_environ; restored at the bottom of the test.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Marker emitted in the page only for admin views of private lessons.
        private_tag = 'id="lesson-title-private"'
        # Simulate a student traversing the course.
        email = 'test_unit_lesson_not_available@example.com'
        name = 'Test Unit Lesson Not Available'
        actions.login(email, is_admin=False)
        actions.register(self, name)
        # Accessing a unit that is not available redirects to the main page.
        response = self.get('unit?unit=%s' % unit_1.unit_id)
        assert_equals(response.status_int, 302)
        response = self.get('unit?unit=%s' % unit_2.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 2.1', response.body)
        assert_contains('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s&lesson=%s' % (
            unit_2.unit_id, lesson_2_2.lesson_id))
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 2.2', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_3.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 3.1', response.body)
        assert_contains('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_4.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 4.1', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_5.unit_id)
        assert_equals(response.status_int, 200)
        assert_does_not_contain('Lesson', response.body)
        assert_contains(
            'This unit does not contain any lessons.', response.body)
        assert_does_not_contain(private_tag, response.body)
        actions.logout()
        # Simulate an admin traversing the course.
        email = 'test_unit_lesson_not_available@example.com_admin'
        name = 'Test Unit Lesson Not Available Admin'
        actions.login(email, is_admin=True)
        actions.register(self, name)
        # The course admin can access a unit that is not available.
        response = self.get('unit?unit=%s' % unit_1.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 1.1', response.body)
        response = self.get('unit?unit=%s' % unit_2.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 2.1', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_contains(private_tag, response.body)
        response = self.get('unit?unit=%s&lesson=%s' % (
            unit_2.unit_id, lesson_2_2.lesson_id))
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 2.2', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_3.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 3.1', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_contains(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_4.unit_id)
        assert_equals(response.status_int, 200)
        assert_contains('Lesson 4.1', response.body)
        assert_does_not_contain('This lesson is not available.', response.body)
        assert_does_not_contain(private_tag, response.body)
        response = self.get('unit?unit=%s' % unit_5.unit_id)
        assert_equals(response.status_int, 200)
        assert_does_not_contain('Lesson', response.body)
        assert_contains(
            'This unit does not contain any lessons.', response.body)
        assert_does_not_contain(private_tag, response.body)
        actions.logout()
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_custom_assessments(self):
        """Tests that custom assessments are evaluated correctly.

        Submits scores to two weighted assessments and checks per-assessment
        scores, the weighted overall score, the student profile page, and
        that re-submitting a lower score never reduces a recorded score.
        """
        # Setup a new course.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        self.base = '/test'
        self.namespace = 'ns_test'
        config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
        app_context = sites.get_all_courses()[0]
        course = courses.Course(None, app_context=app_context)
        email = 'test_assessments@google.com'
        name = 'Test Assessments'
        # Both assessments start with weight 0 so the overall score is
        # undefined until a weight is assigned below.
        assessment_1 = course.add_assessment()
        assessment_1.title = 'first'
        assessment_1.now_available = True
        assessment_1.weight = 0
        assessment_2 = course.add_assessment()
        assessment_2.title = 'second'
        assessment_2.now_available = True
        assessment_2.weight = 0
        course.save()
        assert course.find_unit_by_id(assessment_1.unit_id)
        assert course.find_unit_by_id(assessment_2.unit_id)
        assert 2 == len(course.get_units())
        # Make the course available.
        # Monkeypatch get_environ; restored at the bottom of the test.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        first = {'score': '1.00', 'assessment_type': assessment_1.unit_id}
        second = {'score': '3.00', 'assessment_type': assessment_2.unit_id}
        # Update assessment 1.
        assessment_1_content = open(os.path.join(
            appengine_config.BUNDLE_ROOT,
            'assets/js/assessment-Pre.js'), 'rb').readlines()
        assessment_1_content = u''.join(assessment_1_content)
        errors = []
        course.set_assessment_content(
            assessment_1, assessment_1_content, errors)
        course.save()
        assert not errors
        # Update assessment 2.
        assessment_2_content = open(os.path.join(
            appengine_config.BUNDLE_ROOT,
            'assets/js/assessment-Mid.js'), 'rb').readlines()
        assessment_2_content = u''.join(assessment_2_content)
        errors = []
        course.set_assessment_content(
            assessment_2, assessment_2_content, errors)
        course.save()
        assert not errors
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Submit assessment 1.
        actions.submit_assessment(self, assessment_1.unit_id, first)
        student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
            email, app_context)
        student_scores = course.get_all_scores(student)
        assert len(student_scores) == 2
        assert student_scores[0]['id'] == str(assessment_1.unit_id)
        assert student_scores[0]['score'] == 1
        assert student_scores[0]['title'] == 'first'
        assert student_scores[0]['weight'] == 0
        # Assessment 2 has not been submitted yet, so its score is 0.
        assert student_scores[1]['id'] == str(assessment_2.unit_id)
        assert student_scores[1]['score'] == 0
        assert student_scores[1]['title'] == 'second'
        assert student_scores[1]['weight'] == 0
        # The overall score is None if there are no weights assigned to any of
        # the assessments.
        overall_score = course.get_overall_score(student)
        assert overall_score is None
        # View the student profile page.
        response = self.get('student/home')
        assert_does_not_contain('Overall course score', response.body)
        # Add a weight to the first assessment.
        assessment_1.weight = 10
        overall_score = course.get_overall_score(student)
        assert overall_score == 1
        # Submit assessment 2.
        actions.submit_assessment(self, assessment_2.unit_id, second)
        # We need to reload the student instance, because its properties have
        # changed.
        student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
            email, app_context)
        student_scores = course.get_all_scores(student)
        assert len(student_scores) == 2
        assert student_scores[1]['score'] == 3
        overall_score = course.get_overall_score(student)
        assert overall_score == 1
        # Change the weight of assessment 2.
        assessment_2.weight = 30
        overall_score = course.get_overall_score(student)
        # Overall score is the weighted average, truncated to int.
        assert overall_score == int((1 * 10 + 3 * 30) / 40)
        # Save all changes.
        course.save()
        # View the student profile page.
        response = self.get('student/home')
        assert_contains('assessment-score-first">1</span>', response.body)
        assert_contains('assessment-score-second">3</span>', response.body)
        assert_contains('Overall course score', response.body)
        assert_contains('assessment-score-overall">2</span>', response.body)
        # Submitting a lower score for any assessment does not change any of
        # the scores, since the system records the maximum score that has ever
        # been achieved on any assessment.
        first_retry = {'score': '0', 'assessment_type': assessment_1.unit_id}
        actions.submit_assessment(self, assessment_1.unit_id, first_retry)
        student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
            email, app_context)
        student_scores = course.get_all_scores(student)
        assert len(student_scores) == 2
        assert student_scores[0]['id'] == str(assessment_1.unit_id)
        assert student_scores[0]['score'] == 1
        overall_score = course.get_overall_score(student)
        assert overall_score == int((1 * 10 + 3 * 30) / 40)
        actions.logout()
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_datastore_backed_file_system(self):
        """Tests datastore-backed file system operations.

        Covers binary round-trip, draft flag, unicode text content,
        delete, overwrite, existence checks and recursive listing.
        """
        fs = vfs.AbstractFileSystem(vfs.DatastoreBackedFileSystem('', '/'))
        # Check binary file.
        src = os.path.join(appengine_config.BUNDLE_ROOT, 'course.yaml')
        dst = os.path.join('/', 'course.yaml')
        fs.put(dst, open(src, 'rb'))
        stored = fs.open(dst)
        assert stored.metadata.size == len(open(src, 'rb').read())
        assert not stored.metadata.is_draft
        assert stored.read() == open(src, 'rb').read()
        # Check draft.
        fs.put(dst, open(src, 'rb'), is_draft=True)
        stored = fs.open(dst)
        assert stored.metadata.is_draft
        # Check text files with non-ASCII characters and encoding.
        foo_js = os.path.join('/', 'assets/js/foo.js')
        foo_text = u'This is a test text (тест данные).'
        fs.put(foo_js, vfs.string_to_stream(foo_text))
        stored = fs.open(foo_js)
        assert vfs.stream_to_string(stored) == foo_text
        # Check delete.
        del_file = os.path.join('/', 'memcache.test')
        fs.put(del_file, vfs.string_to_stream(u'test'))
        assert fs.isfile(del_file)
        fs.delete(del_file)
        assert not fs.isfile(del_file)
        # Check open or delete of non-existent does not fail.
        assert not fs.open('/foo/bar/baz')
        assert not fs.delete('/foo/bar/baz')
        # Check new content fully overrides old (with and without memcache).
        test_file = os.path.join('/', 'memcache.test')
        fs.put(test_file, vfs.string_to_stream(u'test text'))
        stored = fs.open(test_file)
        assert u'test text' == vfs.stream_to_string(stored)
        fs.delete(test_file)
        # Check file existence.
        assert not fs.isfile('/foo/bar')
        assert fs.isfile('/course.yaml')
        assert fs.isfile('/assets/js/foo.js')
        # Check file listing.
        # list() is recursive and returns results in sorted order.
        bar_js = os.path.join('/', 'assets/js/bar.js')
        fs.put(bar_js, vfs.string_to_stream(foo_text))
        baz_js = os.path.join('/', 'assets/js/baz.js')
        fs.put(baz_js, vfs.string_to_stream(foo_text))
        assert fs.list('/') == sorted([
            u'/course.yaml',
            u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
        assert fs.list('/assets') == sorted([
            u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
        assert not fs.list('/foo/bar')
def test_utf8_datastore(self):
"""Test writing to and reading from datastore using UTF-8 content."""
event = models.EventEntity()
event.source = 'test-source'
event.user_id = 'test-user-id'
event.data = u'Test Data (тест данные)'
event.put()
stored_event = models.EventEntity().get_by_id([event.key().id()])
assert 1 == len(stored_event)
assert event.data == stored_event[0].data
def assert_queriable(self, entity, name, date_type=datetime.datetime):
"""Create some entities and check that single-property queries work."""
for i in range(1, 32):
item = entity(
key_name='%s_%s' % (date_type.__class__.__name__, i))
setattr(item, name, date_type(2012, 1, i))
item.put()
# Descending order.
items = entity.all().order('-%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 31)
# Ascending order.
items = entity.all().order('%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 1)
def test_indexed_properties(self):
"""Test whether entities support specific query types."""
# A 'DateProperty' or 'DateTimeProperty' of each persistent entity must
# be indexed. This is true even if the application doesn't execute any
# queries relying on the index. The index is still critically important
# for managing data, for example, for bulk data download or for
# incremental computations. Using index, the entire table can be
# processed in daily, weekly, etc. chunks and it is easy to query for
# new data. If we did not have an index, chunking would have to be done
# by the primary index, where it is impossible to separate recently
# added/modified rows from the rest of the data. Having this index adds
# to the cost of datastore writes, but we believe it is important to
# have it. Below we check that all persistent date/datetime properties
# are indexed.
self.assert_queriable(
AnnouncementEntity, 'date', date_type=datetime.date)
self.assert_queriable(models.EventEntity, 'recorded_on')
self.assert_queriable(models.Student, 'enrolled_on')
self.assert_queriable(models.StudentAnswersEntity, 'updated_on')
self.assert_queriable(jobs.DurableJobEntity, 'updated_on')
    def test_config_visible_from_any_namespace(self):
        """Test that ConfigProperty is visible from any namespace.

        Writes a datastore override for a known property in the default
        namespace and verifies it is picked up both there and from an
        unrelated namespace.
        """
        assert (
            config.UPDATE_INTERVAL_SEC.value ==
            config.UPDATE_INTERVAL_SEC.default_value)
        new_value = config.UPDATE_INTERVAL_SEC.default_value + 5
        # Add datastore override for known property.
        prop = config.ConfigPropertyEntity(
            key_name=config.UPDATE_INTERVAL_SEC.name)
        prop.value = str(new_value)
        prop.is_draft = False
        prop.put()
        # Check visible from default namespace.
        # Resetting last_update_time forces the registry to re-read overrides.
        config.Registry.last_update_time = 0
        assert config.UPDATE_INTERVAL_SEC.value == new_value
        # Check visible from another namespace.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(
                'ns-test_config_visible_from_any_namespace')
            config.Registry.last_update_time = 0
            assert config.UPDATE_INTERVAL_SEC.value == new_value
        finally:
            namespace_manager.set_namespace(old_namespace)
class AdminAspectTest(actions.TestBase):
"""Test site from the Admin perspective."""
    def test_appstats(self):
        """Checks that appstats is available when enabled.

        The Appstats link on /admin must be absent by default and present
        only while GCB_APPSTATS_ENABLED is set in the environment.
        """
        email = 'test_appstats@google.com'
        # check appstats is disabled by default
        actions.login(email, is_admin=True)
        response = self.testapp.get('/admin')
        assert_equals(response.status_int, 200)
        assert_does_not_contain('>Appstats</a>', response.body)
        assert_does_not_contain('/admin/stats/', response.body)
        # enable and check appstats is now enabled
        os.environ['GCB_APPSTATS_ENABLED'] = 'True'
        response = self.testapp.get('/admin')
        assert_equals(response.status_int, 200)
        assert_contains('>Appstats</a>', response.body)
        assert_contains('/admin/stats/', response.body)
        # Clean up so the flag does not leak into other tests.
        del os.environ['GCB_APPSTATS_ENABLED']
def test_courses_page_for_multiple_courses(self):
"""Tests /admin page showing multiple courses."""
# Setup courses.
sites.setup_courses('course:/aaa::ns_a, course:/bbb::ns_b, course:/:/')
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
# This test requires a read-write file system. If test runs on read-
# only one, we can't run this test :(
if (not dst_app_context_a.fs.is_read_write() or
not dst_app_context_a.fs.is_read_write()):
return
course_a = courses.Course(None, app_context=dst_app_context_a)
course_b = courses.Course(None, app_context=dst_app_context_b)
unused_course, course_a = course_a.import_from(src_app_context)
unused_course, course_b = course_b.import_from(src_app_context)
# Rename courses.
dst_app_context_a.fs.put(
dst_app_context_a.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course AAA\''))
dst_app_context_b.fs.put(
dst_app_context_b.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course BBB\''))
# Login.
email = 'test_courses_page_for_multiple_courses@google.com'
actions.login(email, is_admin=True)
# Check the course listing page.
response = self.testapp.get('/admin')
assert_contains_all_of([
'Course AAA',
'/aaa/dashboard',
'Course BBB',
'/bbb/dashboard'], response.body)
# Clean up.
sites.reset_courses()
    def test_python_console(self):
        """Test access rights to the Python console.

        The console must be off by default, refuse normal and delegated
        admins while enabled, work for real admins, and 404 when disabled.
        """
        email = 'test_python_console@google.com'
        # The default is that the console should be turned off
        self.assertFalse(modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED)
        # Test the console when it is enabled
        modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = True
        # Check normal user has no access.
        actions.login(email)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 302)
        response = self.testapp.post('/admin?action=console')
        assert_equals(response.status_int, 302)
        # Check delegated admin has no access.
        os.environ['gcb_admin_user_emails'] = '[%s]' % email
        actions.login(email)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        assert_contains(
            'You must be an actual admin user to continue.', response.body)
        # NOTE(review): this repeats the identical GET above; the analogous
        # block for normal users pairs a GET with a POST -- confirm whether
        # a POST was intended here.
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        assert_contains(
            'You must be an actual admin user to continue.', response.body)
        del os.environ['gcb_admin_user_emails']
        # Check actual admin has access.
        actions.login(email, is_admin=True)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        response.form.set('code', 'print "foo" + "bar"')
        response = self.submit(response.form)
        assert_contains('foobar', response.body)
        # Finally, test that the console is not found when it is disabled
        modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = False
        actions.login(email, is_admin=True)
        self.testapp.get('/admin?action=console', status=404)
        self.testapp.post('/admin?action=console_run', status=404)
    def test_non_admin_has_no_access(self):
        """Test non admin has no access to pages or REST endpoints.

        Admin pages must redirect (302); REST endpoints must return 200
        with an embedded 401/403 status, even when the caller holds a
        valid XSRF token.
        """
        email = 'test_non_admin_has_no_access@google.com'
        actions.login(email)
        # Add datastore override.
        prop = config.ConfigPropertyEntity(
            key_name='gcb_config_update_interval_sec')
        prop.value = '5'
        prop.is_draft = False
        prop.put()
        # Check user has no access to specific pages and actions.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)
        response = self.testapp.get(
            '/admin?action=config_edit&name=gcb_admin_user_emails')
        assert_equals(response.status_int, 302)
        response = self.testapp.post(
            '/admin?action=config_reset&name=gcb_admin_user_emails')
        assert_equals(response.status_int, 302)
        # Check user has no rights to GET verb.
        response = self.testapp.get(
            '/rest/config/item?key=gcb_config_update_interval_sec')
        assert_equals(response.status_int, 200)
        json_dict = transforms.loads(response.body)
        assert json_dict['status'] == 401
        assert json_dict['message'] == 'Access denied.'
        # Here are the endpoints we want to test: (uri, xsrf_action_name).
        endpoints = [
            ('/rest/config/item', 'config-property-put'),
            ('/rest/courses/item', 'add-course-put')]
        # Check user has no rights to PUT verb.
        # Without an XSRF token the request fails the token check (403).
        payload_dict = {}
        payload_dict['value'] = '666'
        payload_dict['is_draft'] = False
        request = {}
        request['key'] = 'gcb_config_update_interval_sec'
        request['payload'] = transforms.dumps(payload_dict)
        for uri, unused_action in endpoints:
            response = self.testapp.put(uri + '?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 403', response.body)
        # Check user still has no rights to PUT verb even if he somehow
        # obtained a valid XSRF token.
        for uri, action in endpoints:
            request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(action)
            response = self.testapp.put(uri + '?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            json_dict = transforms.loads(response.body)
            assert json_dict['status'] == 401
            assert json_dict['message'] == 'Access denied.'
    def test_admin_list(self):
        """Test delegation of admin access to another user.

        Adding the user to gcb_admin_user_emails (env override) grants
        access to the settings page; removing the override revokes it.
        """
        email = 'test_admin_list@google.com'
        actions.login(email)
        # Add environment variable override.
        os.environ['gcb_admin_user_emails'] = '[%s]' % email
        # Add datastore override.
        prop = config.ConfigPropertyEntity(
            key_name='gcb_config_update_interval_sec')
        prop.value = '5'
        prop.is_draft = False
        prop.put()
        # Check user has access now.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 200)
        # Check overrides are active and have proper management actions.
        assert_contains('gcb_admin_user_emails', response.body)
        assert_contains('[test_admin_list@google.com]', response.body)
        assert_contains(
            '/admin?action=config_override&name=gcb_admin_user_emails',
            response.body)
        assert_contains(
            '/admin?action=config_edit&name=gcb_config_update_interval_sec',
            response.body)
        # Check editor page has proper actions.
        response = self.testapp.get(
            '/admin?action=config_edit&name=gcb_config_update_interval_sec')
        assert_equals(response.status_int, 200)
        assert_contains('/admin?action=config_reset', response.body)
        assert_contains('name=gcb_config_update_interval_sec', response.body)
        # Remove override.
        del os.environ['gcb_admin_user_emails']
        # Check user has no access.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)
    def test_access_to_admin_pages(self):
        """Test access to admin pages."""
        # assert anonymous user has no access (redirected away)
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)

        # assert admin user has access
        email = 'test_access_to_admin_pages@google.com'
        name = 'Test Access to Admin Pages'
        actions.login(email, is_admin=True)
        actions.register(self, name)

        response = self.testapp.get('/admin')
        assert_contains('Power Searching with Google', response.body)
        assert_contains('All Courses', response.body)

        response = self.testapp.get('/admin?action=settings')
        assert_contains('gcb_admin_user_emails', response.body)
        assert_contains('gcb_config_update_interval_sec', response.body)
        assert_contains('All Settings', response.body)

        response = self.testapp.get('/admin?action=perf')
        assert_contains('gcb-admin-uptime-sec:', response.body)
        assert_contains('In-process Performance Counters', response.body)

        response = self.testapp.get('/admin?action=deployment')
        assert_contains('application_id: testbed-test', response.body)
        assert_contains('About the Application', response.body)

        actions.unregister(self)
        actions.logout()

        # assert not-admin user has no access
        actions.login(email)
        actions.register(self, name)
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)
    def test_multiple_courses(self):
        """Test courses admin page with two courses configured."""
        sites.setup_courses(
            'course:/foo:/foo-data, course:/bar:/bar-data:nsbar')

        email = 'test_multiple_courses@google.com'
        actions.login(email, is_admin=True)

        response = self.testapp.get('/admin')
        assert_contains('Course Builder > Admin > Courses', response.body)
        assert_contains('Total: 2 item(s)', response.body)

        # Check course URL's.
        assert_contains('<a href="/foo/dashboard">', response.body)
        assert_contains('<a href="/bar/dashboard">', response.body)

        # Check content locations.
        assert_contains('/foo-data', response.body)
        assert_contains('/bar-data', response.body)

        # Check namespaces: one derived from the data path, one explicit.
        assert_contains('gcb-course-foo-data', response.body)
        assert_contains('nsbar', response.body)

        # Clean up.
        sites.reset_courses()
    def test_add_course(self):
        """Tests adding a new course entry."""
        # Only meaningful for test subclasses that support editing.
        if not self.supports_editing:
            return

        email = 'test_add_course@google.com'
        actions.login(email, is_admin=True)

        # Prepare request data.
        payload_dict = {
            'name': 'add_new',
            'title': u'new course (тест данные)', 'admin_email': 'foo@bar.com'}
        request = {}
        request['payload'] = transforms.dumps(payload_dict)
        request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
            'add-course-put')

        # Execute action.
        response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)

        # Check response; the payload carries the new course entry spec.
        json_dict = transforms.loads(transforms.loads(response.body)['payload'])
        assert 'course:/add_new::ns_add_new' == json_dict.get('entry')

        # Re-execute action; should fail as this would create a duplicate.
        response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert_equals(412, transforms.loads(response.body)['status'])

        # Load the course and check its title.
        new_app_context = sites.get_all_courses(
            'course:/add_new::ns_add_new')[0]
        assert_equals(u'new course (тест данные)', new_app_context.get_title())
        new_course = courses.Course(None, app_context=new_app_context)
        assert not new_course.get_units()
class CourseAuthorAspectTest(actions.TestBase):
    """Tests the site from the Course Author perspective."""

    def test_dashboard(self):
        """Test course dashboard."""
        email = 'test_dashboard@google.com'
        name = 'Test Dashboard'

        # Non-admin doesn't have access.
        actions.login(email)
        response = self.get('dashboard')
        assert_equals(response.status_int, 302)
        actions.register(self, name)
        assert_equals(response.status_int, 302)
        actions.logout()

        # Admin has access.
        actions.login(email, is_admin=True)
        response = self.get('dashboard')

        # Verify title does not have link text
        assert_contains(
            '<title>Course Builder > Power Searching with Google > Dash',
            response.body)

        # Verify body does have linked breadcrumb trail.
        assert_contains(
            'Google ><a href="%s"> Dashboard </a>> Outline' %
            self.canonicalize('dashboard'),
            response.body)

        # Tests outline view.
        response = self.get('dashboard')
        assert_contains('Unit 3 - Advanced techniques', response.body)
        assert_contains('data/lesson.csv', response.body)

        # Check editability.
        if self.supports_editing:
            assert_contains('Add Assessment', response.body)
        else:
            assert_does_not_contain('Add Assessment', response.body)

        # Test assets view.
        response = self.get('dashboard?action=assets')

        # Verify title does not have link text
        assert_contains(
            '<title>Course Builder > Power Searching with Google > Dash',
            response.body)

        # Verify body does have linked breadcrumb trail.
        assert_contains(
            'Google ><a href="%s"> Dashboard </a>> Assets' %
            self.canonicalize('dashboard'),
            response.body)
        assert_contains('assets/css/main.css', response.body)
        assert_contains('assets/img/Image1.5.png', response.body)
        assert_contains('assets/js/activity-3.2.js', response.body)

        # Test settings view.
        response = self.get('dashboard?action=settings')

        # Verify title does not have link text
        assert_contains(
            '<title>Course Builder > Power Searching with Google > Dash',
            response.body)

        # Verify body does have linked breadcrumb trail.
        assert_contains(
            'Google ><a href="%s"> Dashboard </a>> Settings' %
            self.canonicalize('dashboard'),
            response.body)
        assert_contains('course.yaml', response.body)
        # Fixed: these two literals had broken (unescaped) nested quotes in
        # the original source; the expected page text is unchanged.
        assert_contains(
            "title: 'Power Searching with Google'", response.body)
        assert_contains("locale: 'en_US'", response.body)

        # Check editability.
        if self.supports_editing:
            assert_contains('create_or_edit_settings', response.body)
        else:
            assert_does_not_contain('create_or_edit_settings', response.body)

        # Tests student statistics view.
        response = self.get('dashboard?action=analytics')

        # Verify title does not have link text
        assert_contains(
            '<title>Course Builder > Power Searching with Google > Dash',
            response.body)

        # Verify body does have linked breadcrumb trail.
        assert_contains(
            'Google ><a href="%s"> Dashboard </a>> Analytics' %
            self.canonicalize('dashboard'),
            response.body)
        assert_contains('have not been calculated yet', response.body)

        # Kick off the stats computation job and verify it was enqueued.
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        assert len(self.taskq.GetTasks('default')) == 5

        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)

        self.execute_all_deferred_tasks()
        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 1', response.body)
        assert_contains('total: 1', response.body)

        # Tests assessment statistics.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            for i in range(5):
                student = models.Student(key_name='key-%s' % i)
                student.is_enrolled = True
                student.scores = transforms.dumps({'test-assessment': i})
                student.put()
        finally:
            namespace_manager.set_namespace(old_namespace)

        # Recompute and verify the five synthetic students are included.
        response = self.get('dashboard?action=analytics')
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)

        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('currently enrolled: 6', response.body)
        assert_contains(
            'test-assessment: completed 5, average score 2.0', response.body)

    def test_trigger_sample_announcements(self):
        """Test course author can trigger adding sample announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        response = actions.view_announcements(self)
        assert_contains('Example Announcement', response.body)
        assert_contains('Welcome to the final class!', response.body)
        assert_does_not_contain('No announcements yet.', response.body)

    def test_manage_announcements(self):
        """Test course author can manage announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        # add new
        response = actions.view_announcements(self)
        add_form = response.forms['gcb-add-announcement']
        response = self.submit(add_form)
        assert_equals(response.status_int, 302)

        # check edit form rendering
        response = self.testapp.get(response.location)
        assert_equals(response.status_int, 200)
        assert_contains('/rest/announcements/item?key=', response.body)

        # check added
        response = actions.view_announcements(self)
        assert_contains('Sample Announcement (Draft)', response.body)

        # delete draft
        response = actions.view_announcements(self)
        delete_form = response.forms['gcb-delete-announcement-1']
        response = self.submit(delete_form)
        assert_equals(response.status_int, 302)

        # check deleted
        assert_does_not_contain('Welcome to the final class!', response.body)

    def test_announcements_rest(self):
        """Test REST access to announcements."""
        email = 'test_announcements_rest@google.com'
        name = 'Test Announcements Rest'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        response = actions.view_announcements(self)
        assert_does_not_contain('My Test Title', response.body)

        # REST GET existing item
        items = AnnouncementEntity.all().fetch(1)
        for item in items:
            response = self.get('rest/announcements/item?key=%s' % item.key())
            json_dict = transforms.loads(response.body)
            assert json_dict['status'] == 200
            assert 'message' in json_dict
            assert 'payload' in json_dict

            payload_dict = transforms.loads(json_dict['payload'])
            assert 'title' in payload_dict
            assert 'date' in payload_dict

            # REST PUT item
            payload_dict['title'] = u'My Test Title Мой заголовок теста'
            payload_dict['date'] = '2012/12/31'
            payload_dict['is_draft'] = True
            payload_dict['send_email'] = False
            request = {}
            request['key'] = str(item.key())
            request['payload'] = transforms.dumps(payload_dict)

            # Check XSRF is required.
            response = self.put('rest/announcements/item?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 403', response.body)

            # Check PUT works.
            request['xsrf_token'] = json_dict['xsrf_token']
            response = self.put('rest/announcements/item?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 200', response.body)

            # Confirm change is visible on the page.
            response = self.get('announcements')
            assert_contains(
                u'My Test Title Мой заголовок теста (Draft)', response.body)

        # REST GET not-existing item
        response = self.get('rest/announcements/item?key=not_existent_key')
        json_dict = transforms.loads(response.body)
        assert json_dict['status'] == 404
class StudentAspectTest(actions.TestBase):
    """Test the site from the Student perspective."""

    def test_view_announcements(self):
        """Test student aspect of announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'

        actions.login(email)
        actions.register(self, name)

        # Check no announcements yet.
        response = actions.view_announcements(self)
        assert_does_not_contain('Example Announcement', response.body)
        assert_does_not_contain('Welcome to the final class!', response.body)
        assert_contains('No announcements yet.', response.body)
        actions.logout()

        # Login as admin and add announcements.
        actions.login('admin@sample.com', is_admin=True)
        actions.register(self, 'admin')
        response = actions.view_announcements(self)
        actions.logout()

        # Check we can see non-draft announcements.
        actions.login(email)
        response = actions.view_announcements(self)
        assert_contains('Example Announcement', response.body)
        assert_does_not_contain('Welcome to the final class!', response.body)
        assert_does_not_contain('No announcements yet.', response.body)

        # Check no access to draft announcements via REST handler.
        items = AnnouncementEntity.all().fetch(1000)
        for item in items:
            response = self.get('rest/announcements/item?key=%s' % item.key())
            if item.is_draft:
                json_dict = transforms.loads(response.body)
                assert json_dict['status'] == 401
            else:
                assert_equals(response.status_int, 200)

    def test_registration(self):
        """Test student registration."""
        email = 'test_registration@example.com'
        name1 = 'Test Student'
        name2 = 'John Smith'
        name3 = u'Pavel Simakov (тест данные)'

        actions.login(email)

        actions.register(self, name1)
        actions.check_profile(self, name1)

        actions.change_name(self, name2)
        actions.unregister(self)

        actions.register(self, name3)
        actions.check_profile(self, name3)

    def test_course_not_available(self):
        """Tests course is only accessible to author when incomplete."""
        email = 'test_course_not_available@example.com'
        name = 'Test Course Not Available'

        actions.login(email)
        actions.register(self, name)

        # Check preview and static resources are available.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        response = self.get('assets/js/activity-1.3.js')
        assert_equals(response.status_int, 200)

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        # Check preview and static resources are not available to Student.
        response = self.get('course', expect_errors=True)
        assert_equals(response.status_int, 404)
        response = self.get('assets/js/activity-1.3.js', expect_errors=True)
        assert_equals(response.status_int, 404)

        # Check preview and static resources are still available to author.
        actions.login(email, is_admin=True)
        response = self.get('course')
        assert_equals(response.status_int, 200)
        response = self.get('assets/js/activity-1.3.js')
        assert_equals(response.status_int, 200)

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old

    def test_registration_closed(self):
        """Test student registration when course is full."""
        email = 'test_registration_closed@example.com'
        name = 'Test Registration Closed'

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['reg_form']['can_register'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        # Try to login and register; registration is expected to fail, so any
        # exception other than ShouldHaveFailedByNow is swallowed as success.
        actions.login(email)
        try:
            actions.register(self, name)
            raise actions.ShouldHaveFailedByNow(
                'Expected to fail: new registrations should not be allowed '
                'when registration is closed.')
        except actions.ShouldHaveFailedByNow as e:
            raise e
        except:
            pass

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old

    def test_registration_with_additional_fields(self):
        """Registers a new student with customized registration form."""
        email = 'test_registration_with_additional_fields@example.com'
        name = 'Test Registration with Additional Fields'
        zipcode = '94043'
        score = '99'

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            """Insert additional fields into course.yaml."""
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            environ['reg_form']['additional_registration_fields'] = (
                '\'<!-- reg_form.additional_registration_fields -->'
                '<li>'
                '<label class="form-label" for="form02"> What is your zipcode?'
                '</label><input name="form02" type="text"></li>'
                '<li>'
                '<label class="form-label" for="form03"> What is your score?'
                '</label> <input name="form03" type="text"></li>\''
            )
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        # Login and register.
        actions.login(email)
        actions.register_with_additional_fields(self, name, zipcode, score)

        # Verify that registration results in capturing additional registration
        # questions.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        student = models.Student.get_enrolled_student_by_email(email)

        # Check that two registration additional fields are populated
        # with correct values.
        if student.additional_fields:
            json_dict = transforms.loads(student.additional_fields)
            assert zipcode == json_dict[2][1]
            assert score == json_dict[3][1]

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
        namespace_manager.set_namespace(old_namespace)

    def test_permissions(self):
        """Test student permissions, and which pages they can view."""
        email = 'test_permissions@example.com'
        name = 'Test Permissions'

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        actions.login(email)

        actions.register(self, name)
        actions.Permissions.assert_enrolled(self)

        actions.unregister(self)
        actions.Permissions.assert_unenrolled(self)

        actions.register(self, name)
        actions.Permissions.assert_enrolled(self)

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old

    def test_login_and_logout(self):
        """Test if login and logout behave as expected."""
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        email = 'test_login_logout@example.com'

        actions.Permissions.assert_logged_out(self)
        actions.login(email)

        actions.Permissions.assert_unenrolled(self)

        actions.logout()
        actions.Permissions.assert_logged_out(self)

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old

    def test_lesson_activity_navigation(self):
        """Test navigation between lesson/activity pages."""
        email = 'test_lesson_activity_navigation@example.com'
        name = 'Test Lesson Activity Navigation'

        actions.login(email)
        actions.register(self, name)

        # First lesson: no "Previous", has "Next".
        response = self.get('unit?unit=1&lesson=1')
        assert_does_not_contain('Previous Page', response.body)
        assert_contains('Next Page', response.body)

        # Middle lesson: both navigation links present.
        response = self.get('unit?unit=2&lesson=3')
        assert_contains('Previous Page', response.body)
        assert_contains('Next Page', response.body)

        # Last lesson: no "Next", shows "End".
        response = self.get('unit?unit=3&lesson=5')
        assert_contains('Previous Page', response.body)
        assert_does_not_contain('Next Page', response.body)
        assert_contains('End', response.body)

    def test_show_hide_lesson_navigation(self):
        """Test display of lesson navigation buttons."""
        email = 'test_show_hide_of_lesson_navigation@example.com'
        name = 'Test Show/Hide of Lesson Navigation'

        actions.login(email)
        actions.register(self, name)

        # The default behavior is to show the lesson navigation buttons.
        response = self.get('unit?unit=2&lesson=3')
        assert_contains(
            '<div class="gcb-button-box" >', response.body)
        assert_does_not_contain(
            '<div class="gcb-button-box" style="display: none;">',
            response.body)

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['unit']['hide_lesson_navigation_buttons'] = True
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        # The lesson navigation buttons should now be hidden.
        response = self.get('unit?unit=2&lesson=3')
        assert_contains(
            '<div class="gcb-button-box" style="display: none;">',
            response.body)
        assert_does_not_contain(
            '<div class="gcb-button-box" >', response.body)

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old

    def test_attempt_activity_event(self):
        """Test activity attempt generates event."""
        email = 'test_attempt_activity_event@example.com'
        name = 'Test Attempt Activity Event'

        actions.login(email)
        actions.register(self, name)

        # Enable event recording.
        config.Registry.test_overrides[
            lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True

        # Prepare event.
        request = {}
        request['source'] = 'test-source'
        request['payload'] = transforms.dumps({'Alice': u'Bob (тест данные)'})

        # Check XSRF token is required.
        response = self.post('rest/events?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert_contains('"status": 403', response.body)

        # Check PUT works.
        request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
            'event-post')
        response = self.post('rest/events?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert not response.body

        # Check event is properly recorded.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            events = models.EventEntity.all().fetch(1000)
            assert 1 == len(events)
            assert_contains(
                u'Bob (тест данные)',
                transforms.loads(events[0].data)['Alice'])
        finally:
            namespace_manager.set_namespace(old_namespace)

        # Clean up.
        config.Registry.test_overrides = {}

    def test_two_students_dont_see_each_other_pages(self):
        """Test a user can't see another user pages."""
        email1 = 'user1@foo.com'
        name1 = 'User 1'
        email2 = 'user2@foo.com'
        name2 = 'User 2'

        # Login as one user and view 'unit' and other pages, which are not
        # cached.
        actions.login(email1)
        actions.register(self, name1)
        actions.Permissions.assert_enrolled(self)
        response = actions.view_unit(self)
        assert_contains(email1, response.body)
        actions.logout()

        # Login as another user and check that 'unit' and other pages show
        # the correct new email.
        actions.login(email2)
        actions.register(self, name2)
        actions.Permissions.assert_enrolled(self)
        response = actions.view_unit(self)
        assert_contains(email2, response.body)
        actions.logout()

    def test_xsrf_defence(self):
        """Test defense against XSRF attack."""
        email = 'test_xsrf_defence@example.com'
        name = 'Test Xsrf Defence'

        actions.login(email)
        actions.register(self, name)

        response = self.get('student/home')
        edit_form = actions.get_form_by_action(response, 'student/editstudent')
        edit_form.set('name', 'My New Name')
        # A forged token must be rejected with 403.
        edit_form.set('xsrf_token', 'bad token')

        response = edit_form.submit(expect_errors=True)
        assert_equals(response.status_int, 403)

    def test_autoescaping(self):
        """Test Jinja autoescaping."""
        email = 'test_autoescaping@example.com'
        # Names contain script tags; autoescaping must render them inert.
        name1 = '<script>alert(1);</script>'
        name2 = '<script>alert(2);</script>'

        actions.login(email)

        actions.register(self, name1)
        actions.check_profile(self, name1)

        actions.change_name(self, name2)
        actions.unregister(self)

    def test_response_headers(self):
        """Test dynamically-generated responses use proper headers."""
        email = 'test_response_headers@example.com'
        name = 'Test Response Headers'

        actions.login(email)
        actions.register(self, name)

        response = self.get('student/home')
        assert_equals(response.status_int, 200)
        assert_contains('must-revalidate', response.headers['Cache-Control'])
        assert_contains('no-cache', response.headers['Cache-Control'])
        assert_contains('no-cache', response.headers['Pragma'])
        assert_contains('Mon, 01 Jan 1990', response.headers['Expires'])

    def test_browsability_permissions(self):
        """Tests that the course browsability flag works correctly."""
        # By default, courses are browsable.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_contains('<a href="assessment?name=Pre"', response.body)
        assert_does_not_contain('progress-notstarted-Pre', response.body)
        actions.Permissions.assert_can_browse(self)

        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        actions.Permissions.assert_logged_out(self)

        # Check course page redirects.
        response = self.get('course', expect_errors=True)
        assert_equals(response.status_int, 302)

        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
class StudentUnifiedProfileTest(StudentAspectTest):
    """Tests student actions having unified profile enabled."""

    def setUp(self):  # pylint: disable=g-bad-name
        super(StudentUnifiedProfileTest, self).setUp()
        # NOTE(review): other overrides in this file key on the property's
        # `.name` attribute (e.g. CAN_PERSIST_ACTIVITY_EVENTS.name); here the
        # property object itself is used as the key — confirm this is intended.
        config.Registry.test_overrides[
            models.CAN_SHARE_STUDENT_PROFILE] = True

    def tearDown(self):  # pylint: disable=g-bad-name
        # Drop all overrides so this suite cannot leak state into others.
        config.Registry.test_overrides = {}
        super(StudentUnifiedProfileTest, self).tearDown()
class StaticHandlerTest(actions.TestBase):
    """Check serving of static resources."""

    def test_disabled_modules_has_no_routes(self):
        """Test that disabled modules has no routes."""
        assert modules.oeditor.oeditor.custom_module.enabled
        assert modules.oeditor.oeditor.custom_module.global_routes
        assert modules.oeditor.oeditor.custom_module.namespaced_routes

        modules.oeditor.oeditor.custom_module.disable()
        try:
            assert not modules.oeditor.oeditor.custom_module.enabled
            assert not modules.oeditor.oeditor.custom_module.global_routes
            assert not modules.oeditor.oeditor.custom_module.namespaced_routes
        finally:
            # Always re-enable so other tests see the module active.
            modules.oeditor.oeditor.custom_module.enable()

    def test_static_files_cache_control(self):
        """Test static/zip handlers use proper Cache-Control headers."""

        # Check static handler.
        response = self.get('/assets/css/main.css')
        assert_equals(response.status_int, 200)
        assert_contains('max-age=600', response.headers['Cache-Control'])
        assert_contains('public', response.headers['Cache-Control'])
        assert_does_not_contain('no-cache', response.headers['Cache-Control'])

        # Check zip file handler.
        response = self.testapp.get(
            '/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css')
        assert_equals(response.status_int, 200)
        assert_contains('max-age=600', response.headers['Cache-Control'])
        assert_contains('public', response.headers['Cache-Control'])
        assert_does_not_contain('no-cache', response.headers['Cache-Control'])
class ActivityTest(actions.TestBase):
    """Test for activities."""

    def get_activity(self, unit_id, lesson_id, args):
        """Retrieve the activity page for a given unit and lesson id.

        Returns the page response together with `args` augmented with the
        XSRF token scraped from the page, ready for an event POST.
        """
        response = self.get('activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
        assert_equals(response.status_int, 200)
        assert_contains(
            '<script src="assets/js/activity-%s.%s.js"></script>' %
            (unit_id, lesson_id), response.body)
        assert_contains('assets/lib/activity-generic-1.3.js', response.body)

        js_response = self.get('assets/lib/activity-generic-1.3.js')
        assert_equals(js_response.status_int, 200)

        # Extract XSRF token from the page.
        match = re.search(r'eventXsrfToken = [\']([^\']+)', response.body)
        assert match
        xsrf_token = match.group(1)
        args['xsrf_token'] = xsrf_token

        return response, args

    def test_activities(self):
        """Test that activity submissions are handled and recorded correctly."""
        email = 'test_activities@google.com'
        name = 'Test Activities'
        unit_id = 1
        lesson_id = 2
        activity_submissions = {
            '1.2': {
                'index': 3,
                'type': 'activity-choice',
                'value': 3,
                'correct': True,
            },
        }

        # Register.
        actions.login(email)
        actions.register(self, name)

        # Enable event recording.
        config.Registry.test_overrides[
            lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True

        # Navigate to the course overview page, and check that the unit shows
        # no progress yet.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_contains(
            u'id="progress-notstarted-%s"' % unit_id, response.body)

        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            response, args = self.get_activity(unit_id, lesson_id, {})

            # Check that the current activity shows no progress yet.
            assert_contains(
                u'id="progress-notstarted-%s-activity"' %
                lesson_id, response.body)

            # Prepare activity submission event.
            args['source'] = 'attempt-activity'
            lesson_key = '%s.%s' % (unit_id, lesson_id)
            assert lesson_key in activity_submissions
            args['payload'] = activity_submissions[lesson_key]
            args['payload']['location'] = (
                'http://localhost:8080/activity?unit=%s&lesson=%s' %
                (unit_id, lesson_id))
            args['payload'] = transforms.dumps(args['payload'])

            # Submit the request to the backend.
            response = self.post('rest/events?%s' % urllib.urlencode(
                {'request': transforms.dumps(args)}), {})
            assert_equals(response.status_int, 200)
            assert not response.body

            # Check that the current activity shows partial progress.
            response, args = self.get_activity(unit_id, lesson_id, {})
            assert_contains(
                u'id="progress-inprogress-%s-activity"' %
                lesson_id, response.body)

            # Navigate to the course overview page and check that the unit shows
            # partial progress.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(
                u'id="progress-inprogress-%s"' % unit_id, response.body)
        finally:
            namespace_manager.set_namespace(old_namespace)

    def test_progress(self):
        """Test student activity progress in detail, using the sample course."""

        class FakeHandler(object):
            # Minimal stand-in for a request handler; Course only needs
            # the app_context attribute.
            def __init__(self, app_context):
                self.app_context = app_context

        course = Course(FakeHandler(sites.get_all_courses()[0]))
        tracker = course.get_progress_tracker()
        student = models.Student(key_name='key-test-student')

        # Initially, all progress entries should be set to zero.
        unit_progress = tracker.get_unit_progress(student)
        for key in unit_progress:
            assert unit_progress[key] == 0
        lesson_progress = tracker.get_lesson_progress(student, 1)
        for key in lesson_progress:
            assert lesson_progress[key] == {'html': 0, 'activity': 0}

        # The blocks in Lesson 1.2 with activities are blocks 3 and 6.
        # Submitting block 3 should trigger an in-progress update.
        tracker.put_block_completed(student, 1, 2, 3)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 0, 'activity': 1
        }

        # Submitting block 6 should trigger a completion update for the
        # activity, but Lesson 1.2 is still incomplete.
        tracker.put_block_completed(student, 1, 2, 6)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 0, 'activity': 2
        }

        # Visiting the HTML page for Lesson 1.2 completes the lesson.
        tracker.put_html_accessed(student, 1, 2)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 2, 'activity': 2
        }

        # Test a lesson with no interactive blocks in its activity. It should
        # change its status to 'completed' once it is accessed.
        tracker.put_activity_accessed(student, 2, 1)
        assert tracker.get_unit_progress(student)['2'] == 1
        assert tracker.get_lesson_progress(student, 2)[1] == {
            'html': 0, 'activity': 2
        }

        # Test that a lesson without activities (Lesson 1.1) doesn't count.
        # Complete lessons 1.3, 1.4, 1.5 and 1.6; unit 1 should then be marked
        # as 'completed' even though we have no events associated with
        # Lesson 1.1.
        tracker.put_html_accessed(student, 1, 1)
        tracker.put_html_accessed(student, 1, 3)
        tracker.put_html_accessed(student, 1, 4)
        tracker.put_html_accessed(student, 1, 5)
        tracker.put_html_accessed(student, 1, 6)
        tracker.put_activity_completed(student, 1, 3)
        tracker.put_activity_completed(student, 1, 4)
        tracker.put_activity_completed(student, 1, 5)
        assert tracker.get_unit_progress(student)['1'] == 1
        tracker.put_activity_completed(student, 1, 6)
        assert tracker.get_unit_progress(student)['1'] == 2

        # Test that a unit is not completed until all HTML and activity pages
        # have been, at least, visited. Unit 6 has 3 lessons; the last one has
        # no activity block.
        tracker.put_html_accessed(student, 6, 1)
        tracker.put_html_accessed(student, 6, 2)
        tracker.put_activity_completed(student, 6, 1)
        tracker.put_activity_completed(student, 6, 2)
        assert tracker.get_unit_progress(student)['6'] == 1
        tracker.put_activity_accessed(student, 6, 3)
        assert tracker.get_unit_progress(student)['6'] == 1
        tracker.put_html_accessed(student, 6, 3)
        assert tracker.get_unit_progress(student)['6'] == 2

        # Test assessment counters: status increments on each completion.
        pre_id = 'Pre'
        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 1

        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 2

        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 3

        # Test that invalid keys do not lead to any updates.
        # Invalid assessment id.
        fake_id = 'asdf'
        tracker.put_assessment_completed(student, fake_id)
        progress = tracker.get_or_create_progress(student)
        assert not tracker.is_assessment_completed(progress, fake_id)
        assert tracker.get_assessment_status(progress, fake_id) is None

        # Invalid unit id.
        tracker.put_activity_completed(student, fake_id, 1)
        progress = tracker.get_or_create_progress(student)
        assert tracker.get_activity_status(progress, fake_id, 1) is None

        # Invalid lesson id.
        fake_numeric_id = 22
        tracker.put_activity_completed(student, 1, fake_numeric_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.get_activity_status(progress, 1, fake_numeric_id) is None

        # Invalid block id.
        tracker.put_block_completed(student, 5, 2, fake_numeric_id)
        progress = tracker.get_or_create_progress(student)
        assert not tracker.is_block_completed(
            progress, 5, 2, fake_numeric_id)
class AssessmentTest(actions.TestBase):
    """Test for assessments."""
    def test_course_pass(self):
        """Test student passing final exam."""
        email = 'test_pass@google.com'
        name = 'Test Pass'
        # A perfect score on 'Fin'; with the 0.70 weighting asserted in
        # test_assessments below this yields an overall score of 70%.
        post = {'assessment_type': 'Fin', 'score': '100.00'}
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Submit answer.
        response = actions.submit_assessment(self, 'Fin', post)
        assert_equals(response.status_int, 200)
        assert_contains('your overall course score of 70%', response.body)
        assert_contains('you have passed the course', response.body)
        # Check that the result shows up on the profile page.
        response = actions.check_profile(self, name)
        assert_contains('70', response.body)
        assert_contains('100', response.body)
    def test_assessments(self):
        """Test assessment scores are properly submitted and summarized."""
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        email = 'test_assessments@google.com'
        name = 'Test Assessments'
        # Answers include non-ASCII text to exercise UTF-8 persistence.
        pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}]
        pre = {
            'assessment_type': 'Pre', 'score': '1.00',
            'answers': transforms.dumps(pre_answers)}
        mid = {'assessment_type': 'Mid', 'score': '2.00'}
        fin = {'assessment_type': 'Fin', 'score': '3.00'}
        peer = {'assessment_type': 'ReviewAssessmentExample'}
        second_mid = {'assessment_type': 'Mid', 'score': '1.00'}
        second_fin = {'assessment_type': 'Fin', 'score': '100000'}
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Navigate to the course overview page.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_does_not_contain(u'id="progress-completed-Mid', response.body)
        assert_contains(u'id="progress-notstarted-Mid', response.body)
        # Datastore reads below must run in the course's namespace; always
        # restore the previous namespace in the finally block.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            student = models.Student.get_enrolled_student_by_email(email)
            # Check that four score objects (corresponding to the four sample
            # assessments) exist right now, and that they all have zero
            # score.
            student_scores = course.get_all_scores(student)
            assert len(student_scores) == 4
            for assessment in student_scores:
                assert assessment['score'] == 0
            # Submit assessments and check that the score is updated.
            actions.submit_assessment(self, 'Pre', pre)
            student = models.Student.get_enrolled_student_by_email(email)
            student_scores = course.get_all_scores(student)
            assert len(student_scores) == 4
            for assessment in student_scores:
                if assessment['id'] == 'Pre':
                    assert assessment['score'] > 0
                else:
                    assert assessment['score'] == 0
            actions.submit_assessment(self, 'Mid', mid)
            student = models.Student.get_enrolled_student_by_email(email)
            # Navigate to the course overview page.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(u'id="progress-completed-Pre', response.body)
            assert_contains(u'id="progress-completed-Mid', response.body)
            assert_contains(u'id="progress-notstarted-Fin', response.body)
            # Submit the final assessment.
            actions.submit_assessment(self, 'Fin', fin)
            student = models.Student.get_enrolled_student_by_email(email)
            # Submit the sample peer review assessment.
            actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
            student_scores = course.get_all_scores(student)
            # This assessment is not considered to be completed until enough
            # peer reviews have been submitted.
            for assessment in student_scores:
                if assessment['id'] == 'ReviewAssessmentExample':
                    assert assessment['human_graded']
                    assert not assessment['completed']
            # Navigate to the course overview page.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(u'id="progress-completed-Fin', response.body)
            # Check that the overall-score is non-zero.
            assert course.get_overall_score(student)
            # Check assessment answers.
            answers = transforms.loads(
                models.StudentAnswersEntity.get_by_key_name(
                    student.user_id).data)
            assert pre_answers == answers['Pre']
            # pylint: disable=g-explicit-bool-comparison
            assert [] == answers['Mid']
            assert [] == answers['Fin']
            # pylint: enable-msg=g-explicit-bool-comparison
            # Check that scores are recorded properly.
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 3
            # Overall score is a weighted sum: 30% Mid + 70% Fin.
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 3)))
            # Try posting a new midcourse exam with a lower score;
            # nothing should change.
            actions.submit_assessment(self, 'Mid', second_mid)
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 3
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 3)))
            # Now try posting a postcourse exam with a higher score and note
            # the changes.
            actions.submit_assessment(self, 'Fin', second_fin)
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 100000
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 100000)))
        finally:
            namespace_manager.set_namespace(old_namespace)
def remove_dir(dir_name):
    """Recursively delete a directory tree, verifying it is really gone.

    A no-op when the directory does not exist; raises if the deletion
    silently failed to remove the directory.
    """
    logging.info('removing folder: %s', dir_name)
    if not os.path.exists(dir_name):
        return
    shutil.rmtree(dir_name)
    if os.path.exists(dir_name):
        raise Exception('Failed to delete directory: %s' % dir_name)
def clean_dir(dir_name):
    """Reset a directory: delete any existing content, then recreate empty."""
    # Drop whatever is there first.
    remove_dir(dir_name)
    logging.info('creating folder: %s', dir_name)
    os.makedirs(dir_name)
    # makedirs raises on most failures; double-check anyway.
    if not os.path.exists(dir_name):
        raise Exception('Failed to create directory: %s' % dir_name)
def clone_canonical_course_data(src, dst):
    """Makes a copy of canonical course content."""
    clean_dir(dst)
    # Copy the three content subtrees, in the same order as before.
    for subdir in ('assets', 'data', 'views'):
        shutil.copytree(
            os.path.join(src, subdir),
            os.path.join(dst, subdir))
    shutil.copy(
        os.path.join(src, 'course.yaml'),
        os.path.join(dst, 'course.yaml'))
    # Make all files writable.
    for folder, _unused_dirs, filenames in os.walk(dst):
        for filename in filenames:
            os.chmod(os.path.join(folder, filename), 0o777)
class GeneratedCourse(object):
    """A helper class for a dynamically generated course content."""

    @classmethod
    def set_data_home(cls, test):
        """All data for this test will be placed here."""
        cls.data_home = test.test_tempdir

    def __init__(self, ns):
        # The short suffix that makes every derived value unique per course.
        self.path = ns

    @property
    def namespace(self):
        return 'ns{0}'.format(self.path)

    @property
    def title(self):
        return u'Power Searching with Google title-{0} (тест данные)'.format(
            self.path)

    @property
    def unit_title(self):
        return u'Interpreting results unit-title-{0} (тест данные)'.format(
            self.path)

    @property
    def lesson_title(self):
        return u'Word order matters lesson-title-{0} (тест данные)'.format(
            self.path)

    @property
    def head(self):
        return '<!-- head-{0} -->'.format(self.path)

    @property
    def css(self):
        return '<!-- css-{0} -->'.format(self.path)

    @property
    def home(self):
        return os.path.join(self.data_home, 'data-{0}'.format(self.path))

    @property
    def email(self):
        return 'walk_the_course_named_{0}@google.com'.format(self.path)

    @property
    def name(self):
        return 'Walk The Course Named {0}'.format(self.path)
class MultipleCoursesTestBase(actions.TestBase):
    """Configures several courses for running concurrently."""

    def modify_file(self, filename, find, replace):
        """Read, modify and write back the file.

        Asserts that `replace` is not yet present and that the substitution
        actually happened, so a stale `find` pattern fails loudly instead of
        silently leaving the fixture unchanged.
        """
        # Fix: use context managers so both file handles are closed
        # deterministically (the previous code relied on the GC to close
        # the read and write handles).
        with open(filename, 'r') as infile:
            text = infile.read().decode('utf-8')
        # Make sure target text is not in the file.
        assert replace not in text
        text = text.replace(find, replace)
        assert replace in text
        with open(filename, 'w') as outfile:
            outfile.write(text.encode('utf-8'))

    def modify_canonical_course_data(self, course):
        """Modify canonical content by adding unique bits to it."""
        self.modify_file(
            os.path.join(course.home, 'course.yaml'),
            'title: \'Power Searching with Google\'',
            'title: \'%s\'' % course.title)
        self.modify_file(
            os.path.join(course.home, 'data/unit.csv'),
            ',Interpreting results,',
            ',%s,' % course.unit_title)
        self.modify_file(
            os.path.join(course.home, 'data/lesson.csv'),
            ',Word order matters,',
            ',%s,' % course.lesson_title)
        self.modify_file(
            os.path.join(course.home, 'data/lesson.csv'),
            ',Interpreting results,',
            ',%s,' % course.unit_title)
        self.modify_file(
            os.path.join(course.home, 'views/base.html'),
            '<head>',
            '<head>\n%s' % course.head)
        self.modify_file(
            os.path.join(course.home, 'assets/css/main.css'),
            'html {',
            '%s\nhtml {' % course.css)

    def prepare_course_data(self, course):
        """Create unique course content for a course."""
        clone_canonical_course_data(self.bundle_root, course.home)
        self.modify_canonical_course_data(course)

    def setUp(self):  # pylint: disable=g-bad-name
        """Configure the test: three uniquely-branded course copies."""
        super(MultipleCoursesTestBase, self).setUp()
        GeneratedCourse.set_data_home(self)
        self.course_a = GeneratedCourse('a')
        self.course_b = GeneratedCourse('b')
        self.course_ru = GeneratedCourse('ru')
        # Override BUNDLE_ROOT; restored in tearDown.
        self.bundle_root = appengine_config.BUNDLE_ROOT
        appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
        # Prepare course content.
        clean_dir(GeneratedCourse.data_home)
        self.prepare_course_data(self.course_a)
        self.prepare_course_data(self.course_b)
        self.prepare_course_data(self.course_ru)
        # Setup one course for I18N.
        self.modify_file(
            os.path.join(self.course_ru.home, 'course.yaml'),
            'locale: \'en_US\'',
            'locale: \'ru\'')
        # Configure courses.
        sites.setup_courses('%s, %s, %s' % (
            'course:/courses/a:/data-a:nsa',
            'course:/courses/b:/data-b:nsb',
            'course:/courses/ru:/data-ru:nsru'))

    def tearDown(self):  # pylint: disable=g-bad-name
        """Clean up."""
        sites.reset_courses()
        appengine_config.BUNDLE_ROOT = self.bundle_root
        super(MultipleCoursesTestBase, self).tearDown()

    def walk_the_course(
        self, course, first_time=True, is_admin=False, logout=True):
        """Visit a course as a Student would."""
        # Override course.yaml settings by patching app_context; the original
        # getter is restored at the end of this method.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ

        sites.ApplicationContext.get_environ = get_environ_new

        # Check normal user has no access.
        actions.login(course.email, is_admin=is_admin)

        # Test schedule.
        if first_time:
            response = self.testapp.get('/courses/%s/preview' % course.path)
        else:
            response = self.testapp.get('/courses/%s/course' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.unit_title, response.body)
        assert_contains(course.head, response.body)

        # Tests static resource.
        response = self.testapp.get(
            '/courses/%s/assets/css/main.css' % course.path)
        assert_contains(course.css, response.body)

        if first_time:
            # Test registration.
            response = self.get('/courses/%s/register' % course.path)
            assert_contains(course.title, response.body)
            assert_contains(course.head, response.body)
            register_form = actions.get_form_by_action(response, 'register')
            register_form.set('form01', course.name)
            register_form.action = '/courses/%s/register' % course.path
            response = self.submit(register_form)
            assert_equals(response.status_int, 302)
            assert_contains(
                'course#registration_confirmation', response.headers[
                    'location'])

        # Check lesson page.
        response = self.testapp.get(
            '/courses/%s/unit?unit=1&lesson=5' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.lesson_title, response.body)
        assert_contains(course.head, response.body)

        # Check activity page.
        response = self.testapp.get(
            '/courses/%s/activity?unit=1&lesson=5' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.lesson_title, response.body)
        assert_contains(course.head, response.body)

        if logout:
            actions.logout()

        # Clean up.
        sites.ApplicationContext.get_environ = get_environ_old
class MultipleCoursesTest(MultipleCoursesTestBase):
    """Test several courses running concurrently."""
    def test_courses_are_isolated(self):
        """Test each course serves its own assets, views and data."""
        # Pretend students visit courses.
        self.walk_the_course(self.course_a)
        self.walk_the_course(self.course_b)
        self.walk_the_course(self.course_a, first_time=False)
        self.walk_the_course(self.course_b, first_time=False)
        # Check course namespaced data.
        self.validate_course_data(self.course_a)
        self.validate_course_data(self.course_b)
        # Check default namespace.
        assert (
            namespace_manager.get_namespace() ==
            appengine_config.DEFAULT_NAMESPACE_NAME)
        # No Student records may leak into the default namespace.
        assert not models.Student.all().fetch(1000)
    def validate_course_data(self, course):
        """Check course data is valid."""
        # Student records live in the course's own namespace; switch to it
        # and always restore the previous namespace, even if asserts fail.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(course.namespace)
        try:
            students = models.Student.all().fetch(1000)
            # Exactly one registration per course, keyed by the course email.
            assert len(students) == 1
            for student in students:
                assert_equals(course.email, student.key().name())
                assert_equals(course.name, student.name)
        finally:
            namespace_manager.set_namespace(old_namespace)
class I18NTest(MultipleCoursesTestBase):
    """Test courses running in different locales and containing I18N content."""
    def test_csv_supports_utf8(self):
        """Test UTF-8 content in CSV file is handled correctly."""
        title_ru = u'Найди факты быстрее'
        csv_file = os.path.join(self.course_ru.home, 'data/unit.csv')
        # Replace the English title in both CSVs with a Cyrillic one.
        self.modify_file(
            csv_file, ',Find facts faster,', ',%s,' % title_ru)
        self.modify_file(
            os.path.join(self.course_ru.home, 'data/lesson.csv'),
            ',Find facts faster,', ',%s,' % title_ru)
        # Re-read the raw CSV and confirm the bytes round-trip as UTF-8.
        rows = []
        for row in csv.reader(open(csv_file)):
            rows.append(row)
        assert title_ru == rows[6][3].decode('utf-8')
        response = self.get('/courses/%s/course' % self.course_ru.path)
        assert_contains(title_ru, response.body)
        # Tests student perspective.
        self.walk_the_course(self.course_ru, first_time=True)
        self.walk_the_course(self.course_ru, first_time=False)
        # Test course author dashboard.
        self.walk_the_course(
            self.course_ru, first_time=False, is_admin=True, logout=False)
        # Helper: fetch one dashboard tab and require all strings in it.
        def assert_page_contains(page_name, text_array):
            dashboard_url = '/courses/%s/dashboard' % self.course_ru.path
            response = self.get('%s?action=%s' % (dashboard_url, page_name))
            for text in text_array:
                assert_contains(text, response.body)
        assert_page_contains('', [
            title_ru, self.course_ru.unit_title, self.course_ru.lesson_title])
        assert_page_contains(
            'assets', [self.course_ru.title])
        assert_page_contains(
            'settings', [
                self.course_ru.title,
                vfs.AbstractFileSystem.normpath(self.course_ru.home)])
        # Clean up.
        actions.logout()
    def test_i18n(self):
        """Test course is properly internationalized."""
        # The 'ru' course page must render localized navigation labels.
        response = self.get('/courses/%s/course' % self.course_ru.path)
        assert_contains_all_of(
            [u'Войти', u'Расписание', u'Курс'], response.body)
class CourseUrlRewritingTestBase(actions.TestBase):
    """Prepare course for using rewrite rules and '/courses/pswg' base URL."""

    def setUp(self):  # pylint: disable=g-bad-name
        super(CourseUrlRewritingTestBase, self).setUp()
        self.base = '/courses/pswg'
        self.namespace = 'gcb-courses-pswg-tests-ns'
        sites.setup_courses('course:%s:/:%s' % (self.base, self.namespace))

    def tearDown(self):  # pylint: disable=g-bad-name
        sites.reset_courses()
        super(CourseUrlRewritingTestBase, self).tearDown()

    def canonicalize(self, href, response=None):
        """Canonicalize URL's using either <base> or self.base."""
        already_canonical = href.startswith(
            self.base) or utils.ApplicationHandler.is_absolute(href)
        if not already_canonical:
            # A <base> tag in the response, when available, decides the
            # canonical form; delegate to the parent implementation.
            if response:
                return super(CourseUrlRewritingTestBase, self).canonicalize(
                    href, response)
            # Otherwise prefix the href with self.base.
            if not href.startswith('/'):
                href = '/%s' % href
            href = '%s%s' % (self.base, href)
        self.audit_url(href)
        return href
class VirtualFileSystemTestBase(actions.TestBase):
    """Prepares a course running on a virtual local file system."""
    def setUp(self):  # pylint: disable=g-bad-name
        """Configure the test."""
        super(VirtualFileSystemTestBase, self).setUp()
        GeneratedCourse.set_data_home(self)
        # Override BUNDLE_ROOT; restored in tearDown.
        self.bundle_root = appengine_config.BUNDLE_ROOT
        appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
        # Prepare course content.
        home_folder = os.path.join(GeneratedCourse.data_home, 'data-v')
        clone_canonical_course_data(self.bundle_root, home_folder)
        # Configure course.
        self.namespace = 'nsv'
        sites.setup_courses('course:/:/data-vfs:%s' % self.namespace)
        # Modify app_context filesystem to map /data-v to /data-vfs.
        def after_create(unused_cls, instance):
            # pylint: disable=protected-access
            # NOTE(review): the logical root is 'data-vfs' while physical
            # content lives in home_folder ('data-v') — this appears to be
            # the intended remapping; confirm against vfs implementation.
            instance._fs = vfs.AbstractFileSystem(
                vfs.LocalReadOnlyFileSystem(
                    os.path.join(GeneratedCourse.data_home, 'data-vfs'),
                    home_folder))
        sites.ApplicationContext.after_create = after_create
    def tearDown(self):  # pylint: disable=g-bad-name
        """Clean up."""
        sites.reset_courses()
        appengine_config.BUNDLE_ROOT = self.bundle_root
        super(VirtualFileSystemTestBase, self).tearDown()
class DatastoreBackedCourseTest(actions.TestBase):
    """Prepares an empty course running on datastore-backed file system."""
    def setUp(self):  # pylint: disable=g-bad-name
        """Configure the test."""
        super(DatastoreBackedCourseTest, self).setUp()
        self.supports_editing = True
        self.namespace = 'dsbfs'
        # An empty physical folder in the course spec means the datastore
        # backs the file system.
        sites.setup_courses('course:/::%s' % self.namespace)
        all_courses = sites.get_all_courses()
        assert len(all_courses) == 1
        self.app_context = all_courses[0]
    def tearDown(self):  # pylint: disable=g-bad-name
        """Clean up."""
        sites.reset_courses()
        super(DatastoreBackedCourseTest, self).tearDown()
    def upload_all_in_dir(self, dir_name, files_added):
        """Uploads all files in a folder to vfs."""
        root_dir = os.path.join(appengine_config.BUNDLE_ROOT, dir_name)
        for root, unused_dirs, files in os.walk(root_dir):
            for afile in files:
                filename = os.path.join(root, afile)
                # NOTE(review): the opened handle is never explicitly
                # closed; presumably fs.put consumes/closes it — verify.
                self.app_context.fs.put(filename, open(filename, 'rb'))
                files_added.append(filename)
    def init_course_data(self, upload_files):
        """Uploads required course data files into vfs."""
        files_added = []
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self.namespace)
            upload_files(files_added)
            # Normalize paths to be identical for Windows and Linux.
            files_added_normpath = []
            for file_added in files_added:
                files_added_normpath.append(
                    vfs.AbstractFileSystem.normpath(file_added))
            # The vfs listing must match exactly what we uploaded.
            assert self.app_context.fs.list(
                appengine_config.BUNDLE_ROOT) == sorted(files_added_normpath)
        finally:
            namespace_manager.set_namespace(old_namespace)
    def upload_all_sample_course_files(self, files_added):
        """Uploads all sample course data files into vfs."""
        self.upload_all_in_dir('assets', files_added)
        self.upload_all_in_dir('views', files_added)
        self.upload_all_in_dir('data', files_added)
        course_yaml = os.path.join(
            appengine_config.BUNDLE_ROOT, 'course.yaml')
        self.app_context.fs.put(course_yaml, open(course_yaml, 'rb'))
        files_added.append(course_yaml)
class DatastoreBackedCustomCourseTest(DatastoreBackedCourseTest):
    """Prepares a sample course running on datastore-backed file system."""
    def test_course_import(self):
        """Test importing of the course."""
        # Setup courses.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        self.namespace = 'ns_test'
        self.base = '/test'
        config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
        # Format import payload and URL.
        payload_dict = {}
        payload_dict['course'] = 'course:/:/'
        request = {}
        request['payload'] = transforms.dumps(payload_dict)
        import_put_url = (
            'rest/course/import?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}))
        # Check non-logged user has no rights.
        response = self.put(import_put_url, {}, expect_errors=True)
        assert_equals(404, response.status_int)
        # Login as admin.
        email = 'test_course_import@google.com'
        name = 'Test Course Import'
        actions.login(email, is_admin=True)
        # Check course is empty.
        response = self.get('dashboard')
        assert_equals(200, response.status_int)
        assert_does_not_contain('Filter image results by color', response.body)
        # Import sample course; rebuild the URL with the XSRF token included.
        request[
            'xsrf_token'] = XsrfTokenManager.create_xsrf_token('import-course')
        import_put_url = (
            'rest/course/import?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}))
        response = self.put(import_put_url, {})
        assert_equals(200, response.status_int)
        assert_contains('Imported.', response.body)
        # Check course is not empty.
        response = self.get('dashboard')
        assert_contains('Filter image results by color', response.body)
        # Check assessment is copied.
        response = self.get('assets/js/assessment-21.js')
        assert_equals(200, response.status_int)
        assert_contains('Humane Society website', response.body)
        # Check activity is copied.
        response = self.get('assets/js/activity-37.js')
        assert_equals(200, response.status_int)
        assert_contains('explore ways to keep yourself updated', response.body)
        unit_2_title = 'Unit 2 - Interpreting results'
        lesson_2_1_title = '2.1 When search results suggest something new'
        lesson_2_2_title = '2.2 Thinking more deeply about your search'
        # Check units and lessons are indexed correctly.
        response = actions.register(self, name)
        assert (
            'http://localhost'
            '/test/course'
            '#registration_confirmation' == response.location)
        response = self.get('course')
        assert_contains(unit_2_title, response.body)
        # Unit page.
        response = self.get('unit?unit=9')
        # A unit title.
        assert_contains(
            unit_2_title, response.body)
        # First child lesson without link.
        assert_contains(
            lesson_2_1_title, response.body)
        # Second child lesson with link.
        assert_contains(
            lesson_2_2_title, response.body)
        # Breadcrumbs.
        assert_contains_all_of(
            ['Unit 2</a></li>', 'Lesson 1</li>'], response.body)
        # Activity page for the same unit.
        response = self.get('activity?unit=9&lesson=10')
        # A unit title.
        assert_contains(
            unit_2_title, response.body)
        # An activity title.
        assert_contains(
            'Lesson 2.1 Activity', response.body)
        # First child lesson without link.
        assert_contains(
            lesson_2_1_title, response.body)
        # Second child lesson with link.
        assert_contains(
            lesson_2_2_title, response.body)
        # Breadcrumbs.
        assert_contains_all_of(
            ['Unit 2</a></li>', 'Lesson 1</a></li>'], response.body)
        # Clean up.
        sites.reset_courses()
        config.Registry.test_overrides = {}
    def test_get_put_file(self):
        """Test that one can put/get file via REST interface."""
        self.init_course_data(self.upload_all_sample_course_files)
        email = 'test_get_put_file@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=settings')
        # Check course.yaml edit form.
        compute_form = response.forms['edit_course_yaml']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        assert_contains(
            'dashboard?action=edit_settings&key=%2Fcourse.yaml',
            response.location)
        response = self.get(response.location)
        assert_contains('rest/files/item?key=%2Fcourse.yaml', response.body)
        # Get text file.
        response = self.get('rest/files/item?key=%2Fcourse.yaml')
        assert_equals(response.status_int, 200)
        # Payload is double-encoded: a JSON envelope around a JSON body.
        json_dict = transforms.loads(
            transforms.loads(response.body)['payload'])
        assert '/course.yaml' == json_dict['key']
        assert 'text/utf-8' == json_dict['encoding']
        # Served content must match the file on disk byte-for-byte.
        assert (open(os.path.join(
            appengine_config.BUNDLE_ROOT, 'course.yaml')).read(
                ) == json_dict['content'])
    def test_empty_course(self):
        """Test course with no assets and the simplest possible course.yaml."""
        email = 'test_empty_course@google.com'
        actions.login(email, is_admin=True)
        # Check minimal course page comes up.
        response = self.get('course')
        assert_contains('UNTITLED COURSE', response.body)
        assert_contains('Registration', response.body)
        # Check inheritable files are accessible.
        response = self.get('/assets/css/main.css')
        assert (open(os.path.join(
            appengine_config.BUNDLE_ROOT, 'assets/css/main.css')).read(
                ) == response.body)
        # Check non-inheritable files are not inherited.
        response = self.testapp.get(
            '/assets/js/activity-1.3.js', expect_errors=True)
        assert_equals(response.status_int, 404)
        # Login as admin.
        email = 'test_empty_course@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard')
        # Add unit.
        compute_form = response.forms['add_unit']
        response = self.submit(compute_form)
        response = self.get('/rest/course/unit?key=1')
        assert_equals(response.status_int, 200)
        # Add lessons.
        response = self.get('dashboard')
        compute_form = response.forms['add_lesson']
        response = self.submit(compute_form)
        response = self.get('/rest/course/lesson?key=2')
        assert_equals(response.status_int, 200)
        # Add assessment.
        response = self.get('dashboard')
        compute_form = response.forms['add_assessment']
        response = self.submit(compute_form)
        response = self.get('/rest/course/assessment?key=3')
        assert_equals(response.status_int, 200)
        # Add link.
        response = self.get('dashboard')
        compute_form = response.forms['add_link']
        response = self.submit(compute_form)
        response = self.get('/rest/course/link?key=4')
        assert_equals(response.status_int, 200)
    def import_sample_course(self):
        """Imports a sample course."""
        # Setup courses.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        # Import sample course.
        dst_app_context = sites.get_all_courses()[0]
        src_app_context = sites.get_all_courses()[1]
        dst_course = courses.Course(None, app_context=dst_app_context)
        errors = []
        src_course_out, dst_course_out = dst_course.import_from(
            src_app_context, errors)
        if errors:
            raise Exception(errors)
        # Source and destination must end up with the same unit count.
        assert len(
            src_course_out.get_units()) == len(dst_course_out.get_units())
        dst_course_out.save()
        # Clean up.
        sites.reset_courses()
    def test_imported_course_performance(self):
        """Tests various pages of the imported course."""
        self.import_sample_course()
        # Install a clone on the '/' so all the tests will treat it as normal
        # sample course.
        sites.setup_courses('course:/::ns_test')
        self.namespace = 'ns_test'
        # Enable memcache.
        config.Registry.test_overrides[
            models.CAN_USE_MEMCACHE.name] = True
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        def custom_inc(unused_increment=1, context=None):
            """A custom inc() function for cache miss counter."""
            self.keys.append(context)
            self.count += 1
        def assert_cached(url, assert_text, cache_miss_allowed=0):
            """Checks that specific URL supports caching."""
            memcache.flush_all()
            self.keys = []
            self.count = 0
            # Expect cache misses first time we load page.
            cache_miss_before = self.count
            response = self.get(url)
            assert_contains(assert_text, response.body)
            assert cache_miss_before != self.count
            # Expect at most cache_miss_allowed misses on the second load.
            self.keys = []
            cache_miss_before = self.count
            response = self.get(url)
            assert_contains(assert_text, response.body)
            cache_miss_actual = self.count - cache_miss_before
            if cache_miss_actual != cache_miss_allowed:
                raise Exception(
                    'Expected %s cache misses, got %s. Keys are:\n%s' % (
                        cache_miss_allowed, cache_miss_actual,
                        '\n'.join(self.keys)))
        # Intercept the cache-miss counter; restored below.
        old_inc = models.CACHE_MISS.inc
        models.CACHE_MISS.inc = custom_inc
        # Walk the site.
        email = 'test_units_lessons@google.com'
        name = 'Test Units Lessons'
        assert_cached('preview', 'Putting it all together')
        actions.login(email, is_admin=True)
        assert_cached('preview', 'Putting it all together')
        actions.register(self, name)
        assert_cached(
            'unit?unit=9', 'When search results suggest something new')
        assert_cached(
            'unit?unit=9&lesson=12', 'Understand options for different media')
        # Clean up.
        models.CACHE_MISS.inc = old_inc
        sites.ApplicationContext.get_environ = get_environ_old
        config.Registry.test_overrides = {}
        sites.reset_courses()
    def test_imported_course(self):
        """Tests various pages of the imported course."""
        # TODO(psimakov): Ideally, this test class should run all aspect tests
        # and they all should pass. However, the id's in the cloned course
        # do not match the id's of source sample course and we fetch pages
        # and assert page content using id's. For now, we will check the minimal
        # set of pages manually. Later, we have to make it run all known tests.
        self.import_sample_course()
        # Install a clone on the '/' so all the tests will treat it as normal
        # sample course.
        sites.setup_courses('course:/::ns_test')
        self.namespace = 'ns_test'
        email = 'test_units_lessons@google.com'
        name = 'Test Units Lessons'
        actions.login(email, is_admin=True)
        response = self.get('course')
        assert_contains('Putting it all together', response.body)
        actions.register(self, name)
        actions.check_profile(self, name)
        actions.view_announcements(self)
        # Check unit page without lesson specified.
        response = self.get('unit?unit=9')
        assert_contains('Interpreting results', response.body)
        assert_contains(
            'When search results suggest something new', response.body)
        # Check unit page with a lesson specified.
        response = self.get('unit?unit=9&lesson=12')
        assert_contains('Interpreting results', response.body)
        assert_contains(
            'Understand options for different media', response.body)
        # Check assessment page.
        response = self.get('assessment?name=21')
        assert_contains(
            '<script src="assets/js/assessment-21.js"></script>', response.body)
        # Check activity page.
        response = self.get('activity?unit=9&lesson=13')
        assert_contains(
            '<script src="assets/js/activity-13.js"></script>',
            response.body)
        # Clean up.
        sites.reset_courses()
class DatastoreBackedSampleCourseTest(DatastoreBackedCourseTest):
    """Run all existing tests using datastore-backed file system."""
    def setUp(self):  # pylint: disable=g-bad-name
        """Seed the datastore-backed vfs with the full sample course."""
        super(DatastoreBackedSampleCourseTest, self).setUp()
        self.init_course_data(self.upload_all_sample_course_files)
class LessonComponentsTest(DatastoreBackedCourseTest):
    """Test operations that make use of components in a lesson body."""
    def setUp(self):
        """Set up the dummy course for each test case in this class."""
        super(LessonComponentsTest, self).setUp()
        self.course = courses.Course(None, app_context=self.app_context)
        self.unit = self.course.add_unit()
        self.lesson = self.course.add_lesson(self.unit)
        # A lesson body containing two trackable components (question and
        # question-group) plus one non-trackable one (gcb-youtube).
        self.lesson.objectives = """
            <question quid="123" weight="1" instanceid="QN"></question>
            random_text
            <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
            more_random_text
            <question-group qgid="456" instanceid="QG"></question-group>
            yet_more_random_text
        """
        self.lesson.has_activity = False
        self.course.update_lesson(self.lesson)
        self.course.save()
        self.tracker = self.course.get_progress_tracker()
    def test_component_discovery(self):
        """Test extraction of components from a lesson body."""
        cpt_list = self.course.get_components(
            self.unit.unit_id, self.lesson.lesson_id)
        assert cpt_list == [
            {'instanceid': 'QN', 'quid': '123', 'weight': '1',
             'cpt_name': 'question'},
            {'instanceid': 'VD', 'cpt_name': 'gcb-youtube',
             'videoid': 'Kdg2drcUjYI'},
            {'instanceid': 'QG', 'qgid': '456', 'cpt_name': 'question-group'}
        ]
        # Only the question and question-group count toward progress.
        valid_cpt_ids = self.tracker.get_valid_component_ids(
            self.unit.unit_id, self.lesson.lesson_id)
        self.assertEqual(set(['QN', 'QG']), set(valid_cpt_ids))
    def test_component_progress(self):
        """Test that progress tracking for components is done correctly."""
        # Progress codes asserted below: 0 = not started, 1 = in progress,
        # 2 = completed.
        unit_id = self.unit.unit_id
        lesson_id = self.lesson.lesson_id
        student = models.Student(key_name='lesson-body-test-student')
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Visiting the lesson page has no effect on progress, since it contains
        # trackable components.
        self.tracker.put_html_accessed(student, unit_id, lesson_id)
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Marking progress for a non-existent component id has no effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'a')
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Marking progress for a non-trackable component id has no effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'VD')
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Completing a trackable component marks the lesson as in-progress.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
        assert self.tracker.get_unit_progress(student)[unit_id] == 1
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 1, 'activity': 0}
        # Completing the same component again has no further effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
        assert self.tracker.get_unit_progress(student)[unit_id] == 1
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 1, 'activity': 0}
        # Completing the other trackable component marks the lesson (and unit)
        # as completed.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QG')
        assert self.tracker.get_unit_progress(student)[unit_id] == 2
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 2, 'activity': 0}
class FakeEnvironment(object):
    """Temporary fake tools.etl.remote.Environment.

    Bypasses making a remote_api connection because webtest can't handle it and
    we don't want to bring up a local server for our functional tests. When this
    fake is used, the in-process datastore stub will handle RPCs.

    TODO(johncox): find a way to make webtest successfully emulate the
    remote_api endpoint and get rid of this fake.
    """

    def __init__(self, application_id, server, path=None):
        """Mirrors remote.Environment's constructor signature.

        Args:
            application_id: string. The App Engine application id.
            server: string. The host:port of the server to (not) connect to.
            path: string or None. Optional remote_api endpoint path.
        """
        # Fixed typo: attribute was previously named '_appication_id'.
        self._application_id = application_id
        self._path = path
        self._server = server

    def establish(self):
        """No-op stand-in for remote.Environment.establish(); makes no RPC."""
        pass
class EtlMainTestCase(DatastoreBackedCourseTest):
    """Tests tools/etl/etl.py's main()."""

    # Allow access to protected members under test.
    # pylint: disable=protected-access

    def setUp(self):
        """Configures EtlMainTestCase."""
        super(EtlMainTestCase, self).setUp()
        self.test_environ = copy.deepcopy(os.environ)
        # In etl.main, use test auth scheme to avoid interactive login.
        self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
        self.archive_path = os.path.join(self.test_tempdir, 'archive.zip')
        self.new_course_title = 'New Course Title'
        self.url_prefix = '/test'
        self.raw = 'course:%s::ns_test' % self.url_prefix
        self.swap(os, 'environ', self.test_environ)
        # Common argv fragments reused by most tests below.
        self.common_args = [
            self.url_prefix, 'myapp', 'localhost:8080']
        self.common_command_args = self.common_args + [
            '--archive_path', self.archive_path]
        self.common_course_args = [etl._TYPE_COURSE] + self.common_command_args
        self.common_datastore_args = [
            etl._TYPE_DATASTORE] + self.common_command_args
        self.delete_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DELETE, etl._TYPE_DATASTORE] + self.common_args)
        self.download_course_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args)
        self.upload_course_args = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_course_args)
        # Set up courses: version 1.3, version 1.2.
        sites.setup_courses(self.raw + ', course:/:/')

    def tearDown(self):
        sites.reset_courses()
        super(EtlMainTestCase, self).tearDown()

    def create_app_yaml(self, context, title=None):
        """Writes a course.yaml into the given context's filesystem."""
        yaml = copy.deepcopy(courses.DEFAULT_COURSE_YAML_DICT)
        if title:
            yaml['course']['title'] = title
        context.fs.impl.put(
            os.path.join(
                appengine_config.BUNDLE_ROOT, etl._COURSE_YAML_PATH_SUFFIX),
            etl._ReadWrapper(str(yaml)), is_draft=False)

    def create_archive(self):
        """Downloads the sample course into self.archive_path, then resets."""
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        args = etl.PARSER.parse_args(['download'] + self.common_course_args)
        etl.main(args, environment_class=FakeEnvironment)
        sites.reset_courses()

    def create_archive_with_question(self, data):
        """Like create_archive, but adds a QuestionEntity; returns it."""
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        question = _add_data_entity(
            sites.get_all_courses()[1], models.QuestionEntity, data)
        args = etl.PARSER.parse_args(['download'] + self.common_course_args)
        etl.main(args, environment_class=FakeEnvironment)
        sites.reset_courses()
        return question

    def create_empty_course(self, raw):
        """Sets up an empty course (with only a course.yaml) for `raw`."""
        sites.setup_courses(raw)
        context = etl_lib.get_context(self.url_prefix)
        course = etl._get_course_from(etl_lib.get_context(self.url_prefix))
        course.delete_all()
        self.create_app_yaml(context)

    def import_sample_course(self):
        """Imports a sample course."""
        # Import sample course.
        dst_app_context = sites.get_all_courses()[0]
        src_app_context = sites.get_all_courses()[1]
        # Patch in a course.yaml.
        self.create_app_yaml(dst_app_context, title=self.new_course_title)
        dst_course = courses.Course(None, app_context=dst_app_context)
        errors = []
        src_course_out, dst_course_out = dst_course.import_from(
            src_app_context, errors)
        if errors:
            raise Exception(errors)
        assert len(
            src_course_out.get_units()) == len(dst_course_out.get_units())
        dst_course_out.save()

    def test_delete_course_fails(self):
        args = etl.PARSER.parse_args(
            [etl._MODE_DELETE, etl._TYPE_COURSE] + self.common_args)
        self.assertRaises(
            NotImplementedError,
            etl.main, args, environment_class=FakeEnvironment)

    def test_delete_datastore_fails_if_user_does_not_confirm(self):
        # Answer the interactive confirmation prompt with a wrong string.
        self.swap(
            etl, '_raw_input',
            lambda x: 'not' + etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
        self.assertRaises(
            SystemExit, etl.main, self.delete_datastore_args,
            environment_class=FakeEnvironment)

    def test_delete_datastore_succeeds(self):
        """Tests delete datastore success for populated and empty datastores."""
        self.import_sample_course()
        context = etl_lib.get_context(
            self.delete_datastore_args.course_url_prefix)
        self.swap(
            etl, '_raw_input',
            lambda x: etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
        # Spot check that some kinds are populated.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            self.assertTrue(vfs.FileDataEntity.all().get())
            self.assertTrue(vfs.FileMetadataEntity.all().get())
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Delete against a datastore with contents runs successfully.
        etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)
        # Spot check that those kinds are now empty.
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            self.assertFalse(vfs.FileDataEntity.all().get())
            self.assertFalse(vfs.FileMetadataEntity.all().get())
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Delete against a datastore without contents runs successfully.
        etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)

    def test_disable_remote_cannot_be_passed_for_mode_other_than_run(self):
        bad_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args +
            ['--disable_remote'])
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)

    def test_download_course_creates_valid_archive(self):
        """Tests download of course data and archive creation."""
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        question = _add_data_entity(
            sites.get_all_courses()[0], models.QuestionEntity, 'test question')
        etl.main(self.download_course_args, environment_class=FakeEnvironment)
        # Don't use Archive and Manifest here because we want to test the raw
        # structure of the emitted zipfile.
        zip_archive = zipfile.ZipFile(self.archive_path)
        # check manifest
        manifest = transforms.loads(
            zip_archive.open(etl._MANIFEST_FILENAME).read())
        self.assertGreaterEqual(
            courses.COURSE_MODEL_VERSION_1_3, manifest['version'])
        self.assertEqual(
            'course:%s::ns_test' % self.url_prefix, manifest['raw'])
        # check content
        for entity in manifest['entities']:
            self.assertTrue(entity.has_key('is_draft'))
            self.assertTrue(zip_archive.open(entity['path']))
        # check question
        question_json = transforms.loads(
            zip_archive.open('models/QuestionEntity.json').read())
        self.assertEqual(
            question.key().id(), question_json['rows'][0]['key.id'])
        self.assertEqual(
            'test question', question_json['rows'][0]['data'])

    def test_download_course_errors_if_archive_path_exists_on_disk(self):
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        etl.main(self.download_course_args, environment_class=FakeEnvironment)
        # Second download against the same path must refuse to overwrite.
        self.assertRaises(
            SystemExit, etl.main, self.download_course_args,
            environment_class=FakeEnvironment)

    def test_download_errors_if_course_url_prefix_does_not_exist(self):
        sites.reset_courses()
        self.assertRaises(
            SystemExit, etl.main, self.download_course_args,
            environment_class=FakeEnvironment)

    def test_download_course_errors_if_course_version_is_pre_1_3(self):
        args = etl.PARSER.parse_args(
            ['download', 'course', '/'] + self.common_course_args[2:])
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        self.assertRaises(
            SystemExit, etl.main, args, environment_class=FakeEnvironment)

    def test_download_datastore_fails_if_datastore_types_not_in_datastore(self):
        download_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--datastore_types', 'missing'])
        self.assertRaises(
            SystemExit, etl.main, download_datastore_args,
            environment_class=FakeEnvironment)

    def test_download_datastore_succeeds(self):
        """Test download of datastore data and archive creation."""
        download_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--datastore_types', 'Student,StudentPropertyEntity'])
        context = etl_lib.get_context(download_datastore_args.course_url_prefix)
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            first_student = models.Student(key_name='first_student')
            second_student = models.Student(key_name='second_student')
            first_entity = models.StudentPropertyEntity(
                key_name='first_student-property_entity')
            second_entity = models.StudentPropertyEntity(
                key_name='second_student-property_entity')
            db.put([first_student, second_student, first_entity, second_entity])
        finally:
            namespace_manager.set_namespace(old_namespace)
        etl.main(
            download_datastore_args, environment_class=FakeEnvironment)
        archive = etl._Archive(self.archive_path)
        archive.open('r')
        self.assertEqual(
            ['Student.json', 'StudentPropertyEntity.json'],
            sorted(
                [os.path.basename(e.path) for e in archive.manifest.entities]))
        student_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('Student.json')][0]
        entity_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('StudentPropertyEntity.json')][0]
        # Ensure .json files are deserializable into Python objects.
        students = sorted(
            transforms.loads(archive.get(student_entity.path))['rows'],
            key=lambda d: d['key.name'])
        entities = sorted(
            transforms.loads(archive.get(entity_entity.path))['rows'],
            key=lambda d: d['key.name'])
        # Spot check their contents.
        self.assertEqual(
            [model.key().name() for model in [first_student, second_student]],
            [student['key.name'] for student in students])
        self.assertEqual(
            [model.key().name() for model in [first_entity, second_entity]],
            [entity['key.name'] for entity in entities])

    def test_download_datastore_with_privacy_maintains_references(self):
        """Tests --privacy transforms ids consistently across entity kinds."""
        unsafe_user_id = '1'
        download_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--datastore_types', 'EventEntity,Student', '--privacy',
             '--privacy_secret', 'super_seekrit'])
        context = etl_lib.get_context(download_datastore_args.course_url_prefix)
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            event = models.EventEntity(user_id=unsafe_user_id)
            student = models.Student(
                key_name='first_student', user_id=unsafe_user_id)
            db.put([event, student])
        finally:
            namespace_manager.set_namespace(old_namespace)
        etl.main(
            download_datastore_args, environment_class=FakeEnvironment)
        archive = etl._Archive(self.archive_path)
        archive.open('r')
        self.assertEqual(
            ['EventEntity.json', 'Student.json'],
            sorted(
                [os.path.basename(e.path) for e in archive.manifest.entities]))
        event_entity_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('EventEntity.json')][0]
        student_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('Student.json')][0]
        # Ensure .json files are deserializable into Python objects...
        event_entities = transforms.loads(
            archive.get(event_entity_entity.path))['rows']
        students = transforms.loads(archive.get(student_entity.path))['rows']
        # Reference maintained.
        self.assertEqual(event_entities[0]['user_id'], students[0]['user_id'])
        # But user_id transformed.
        self.assertNotEqual(unsafe_user_id, event_entities[0]['user_id'])
        self.assertNotEqual(unsafe_user_id, students[0]['user_id'])

    def test_privacy_fails_if_not_downloading_datastore(self):
        wrong_mode = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_datastore_args + ['--privacy'])
        self.assertRaises(
            SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
        wrong_type = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args + ['--privacy'])
        self.assertRaises(
            SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)

    def test_privacy_secret_fails_if_not_download_datastore_with_privacy(self):
        """Tests invalid flag combinations related to --privacy."""
        missing_privacy = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--privacy_secret', 'foo'])
        # Note: the original test asserted this exact case twice in a row;
        # the redundant duplicate assertion has been removed.
        self.assertRaises(
            SystemExit, etl.main, missing_privacy,
            environment_class=FakeEnvironment)
        wrong_mode = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_datastore_args +
            ['--privacy_secret', 'foo', '--privacy'])
        self.assertRaises(
            SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
        wrong_type = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args +
            ['--privacy_secret', 'foo', '--privacy'])
        self.assertRaises(
            SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)

    def test_run_fails_when_delegated_argument_parsing_fails(self):
        bad_args = etl.PARSER.parse_args(
            ['run', 'tools.etl_lib.Job'] + self.common_args +
            ['--job_args', "'unexpected_argument'"])
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)

    def test_run_fails_when_if_requested_class_missing_or_invalid(self):
        bad_args = etl.PARSER.parse_args(
            ['run', 'a.missing.class.or.Module'] + self.common_args)
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
        # A resolvable class that is not an etl job is rejected too.
        bad_args = etl.PARSER.parse_args(
            ['run', 'tools.etl.etl._Archive'] + self.common_args)
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)

    def test_run_print_memcache_stats_succeeds(self):
        """Tests examples.PrintMemcacheStats prints stats to stdout."""
        args = etl.PARSER.parse_args(
            ['run', 'tools.etl.examples.PrintMemcacheStats'] + self.common_args)
        # Generate one miss, one item/byte, and one hit.
        memcache.get('key')
        memcache.set('key', 1)
        memcache.get('key')
        old_stdout = sys.stdout
        stdout = cStringIO.StringIO()
        try:
            sys.stdout = stdout
            etl.main(args, environment_class=FakeEnvironment)
        finally:
            sys.stdout = old_stdout
        expected = examples.PrintMemcacheStats._STATS_TEMPLATE % {
            'byte_hits': 1,
            'bytes': 1,
            'hits': 1,
            'items': 1,
            'misses': 1,
            'oldest_item_age': 0,
        }
        self.assertTrue(expected in stdout.getvalue())

    def test_run_skips_remote_env_setup_when_disable_remote_passed(self):
        args = etl.PARSER.parse_args(
            ['run', 'tools.etl.etl_lib.Job'] + self.common_args +
            ['--disable_remote'])
        # No environment_class passed: would fail if a remote env were set up.
        etl.main(args)

    def test_run_upload_file_to_course_succeeds(self):
        """Tests upload of a single local file to a course."""
        path = os.path.join(self.test_tempdir, 'file')
        target = 'assets/file'
        remote_path = os.path.join(appengine_config.BUNDLE_ROOT, target)
        contents = 'contents'
        with open(path, 'w') as f:
            f.write(contents)
        args = etl.PARSER.parse_args(
            ['run', 'tools.etl.examples.UploadFileToCourse'] +
            self.common_args + ['--job_args=%s %s' % (path, target)])
        sites.setup_courses(self.raw)
        context = etl_lib.get_context(args.course_url_prefix)
        self.assertFalse(context.fs.impl.get(remote_path))
        etl.main(args, environment_class=FakeEnvironment)
        self.assertEqual(contents, context.fs.impl.get(remote_path).read())

    def test_run_write_student_emails_to_file_succeeds(self):
        """Tests args passed to and run of examples.WriteStudentEmailsToFile."""
        email1 = 'email1@example.com'
        email2 = 'email2@example.com'
        path = os.path.join(self.test_tempdir, 'emails')
        args = etl.PARSER.parse_args(
            ['run', 'tools.etl.examples.WriteStudentEmailsToFile'] +
            self.common_args + ['--job_args=%s --batch_size 1' % path])
        context = etl_lib.get_context(args.course_url_prefix)
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            first_student = models.Student(key_name=email1)
            second_student = models.Student(key_name=email2)
            db.put([first_student, second_student])
        finally:
            namespace_manager.set_namespace(old_namespace)
        etl.main(args, environment_class=FakeEnvironment)
        self.assertEqual('%s\n%s\n' % (email1, email2), open(path).read())

    def test_upload_course_fails_if_archive_cannot_be_opened(self):
        sites.setup_courses(self.raw)
        self.assertRaises(
            SystemExit, etl.main, self.upload_course_args,
            environment_class=FakeEnvironment)

    def test_upload_course_fails_if_archive_course_json_malformed(self):
        self.create_archive()
        self.create_empty_course(self.raw)
        zip_archive = zipfile.ZipFile(self.archive_path, 'a')
        zip_archive.writestr(
            etl._Archive.get_internal_path(etl._COURSE_JSON_PATH_SUFFIX),
            'garbage')
        zip_archive.close()
        self.assertRaises(
            SystemExit, etl.main, self.upload_course_args,
            environment_class=FakeEnvironment)

    def test_upload_course_fails_if_archive_course_yaml_malformed(self):
        self.create_archive()
        self.create_empty_course(self.raw)
        zip_archive = zipfile.ZipFile(self.archive_path, 'a')
        zip_archive.writestr(
            etl._Archive.get_internal_path(etl._COURSE_YAML_PATH_SUFFIX),
            '{')
        zip_archive.close()
        self.assertRaises(
            SystemExit, etl.main, self.upload_course_args,
            environment_class=FakeEnvironment)

    def test_upload_course_fails_if_course_has_non_course_yaml_contents(self):
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        self.assertRaises(
            SystemExit, etl.main, self.upload_course_args,
            environment_class=FakeEnvironment)

    def test_upload_course_fails_if_force_overwrite_passed_with_bad_args(self):
        self.create_archive()
        bad_args = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_datastore_args + [
                '--force_overwrite'])
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)

    def test_upload_course_fails_if_no_course_with_url_prefix_found(self):
        self.create_archive()
        self.assertRaises(
            SystemExit, etl.main, self.upload_course_args,
            environment_class=FakeEnvironment)

    def test_upload_course_succeeds(self):
        """Tests upload of archive contents."""
        question = self.create_archive_with_question('test question')
        self.create_empty_course(self.raw)
        context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
        self.assertNotEqual(self.new_course_title, context.get_title())
        etl.main(self.upload_course_args, environment_class=FakeEnvironment)
        # check archive content
        archive = etl._Archive(self.archive_path)
        archive.open('r')
        context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
        filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
        self.assertEqual(
            len(archive.manifest.entities),
            len(filesystem_contents) + len(COURSE_CONTENT_ENTITY_FILES))
        # check course structure
        self.assertEqual(self.new_course_title, context.get_title())
        units = etl._get_course_from(context).get_units()
        spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
        self.assertEqual('Interpreting results', spot_check_single_unit.title)
        for unit in units:
            self.assertTrue(unit.title)
        # check entities
        for entity in archive.manifest.entities:
            _, tail = os.path.split(entity.path)
            if tail in COURSE_CONTENT_ENTITY_FILES:
                continue
            full_path = os.path.join(
                appengine_config.BUNDLE_ROOT,
                etl._Archive.get_external_path(entity.path))
            stream = context.fs.impl.get(full_path)
            self.assertEqual(entity.is_draft, stream.metadata.is_draft)
        # check uploaded question matches original
        _assert_identical_data_entity_exists(
            sites.get_all_courses()[0], question)

    def test_upload_course_with_force_overwrite_succeeds(self):
        """Tests upload into non-empty course with --force_overwrite."""
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        etl.main(self.download_course_args, environment_class=FakeEnvironment)
        force_overwrite_args = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_course_args + [
                '--force_overwrite'])
        etl.main(force_overwrite_args, environment_class=FakeEnvironment)
        archive = etl._Archive(self.archive_path)
        archive.open('r')
        context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
        filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
        self.assertEqual(
            len(archive.manifest.entities),
            len(filesystem_contents) + len(COURSE_CONTENT_ENTITY_FILES))
        self.assertEqual(self.new_course_title, context.get_title())
        units = etl._get_course_from(context).get_units()
        spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
        self.assertEqual('Interpreting results', spot_check_single_unit.title)
        for unit in units:
            self.assertTrue(unit.title)
        for entity in archive.manifest.entities:
            _, tail = os.path.split(entity.path)
            if tail in COURSE_CONTENT_ENTITY_FILES:
                continue
            full_path = os.path.join(
                appengine_config.BUNDLE_ROOT,
                etl._Archive.get_external_path(entity.path))
            stream = context.fs.impl.get(full_path)
            self.assertEqual(entity.is_draft, stream.metadata.is_draft)

    def test_upload_datastore_fails(self):
        upload_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_datastore_args +
            ['--datastore_types', 'doesnt_matter'])
        self.assertRaises(
            NotImplementedError, etl.main, upload_datastore_args,
            environment_class=FakeEnvironment)
class EtlPrivacyTransformFunctionTestCase(actions.TestBase):
    """Tests privacy transforms."""

    # Testing protected functions. pylint: disable=protected-access

    def test_hmac_sha_2_256_is_stable(self):
        # Hashing the same (secret, value) pair twice yields the same digest.
        first_digest = etl._hmac_sha_2_256('secret', 'value')
        second_digest = etl._hmac_sha_2_256('secret', 'value')
        self.assertEqual(first_digest, second_digest)

    def test_is_identity_transform_when_privacy_false(self):
        # With privacy off, the transform returns its input unchanged,
        # regardless of the secret supplied.
        for secret in ('no_effect', 'other_value'):
            transform = etl._get_privacy_transform_fn(False, secret)
            self.assertEqual(1, transform(1))

    def test_is_hmac_sha_2_256_when_privacy_true(self):
        # With privacy on, the transform is the HMAC-SHA-256 of the value.
        transform = etl._get_privacy_transform_fn(True, 'secret')
        self.assertEqual(
            etl._hmac_sha_2_256('secret', 'value'), transform('value'))
# TODO(johncox): re-enable these tests once we figure out how to make webtest
# play nice with remote_api.
class EtlRemoteEnvironmentTestCase(actions.TestBase):
    """Tests tools/etl/remote.py."""

    # Method name determined by superclass. pylint: disable=g-bad-name
    def setUp(self):
        super(EtlRemoteEnvironmentTestCase, self).setUp()
        self.test_environ = copy.deepcopy(os.environ)

    # Allow access to protected members under test.
    # pylint: disable=protected-access
    # NOTE(review): the 'disabled_' prefix presumably keeps the test runner
    # from discovering these until the webtest/remote_api issue is fixed.
    def disabled_test_can_establish_environment_in_dev_mode(self):
        # Stub the call that requires user input so the test runs unattended.
        self.swap(__builtin__, 'raw_input', lambda _: 'username')
        self.assertEqual(os.environ['SERVER_SOFTWARE'], remote.SERVER_SOFTWARE)
        # establish() performs RPC. If it doesn't throw, we're good.
        remote.Environment('mycourse', 'localhost:8080').establish()

    def disabled_test_can_establish_environment_in_test_mode(self):
        # Use the test auth scheme so establish() does not prompt for login.
        self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
        self.swap(os, 'environ', self.test_environ)
        # establish() performs RPC. If it doesn't throw, we're good.
        remote.Environment('mycourse', 'localhost:8080').establish()
class CourseUrlRewritingTest(CourseUrlRewritingTestBase):
    """Run all existing tests using '/courses/pswg' base URL rewrite rules."""
    # Test methods are mixed in at module load time via __bases__ (see the
    # ALL_COURSE_TESTS wiring at the bottom of this file).
class VirtualFileSystemTest(VirtualFileSystemTestBase):
    """Run all existing tests using virtual local file system."""
    # Test methods are mixed in at module load time via __bases__ (see the
    # ALL_COURSE_TESTS wiring at the bottom of this file).
class MemcacheTestBase(actions.TestBase):
    """Executes all tests with memcache enabled."""

    def setUp(self):  # pylint: disable=g-bad-name
        super(MemcacheTestBase, self).setUp()
        # Force-enable memcache for the duration of each test.
        config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}

    def tearDown(self):  # pylint: disable=g-bad-name
        # Clear the override so later tests see the default configuration.
        config.Registry.test_overrides = {}
        super(MemcacheTestBase, self).tearDown()
class MemcacheTest(MemcacheTestBase):
    """Runs the mixed-in course test suites with memcache enabled.

    Test methods are added at module load time via __bases__ (see the
    ALL_COURSE_TESTS wiring at the bottom of this file).
    """
class TransformsJsonFileTestCase(actions.TestBase):
    """Tests for models/transforms.py's JsonFile."""

    # Method name determined by superclass. pylint: disable=g-bad-name
    def setUp(self):
        super(TransformsJsonFileTestCase, self).setUp()
        # Treat as module-protected. pylint: disable=protected-access
        self.path = os.path.join(self.test_tempdir, 'file.json')
        # Reader and writer are two JsonFile handles on the same on-disk file.
        self.reader = transforms.JsonFile(self.path)
        self.writer = transforms.JsonFile(self.path)
        # Sample records: a scalar and a nested dict.
        self.first = 1
        self.second = {'c': 'c_value', 'd': {'nested': 'e'}}

    def tearDown(self):
        self.reader.close()
        self.writer.close()
        super(TransformsJsonFileTestCase, self).tearDown()

    def test_round_trip_of_file_with_zero_records(self):
        self.writer.open('w')
        self.writer.close()
        self.reader.open('r')
        # Iteration yields no records; read() still returns the envelope.
        self.assertEqual([], [entity for entity in self.reader])
        self.reader.reset()
        self.assertEqual({'rows': []}, self.reader.read())

    def test_round_trip_of_file_with_one_record(self):
        self.writer.open('w')
        self.writer.write(self.first)
        self.writer.close()
        self.reader.open('r')
        self.assertEqual([self.first], [entity for entity in self.reader])
        self.reader.reset()
        self.assertEqual({'rows': [self.first]}, self.reader.read())

    def test_round_trip_of_file_with_multiple_records(self):
        self.writer.open('w')
        self.writer.write(self.first)
        self.writer.write(self.second)
        self.writer.close()
        self.reader.open('r')
        self.assertEqual(
            [self.first, self.second], [entity for entity in self.reader])
        self.reader.reset()
        self.assertEqual(
            {'rows': [self.first, self.second]}, self.reader.read())
class ImportActivityTests(DatastoreBackedCourseTest):
    """Functional tests for importing legacy activities into lessons."""

    URI = '/rest/course/lesson/activity'

    FREETEXT_QUESTION = """
        var activity = [
          { questionType: 'freetext',
            correctAnswerRegex: /abc/i,
            correctAnswerOutput: "Correct.",
            incorrectAnswerOutput: "Try again.",
            showAnswerOutput: "A hint."
          }
        ];
    """
    MULTPLE_CHOICE_QUESTION = """
        var activity = [
          {questionType: 'multiple choice',
            choices: [
              ['a', false, 'A'],
              ['b', true, 'B'],
              ['c', false, 'C'],
              ['d', false, 'D']
            ]
          }
        ];
    """
    # NOTE(review): the activity below appears to be missing a comma after the
    # questionsList closing ']' (before allCorrectOutput). Left byte-identical
    # to the original; confirm against the activity parser's tolerance.
    MULTPLE_CHOICE_GROUP_QUESTION = """
        var activity = [
          {questionType: 'multiple choice group',
            questionsList: [
              {
                questionHTML: 'choose a',
                choices: ['aa', 'bb'],
                correctIndex: 0
              },
              {
                questionHTML: 'choose b or c',
                choices: ['aa', 'bb', 'cc'],
                correctIndex: [1, 2]
              }
            ]
            allCorrectOutput: 'unused',
            someIncorrectOutput: 'also unused'
          }
        ];
    """

    def setUp(self):
        super(ImportActivityTests, self).setUp()
        # Create a course with one unit containing one (empty) lesson.
        course = courses.Course(None, app_context=self.app_context)
        self.unit = course.add_unit()
        self.lesson = course.add_lesson(self.unit)
        course.update_lesson(self.lesson)
        course.save()
        email = 'test_admin@google.com'
        actions.login(email, is_admin=True)

    def load_dto(self, dao, entity_id):
        """Loads a DTO by id from the course's namespace, then restores it."""
        old_namespace = namespace_manager.get_namespace()
        new_namespace = self.app_context.get_namespace_name()
        try:
            namespace_manager.set_namespace(new_namespace)
            return dao.load(entity_id)
        finally:
            namespace_manager.set_namespace(old_namespace)

    def get_response_dict(self, activity_text):
        """PUTs activity_text to the import endpoint; returns parsed body."""
        request = {
            'xsrf_token': XsrfTokenManager.create_xsrf_token('lesson-edit'),
            'key': self.lesson.lesson_id,
            'text': activity_text
        }
        response = self.testapp.put(
            self.URI, params={'request': transforms.dumps(request)})
        return transforms.loads(response.body)

    def get_content_from_service(self, activity_text):
        """Imports activity_text and returns the generated lesson content."""
        response_dict = self.get_response_dict(activity_text)
        self.assertEqual(response_dict['status'], 200)
        return transforms.loads(response_dict['payload'])['content']

    def test_import_multiple_choice(self):
        """Should be able to import a single multiple choice question."""
        content = self.get_content_from_service(self.MULTPLE_CHOICE_QUESTION)
        m = re.match((
            r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
            r'</question>$'), content)
        assert m
        quid = m.group(1)
        question = self.load_dto(models.QuestionDAO, quid)
        self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
        self.assertEqual(question.dict['version'], '1.5')
        self.assertEqual(
            question.dict['description'],
            'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
        self.assertEqual(question.dict['question'], '')
        self.assertEqual(question.dict['multiple_selections'], False)
        self.assertEqual(len(question.dict['choices']), 4)
        choices = question.dict['choices']
        # The single correct choice scores 1.0; incorrect choices score 0.0.
        choices_data = [
            ['a', 0.0, 'A'], ['b', 1.0, 'B'], ['c', 0.0, 'C'],
            ['d', 0.0, 'D']]
        for i, choice in enumerate(choices):
            self.assertEqual(choice['text'], choices_data[i][0])
            self.assertEqual(choice['score'], choices_data[i][1])
            self.assertEqual(choice['feedback'], choices_data[i][2])

    def test_import_multiple_choice_group(self):
        """Should be able to import a single 'multiple choice group'."""
        content = self.get_content_from_service(
            self.MULTPLE_CHOICE_GROUP_QUESTION)
        # The tag links to a question group which embeds two questions
        m = re.match((
            r'^<question-group qgid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
            r'</question-group>$'), content)
        assert m
        quid = m.group(1)
        question_group = self.load_dto(models.QuestionGroupDAO, quid)
        self.assertEqual(question_group.dict['version'], '1.5')
        self.assertEqual(
            question_group.dict['description'],
            'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
        self.assertEqual(len(question_group.dict['items']), 2)
        items = question_group.dict['items']
        self.assertEqual(items[0]['weight'], 1.0)
        self.assertEqual(items[1]['weight'], 1.0)
        # The first question is multiple choice with single selection
        quid = items[0]['question']
        question = self.load_dto(models.QuestionDAO, quid)
        self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
        self.assertEqual(question.dict['version'], '1.5')
        self.assertEqual(
            question.dict['description'],
            (
                'Imported from unit "New Unit", lesson "New Lesson" '
                '(question #1, part #1)'))
        self.assertEqual(question.dict['question'], 'choose a')
        self.assertEqual(question.dict['multiple_selections'], False)
        self.assertEqual(len(question.dict['choices']), 2)
        choices = question.dict['choices']
        self.assertEqual(choices[0]['text'], 'aa')
        self.assertEqual(choices[0]['score'], 1.0)
        self.assertEqual(choices[1]['text'], 'bb')
        self.assertEqual(choices[1]['score'], 0.0)
        # The second question is multiple choice with multiple selection
        quid = items[1]['question']
        question = self.load_dto(models.QuestionDAO, quid)
        self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
        self.assertEqual(question.dict['version'], '1.5')
        self.assertEqual(
            question.dict['description'],
            (
                'Imported from unit "New Unit", lesson "New Lesson" '
                '(question #1, part #2)'))
        self.assertEqual(question.dict['question'], 'choose b or c')
        self.assertEqual(question.dict['multiple_selections'], True)
        self.assertEqual(len(question.dict['choices']), 3)
        choices = question.dict['choices']
        # Incorrect choices score -1.0; each correct choice scores 0.5.
        self.assertEqual(choices[0]['text'], 'aa')
        self.assertEqual(choices[0]['score'], -1.0)
        self.assertEqual(choices[1]['text'], 'bb')
        self.assertEqual(choices[1]['score'], 0.5)
        # Fixed copy-paste defect: the original re-asserted choices[1] here
        # instead of checking the third choice.
        self.assertEqual(choices[2]['text'], 'cc')
        self.assertEqual(choices[2]['score'], 0.5)

    def test_import_freetext(self):
        """Should be able to import a single freetext question."""
        content = self.get_content_from_service(self.FREETEXT_QUESTION)
        m = re.match((
            r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
            r'</question>$'), content)
        assert m
        quid = m.group(1)
        question = self.load_dto(models.QuestionDAO, quid)
        self.assertEqual(question.type, models.QuestionDTO.SHORT_ANSWER)
        self.assertEqual(question.dict['version'], '1.5')
        self.assertEqual(
            question.dict['description'],
            'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
        self.assertEqual(question.dict['question'], '')
        self.assertEqual(question.dict['hint'], 'A hint.')
        self.assertEqual(question.dict['defaultFeedback'], 'Try again.')
        self.assertEqual(len(question.dict['graders']), 1)
        grader = question.dict['graders'][0]
        self.assertEqual(grader['score'], 1.0)
        self.assertEqual(grader['matcher'], 'regex')
        self.assertEqual(grader['response'], '/abc/i')
        self.assertEqual(grader['feedback'], 'Correct.')

    def test_repeated_imports_are_rejected(self):
        response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
        self.assertEqual(response_dict['status'], 200)
        # A second import of the same activity must fail with 412.
        response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
        self.assertEqual(response_dict['status'], 412)
        self.assertTrue(response_dict['message'].startswith(
            'This activity has already been imported.'))

    def test_user_must_be_logged_in(self):
        actions.logout()
        try:
            self.get_response_dict(self.FREETEXT_QUESTION)
            self.fail('Expected 404')
        except AppError:
            pass

    def test_user_must_have_valid_xsrf_token(self):
        # Same request as get_response_dict, but with no xsrf_token.
        request = {
            'key': self.lesson.lesson_id,
            'text': self.FREETEXT_QUESTION
        }
        response = self.testapp.put(
            self.URI, params={'request': transforms.dumps(request)})
        response_dict = transforms.loads(response.body)
        self.assertEqual(response_dict['status'], 403)
class NamespaceTest(actions.TestBase):
    """Checks that the Namespace context manager sets and restores state."""

    def test_namespace_context_manager(self):
        """Nested Namespace blocks restore the previous namespace on exit."""
        original = namespace_manager.get_namespace()
        with Namespace('xyzzy'):
            self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
            with Namespace('plugh'):
                self.assertEqual(namespace_manager.get_namespace(), 'plugh')
            # Leaving the inner block restores the outer namespace.
            self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
        self.assertEqual(namespace_manager.get_namespace(), original)

    def test_namespace_context_manager_handles_exception(self):
        """The namespace is restored even when the body raises."""
        original = namespace_manager.get_namespace()
        try:
            with Namespace('xyzzy'):
                self.assertEqual(namespace_manager.get_namespace(), 'xyzzy')
                raise RuntimeError('No way, Jose')
        except RuntimeError:
            pass
        self.assertEqual(namespace_manager.get_namespace(), original)
# Dynamically mix the shared course test cases into each environment-specific
# test class below.  Appending to __bases__ at import time makes every test
# method in ALL_COURSE_TESTS run again under the memcache, URL-rewriting,
# virtual-file-system and datastore-backed configurations.
ALL_COURSE_TESTS = (
    StudentAspectTest, AssessmentTest, CourseAuthorAspectTest,
    StaticHandlerTest, AdminAspectTest, PeerReviewControllerTest,
    PeerReviewDashboardTest, PeerReviewAnalyticsTest)

MemcacheTest.__bases__ += (InfrastructureTest,) + ALL_COURSE_TESTS
CourseUrlRewritingTest.__bases__ += ALL_COURSE_TESTS
VirtualFileSystemTest.__bases__ += ALL_COURSE_TESTS
DatastoreBackedSampleCourseTest.__bases__ += ALL_COURSE_TESTS
| haoyuchen1992/CourseBuilder | tests/functional/test_classes.py | Python | apache-2.0 | 168,170 | [
"VisIt"
] | 85834ca083aae40e30598f3f1b46231a0e26836b375bac1fd889b9f714378361 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity (deprecated).
These classes are deprecated and replaced with `tf.estimator`.
See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| nburn42/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | Python | apache-2.0 | 12,688 | [
"Gaussian"
] | ead03684a3468a7bca35bcb25ebc4d0d52c355e33f534741e3f36f235be20617 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes raw variant information using header information.
Note that for creating instances of the data objects in this module, there is a
factory function create_processed_variant. Other than that function, these
objects should be used as non-mutable in other scopes, hence all mutating
functions are "private".
"""
import enum
import logging
from typing import Any, Dict, List, Set # pylint: disable=unused-import
from apache_beam.io.gcp.internal.clients import bigquery
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.beam_io import vcf_parser
from gcp_variant_transforms.libs import metrics_util
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs import bigquery_sanitizer
from gcp_variant_transforms.libs import infer_headers_util
from gcp_variant_transforms.libs.annotation import annotation_parser
from gcp_variant_transforms.libs.annotation.vep import descriptions
# Sentinel used by the VCF parser for INFO fields declared with Number='A'
# (one value per alternate allele).
_FIELD_COUNT_ALTERNATE_ALLELE = vcf_parser.FIELD_COUNT_ALTERNATE_ALLELE

# An alias for the header key constants to make referencing easier.
_HeaderKeyConstants = vcf_header_io.VcfParserHeaderKeyConstants

# Alias for the BigQuery schema sanitizer helper class.
_BigQuerySchemaSanitizer = bigquery_sanitizer.SchemaSanitizer

# Map for casting values with VcfHeaderFieldTypeConstants to Python types.
VCF_TYPE_TO_PY = {vcf_header_io.VcfHeaderFieldTypeConstants.STRING: str,
                  vcf_header_io.VcfHeaderFieldTypeConstants.FLOAT: float,
                  vcf_header_io.VcfHeaderFieldTypeConstants.INTEGER: int}
# Counter names
class _CounterEnum(enum.Enum):
    """Names of the metric counters recorded while processing variants."""
    VARIANT = 'variant_counter'
    ALTERNATE_ALLELE_INFO_MISMATCH = 'alternate_allele_info_mismatch_counter'
    ANNOTATION_ALT_MATCH = 'annotation_alt_match_counter'
    ANNOTATION_ALT_MINIMAL_AMBIGUOUS = 'annotation_alt_minimal_ambiguous_counter'
    ANNOTATION_ALT_MISMATCH = 'annotation_alt_mismatch_counter'
    ALLELE_NUM_MISSING = 'allele_num_missing'
    ALLELE_NUM_INCORRECT = 'allele_num_incorrect'
class ProcessedVariant():
    """A wrapper around the ``Variant`` class with extra functionality.

    Given header file information, this can parse INFO fields that need to be
    split and attached to alternates. This is not inherited from
    :class:``vcfio.Variant`` as an encapsulation layer and to prefer
    composition.
    """

    def __init__(self, variant):
        # type: (vcfio.Variant) -> None
        if not isinstance(variant, vcfio.Variant):
            raise ValueError('Expected an instance of vcfio.Variant.')
        self._variant = variant
        # INFO fields that apply to the variant as a whole; populated by
        # ProcessedVariantFactory, not here.
        self._non_alt_info = {}  # type: Dict[str, Any]
        # One AlternateBaseData per ALT, in the same order as
        # `variant.alternate_bases`.
        self._alternate_datas = []  # type: List[AlternateBaseData]
        for a in variant.alternate_bases:
            self._alternate_datas.append(AlternateBaseData(a))

    def __repr__(self):
        return ', '.join(
            [str(s) for s in [
                self._variant,
                self._non_alt_info,
                self._alternate_datas]])

    def __eq__(self, other):
        # Compares every instance attribute, including the wrapped variant.
        return (isinstance(other, ProcessedVariant) and
                vars(self) == vars(other))

    @property
    def reference_name(self):
        # type: () -> str
        return self._variant.reference_name

    @property
    def start(self):
        # type: () -> int
        return self._variant.start

    @property
    def end(self):
        # type: () -> int
        return self._variant.end

    @property
    def reference_bases(self):
        # type: () -> str
        return self._variant.reference_bases

    @property
    def names(self):
        # type: () -> List[str]
        return self._variant.names

    @property
    def quality(self):
        # type: () -> float
        return self._variant.quality

    @property
    def filters(self):
        # type: () -> List[str]
        return self._variant.filters

    @property
    def hom_ref_calls(self):
        # type: () -> List[Tuple[int, str]]
        return self._variant.hom_ref_calls

    @property
    def calls(self):
        # type: () -> List[vcfio.VariantCall]
        return self._variant.calls

    @property
    def non_alt_info(self):
        # type: () -> Dict[str, Any]
        """Returns the INFO fields that are not alternate base specific.

        The type of the values in the map is specified in the VCF header. The
        values are copied from the `vcfio.VariantInfo.data` fields of the
        input variants.
        """
        return self._non_alt_info

    @property
    def alternate_data_list(self):
        # type: () -> List[AlternateBaseData]
        return self._alternate_datas
class AlternateBaseData():
    """Container for all information attached to one alternate-bases string."""

    def __init__(self, alt_bases):
        # type: (str) -> None
        """
        Args:
          alt_bases(str): The alternate bases string for this instance.
        """
        self._alt_bases = alt_bases
        # Split annotation fields live in `_info` as well; for those keys the
        # stored value is itself a list of dicts (one per annotation entry).
        self._info = {}  # type: Dict[str, Any]
        self._annotation_field_names = set()  # type: Set[str]

    def __repr__(self):
        return '{}, {}'.format(self._alt_bases, self._info)

    def __eq__(self, other):
        if not isinstance(other, AlternateBaseData):
            return False
        return (self._alt_bases == other._alt_bases
                and self._info == other._info)

    @property
    def alternate_bases(self):
        # type: () -> str
        return self._alt_bases

    @property
    def info(self):
        # type: () -> Dict[str, Any]
        return self._info

    @property
    def annotation_field_names(self):
        # type: () -> Set[str]
        return self._annotation_field_names
class ProcessedVariantFactory():
    """Factory class for creating `ProcessedVariant` instances.

    This is the only right way for creating ProcessedVariants in production
    code. It uses the header information to process INFO fields and split them
    between alternates if needed. In the process, it does some header sanity
    checking too.
    """

    def __init__(
        self,
        header_fields,  # type: vcf_header_io.VcfHeader
        split_alternate_allele_info_fields=True,  # type: bool
        allow_alternate_allele_info_mismatch=False,  # type: bool
        annotation_fields=None,  # type: List[str]
        use_allele_num=False,  # type: bool
        minimal_match=False,  # type: bool
        infer_annotation_types=False,  # type: bool
        counter_factory=None  # type: metrics_util.CounterFactoryInterface
    ):
        # type: (...) -> None
        """Sets the internal state of the factory class.

        Args:
          header_fields: Header information used for parsing and splitting INFO
            fields of the variant.
          split_alternate_allele_info_fields: If True, splits fields with
            `field_count='A'` (i.e., one value for each alternate) among
            alternates.
          allow_alternate_allele_info_mismatch: By default (when False), an
            error will be raised for INFO fields with `field_count='A'` (i.e.
            one value for each alternate base) that do not have the same
            cardinality as alternate bases. If True, an error will not be
            raised and excess values will be dropped or insufficient values
            will be set to null. Only applicable if
            `split_alternate_allele_info_fields` is True.
          annotation_fields: If provided, this is the list of INFO field names
            that store variant annotations. The format of how annotations are
            stored and their names are extracted from header_fields.
          use_allele_num: If True, then "ALLELE_NUM" annotation is used to
            determine the index of the ALT that corresponds to an annotation
            set.
          minimal_match: If True, then the --minimal mode of VEP is simulated
            for annotation ALT matching.
          infer_annotation_types: If True, then warnings will be provided if
            header fields fail to contain Info type lines for annotation
            fields.
          counter_factory: If provided, it will be used to record counters
            (e.g. the number of variants processed).
        """
        self._header_fields = header_fields
        self._split_alternate_allele_info_fields = (
            split_alternate_allele_info_fields)
        self._allow_alternate_allele_info_mismatch = (
            allow_alternate_allele_info_mismatch)
        self._annotation_field_set = set(annotation_fields or [])
        # Fall back to no-op counters when no counter factory is supplied.
        cfactory = counter_factory or metrics_util.NoOpCounterFactory()
        self._variant_counter = cfactory.create_counter(
            _CounterEnum.VARIANT.value)
        self._alternate_allele_info_mismatche_counter = cfactory.create_counter(
            _CounterEnum.ALTERNATE_ALLELE_INFO_MISMATCH.value)
        self._annotation_processor = _AnnotationProcessor(
            annotation_fields, self._header_fields, cfactory, use_allele_num,
            minimal_match, infer_annotation_types)
        self._minimal_match = minimal_match
        self._infer_annotation_types = infer_annotation_types

    def create_processed_variant(self, variant):
        # type: (vcfio.Variant) -> ProcessedVariant
        """The main factory method for creating ProcessedVariants.

        Args:
          variant (:class:`vcfio.Variant`): The raw variant information.
        """
        proc_var = ProcessedVariant(variant)
        self._variant_counter.inc()
        # Route each INFO field to the right bucket: annotation fields are
        # parsed and matched to ALTs, Number='A' fields are split per ALT,
        # everything else stays variant-level.
        for key, variant_info_data in variant.info.items():
            if key in self._annotation_field_set:
                self._annotation_processor.add_annotation_data(
                    proc_var, key, variant_info_data)
            elif self._is_per_alt_info_field(key):
                self._add_per_alt_info(proc_var, key, variant_info_data)
            else:
                proc_var._non_alt_info[key] = variant_info_data
        return proc_var

    def _add_per_alt_info(self, proc_var, field_name, variant_info_data):
        # type: (ProcessedVariant, str, vcfio.VariantInfo) -> None
        num_variant_infos = len(variant_info_data)
        num_alternate_bases = len(proc_var._alternate_datas)
        if num_variant_infos != num_alternate_bases:
            error_message = (
                'Per alternate INFO field "{}" does not have same cardinality as '
                'number of alternates: {} vs {} in variant: "{}"'.format(
                    field_name, num_variant_infos, num_alternate_bases, proc_var))
            self._alternate_allele_info_mismatche_counter.inc()
            # On mismatch either warn (if allowed) or fail hard.
            if self._allow_alternate_allele_info_mismatch:
                logging.warning(error_message)
            else:
                raise ValueError(error_message)
        # Assign values to alternates positionally; with a tolerated mismatch,
        # excess values are dropped and missing ones are simply not set.
        for alt_index in range(min(num_variant_infos, num_alternate_bases)):
            proc_var._alternate_datas[alt_index]._info[field_name] = (
                variant_info_data[alt_index])

    def create_alt_bases_field_schema(self):
        # type: () -> bigquery.TableFieldSchema
        """Returns the alternate_bases record compatible with this factory.

        Depending on how this class is set up to split INFO fields among
        alternate bases, this function produces a compatible alternate_bases
        record and returns it which can be added to a bigquery schema by the
        caller.
        """
        alternate_bases_record = bigquery.TableFieldSchema(
            name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
            type=bigquery_util.TableFieldConstants.TYPE_RECORD,
            mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
            description='One record for each alternate base (if any).')
        alternate_bases_record.fields.append(bigquery.TableFieldSchema(
            name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES_ALT,
            type=bigquery_util.TableFieldConstants.TYPE_STRING,
            mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
            description='Alternate base.'))
        # Add one nullable column per Number='A' INFO field, since those values
        # are stored inside the alternate_bases record when splitting is on.
        if self._split_alternate_allele_info_fields:
            for key, field in self._header_fields.infos.items():
                if self._is_num_a(field[_HeaderKeyConstants.NUM]):
                    alternate_bases_record.fields.append(bigquery.TableFieldSchema(
                        name=_BigQuerySchemaSanitizer.get_sanitized_field_name(key),
                        type=bigquery_util.get_bigquery_type_from_vcf_type(
                            field[_HeaderKeyConstants.TYPE]),
                        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
                        description=_BigQuerySchemaSanitizer.get_sanitized_string(
                            field[_HeaderKeyConstants.DESC])))
        # Each annotation INFO field (e.g. CSQ) becomes a repeated sub-record
        # with one column per annotation name from the header description.
        for annot_field in self._annotation_field_set:
            if annot_field not in self._header_fields.infos:
                raise ValueError('Annotation field {} not found'.format(annot_field))
            annotation_descs = descriptions.VEP_DESCRIPTIONS
            annotation_record = bigquery.TableFieldSchema(
                name=_BigQuerySchemaSanitizer.get_sanitized_field_name(annot_field),
                type=bigquery_util.TableFieldConstants.TYPE_RECORD,
                mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
                description='List of {} annotations for this alternate.'.format(
                    annot_field))
            annotation_record.fields.append(bigquery.TableFieldSchema(
                name=annotation_parser.ANNOTATION_ALT,
                type=bigquery_util.TableFieldConstants.TYPE_STRING,
                mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
                description='The ALT part of the annotation field.'))
            annotation_names_keys = self._gen_annotation_name_key_pairs(annot_field)
            for annotation_name, type_key in annotation_names_keys:
                if type_key in self._header_fields.infos:
                    # An inferred-type header line exists; use its declared type.
                    vcf_type = self._header_fields.infos[type_key][
                        vcf_header_io.VcfParserHeaderKeyConstants.TYPE]
                else:
                    # No type information available: default to STRING.
                    vcf_type = vcf_header_io.VcfHeaderFieldTypeConstants.STRING
                    if self._infer_annotation_types:
                        logging.warning(('Annotation field %s has no corresponding header '
                                         'field with id %s to specify type. Using type %s '
                                         'instead.'), annotation_name, type_key, vcf_type)
                annotation_record.fields.append(bigquery.TableFieldSchema(
                    name=_BigQuerySchemaSanitizer.get_sanitized_field_name(
                        annotation_name),
                    type=bigquery_util.get_bigquery_type_from_vcf_type(vcf_type),
                    mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
                    description=annotation_descs.get(annotation_name, '')))
            alternate_bases_record.fields.append(annotation_record)
        return alternate_bases_record

    def _gen_annotation_name_key_pairs(self, annot_field):
        # type: (str) -> Iterator[Tuple[str, str]]
        # Yields (annotation name, inferred-type header key) pairs for every
        # annotation name listed in the field's header description.
        annotation_names = annotation_parser.extract_annotation_names(
            self._header_fields.infos[annot_field][_HeaderKeyConstants.DESC])
        for name in annotation_names:
            type_key = infer_headers_util.get_inferred_annotation_type_header_key(
                annot_field, name)
            yield name, type_key

    def gen_annotation_info_type_keys(self):
        # type: () -> Iterator[str]
        """Generates all possible key IDs for annotation type info fields.

        Yields:
          type_key: IDs for info fields added during inferring annotation
            types. For example, if annotations fields are ('CSQ', 'CSQ_VT'),
            and names are ['Gene', 'Impact'], this will yield
            ('CSQ_Gene_TYPE', 'CSQ_Impact_TYPE', 'CSQ_VT_Gene_TYPE',
            'CSQ_VT_Impact_TYPE').
        """
        for annot_field in self._annotation_field_set:
            for _, type_key in self._gen_annotation_name_key_pairs(annot_field):
                yield type_key

    def info_is_in_alt_bases(self, info_field_name):
        # type: (str) -> bool
        """Returns True iff the field is stored in the alternate_bases record."""
        if info_field_name not in self._header_fields.infos:
            raise ValueError('INFO field {} not found'.format(info_field_name))
        is_per_alt_info = self._is_per_alt_info_field(info_field_name)
        is_annotation = info_field_name in self._annotation_field_set
        return is_per_alt_info or is_annotation

    def _is_per_alt_info_field(self, info_field_name):
        # type: (str) -> bool
        """Returns true iff `info_field_name` is defined as having Number=A."""
        return (
            self._split_alternate_allele_info_fields and
            info_field_name in self._header_fields.infos and
            self._is_num_a(
                self._header_fields.infos[info_field_name]
                [_HeaderKeyConstants.NUM]))

    def _is_num_a(self, field_value):
        # type: (Any) -> bool
        # Checks for PySAM values for Number='A'.
        return field_value == _FIELD_COUNT_ALTERNATE_ALLELE
class _AnnotationProcessor():
"""This is for handling all annotation related logic for variants."""
def __init__(self,
annotation_fields, # type: List[str]
header_fields, # type: vcf_header_io.VcfHeader
counter_factory, # type: metrics_util.CounterFactoryInterface
use_allele_num, # type: bool
minimal_match, # type: bool
infer_annotation_types, # type: bool
):
# type: (...) -> None
"""Creates an instance for adding annotations to `ProcessedVariant` objects.
Note this class is intended to be an auxiliary for ProcessedVariantFactory
and is used for creating annotation related parts of a `ProcessedVariant`
object. So it is an implementation detail and not part of the public API.
Args:
annotation_fields: The list of INFO field names that store variant
annotations. The format of how annotations are stored and their names
are extracted from header_fields.
header_fields: The VCF header information.
infer_annotation_types: If set, then warnings will be provided if header
fields fail to contain Info type lines for annotation fields
"""
self._header_fields = header_fields
self._annotation_names_map = {} # type: Dict[str, List[str]]
for field in annotation_fields or []:
if field not in header_fields.infos:
raise ValueError('{} INFO not found in the header'.format(field))
header_desc = header_fields.infos[field][_HeaderKeyConstants.DESC]
self._annotation_names_map[field] = (
annotation_parser.extract_annotation_names(header_desc))
self._alt_match_counter = counter_factory.create_counter(
_CounterEnum.ANNOTATION_ALT_MATCH.value)
self._alt_minimal_ambiguous_counter = counter_factory.create_counter(
_CounterEnum.ANNOTATION_ALT_MINIMAL_AMBIGUOUS.value)
self._alt_mismatch_counter = counter_factory.create_counter(
_CounterEnum.ANNOTATION_ALT_MISMATCH.value)
self._allele_num_missing_counter = counter_factory.create_counter(
_CounterEnum.ALLELE_NUM_MISSING.value)
self._allele_num_incorrect_counter = counter_factory.create_counter(
_CounterEnum.ALLELE_NUM_INCORRECT.value)
self._use_allele_num = use_allele_num
self._minimal_match = minimal_match
self._infer_annotation_types = infer_annotation_types
def add_annotation_data(self, proc_var, annotation_field_name, data):
# type: (ProcessedVariant, str, List[str]) -> None
"""The main function for adding annotation data to `proc_var`.
This adds the data for annotation INFO field `annotation_field_name` based
on the format specified for it in the header. `data` items are split
among `proc_var._alternate_datas` based on the ALT matching logic.
The only assumption about `proc_var` is that its `_alternate_datas`
has been initialized with valid `AlternateBaseData` objects.
Args:
proc_var: The object to which the annotations are being added.
annotation_field_name: The name of the annotation field, e.g., ANN or CSQ.
data: The data part of the field separated on comma. A single element
of this list looks something like (taken from an Ensembl VEP run):
G|upstream_gene_variant|MODIFIER|PSMF1|ENSG00000125818|...
where the '|' character is the separator. The first element is a way
to identify the allele (one of the ALTs) that this annotation data
refers to. The rest of the elements are annotations corresponding to the
`annotation_field_name` format description in the header, e.g.,
Allele|Consequence|IMPACT|SYMBOL|Gene|...
"""
# NOTE(review): leading indentation of this method was lost during text
# extraction; the comments below follow the apparent block structure.
# Ordered ALT bases, used by the parser to match each annotation to an ALT.
alt_list = [a.alternate_bases for a in proc_var._alternate_datas]
parser = annotation_parser.Parser(
proc_var.reference_bases, alt_list,
self._annotation_names_map[annotation_field_name], self._use_allele_num,
self._minimal_match)
# Each element of `data` describes one annotation for one ALT.
for annotation_str in data:
try:
# `ind` is the index of the matched ALT; `annotation_map` maps the
# annotation names (from the header description) to raw string values.
ind, annotation_map = parser.parse_and_match_alt(annotation_str)
# Convert raw string values in place to the types declared (or
# inferred) in the header; empty values become None.
for name, value in annotation_map.items():
if name == annotation_parser.ANNOTATION_ALT:
continue
type_key = infer_headers_util.get_inferred_annotation_type_header_key(
annotation_field_name, name)
vcf_type = self._vcf_type_from_annotation_header(
annotation_field_name, type_key)
typed_value = VCF_TYPE_TO_PY[vcf_type](value) if value else None
annotation_map[name] = typed_value
self._alt_match_counter.inc()
# Attach the typed annotation map to the matched ALT's info.
alt_datas = proc_var._alternate_datas[ind]
if annotation_field_name not in alt_datas._info:
alt_datas._info[annotation_field_name] = [annotation_map]
else:
alt_datas._info[annotation_field_name].append(annotation_map)
alt_datas.annotation_field_names.add(annotation_field_name)
except annotation_parser.AnnotationParserException as e:
# Parsing failures are logged and counted but never abort the variant.
logging.warning(
'Parsing of annotation field %s failed at reference %s start %d: '
'%s', annotation_field_name, proc_var.reference_name,
proc_var.start, str(e))
if isinstance(e, annotation_parser.AnnotationAltNotFound):
self._alt_mismatch_counter.inc()
elif isinstance(e, annotation_parser.AlleleNumMissing):
self._allele_num_missing_counter.inc()
elif isinstance(e, annotation_parser.InvalidAlleleNumValue):
self._allele_num_incorrect_counter.inc()
elif isinstance(e, annotation_parser.AmbiguousAnnotationAllele):
self._alt_minimal_ambiguous_counter.inc()
def _vcf_type_from_annotation_header(self, annotation_name, type_key):
    # type: (str, str) -> str
    """Returns the VCF type declared in the header for an annotation name.

    Looks up the INFO header line keyed by `type_key`; when no such line
    exists, falls back to the String type (warning when type inference was
    requested).
    """
    header_infos = self._header_fields.infos
    if type_key in header_infos:
        return header_infos[type_key][_HeaderKeyConstants.TYPE]
    # No header line declares a type for this annotation: fall back to String.
    fallback_type = vcf_header_io.VcfHeaderFieldTypeConstants.STRING
    if self._infer_annotation_types:
        logging.warning(('Annotation field %s has no corresponding header '
                         'field with id %s to specify type. Using type %s '
                         'instead.'), annotation_name, type_key, fallback_type)
    return fallback_type
| googlegenomics/gcp-variant-transforms | gcp_variant_transforms/libs/processed_variant.py | Python | apache-2.0 | 22,796 | [
"pysam"
] | 577787c68ef17e4332d48c388d3a1ded96c73124e758e73e86da010ae4368670 |
# TODO check num_threads before testing / 8 for Cisco Server
# TODO ATTENTION! Maybe there are some mistakes in neuron parameters! Write to alexey.panzer@gmail.com.
# NEST simulation script: builds a noradrenaline (locus coeruleus centred)
# circuit, wires the populations, attaches recorders and runs the simulation.
# NOTE(review): leading indentation (e.g. of the bodies of `if nora_flag:`
# and `if generator_flag:`) was lost during text extraction; restore it
# before executing this script.
from func import *
# `func` is presumed to provide nest, logging, datetime, the population
# handles (motor, lc, pf, ...) and the helpers used below -- TODO confirm.
logger = logging.getLogger('neuromodulation')
startbuild = datetime.datetime.now()
# Reset the NEST kernel and configure threading/resolution before building.
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True, 'local_num_threads': 4, 'resolution': 0.1})
generate_neurons()
logger.debug("* * * Start connection initialisation")
# Static (glutamatergic) afferents onto the locus coeruleus and BNST.
connect(motor[motor_cortex], lc[locus_coeruleus], syn_type=Glu, weight_coef=0.005)
connect(pf[prefrontal_cortex], lc[locus_coeruleus], syn_type=Glu, weight_coef=0.005)
connect(pf[prefrontal_cortex], bnst[bed_nucleus_of_the_stria_terminalis], syn_type=Glu, weight_coef=0.005)
# NOTE(review): PGi -> LC is connected twice (default synapse, then Glu);
# confirm both connections are intended.
connect(pgi[nucleus_paragigantocellularis_lateralis], lc[locus_coeruleus])
connect(pgi[nucleus_paragigantocellularis_lateralis], lc[locus_coeruleus], syn_type=Glu, weight_coef=0.005)
connect(prh[perirhinal_cortex], lc[locus_coeruleus])
connect(ldt[laterodorsal_tegmentum], lc[locus_coeruleus], syn_type=Glu, weight_coef=0.005)
connect(ldt[laterodorsal_tegmentum], bnst[bed_nucleus_of_the_stria_terminalis], syn_type=Glu, weight_coef=0.005)
if nora_flag:
#TODO
logger.debug("* * * Making neuromodulating connections...")
# Volume transmitters mediate the modulatory plasticity of the
# stdp_dopamine_synapse copies created just below.
vt_ex = nest.Create('volume_transmitter')
vt_in = nest.Create('volume_transmitter')
NORA_synparams_ex['vt'] = vt_ex[0]
NORA_synparams_in['vt'] = vt_in[0]
nest.CopyModel('stdp_dopamine_synapse', nora_model_ex, NORA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', nora_model_in, NORA_synparams_in)
# Modulatory afferents onto LC / striatum / BNST.
connect(nts[nts_a1], lc[locus_coeruleus], syn_type=DA_ex, weight_coef=0.005)
connect(nts[nts_a2], lc[locus_coeruleus], syn_type=DA_ex, weight_coef=0.005)
# NOTE(review): the line below duplicates the previous NTS(A2) -> LC
# connection verbatim; confirm the doubling is intentional.
connect(nts[nts_a2], lc[locus_coeruleus], syn_type=DA_ex, weight_coef=0.005)
connect(nts[nts_a2], striatum[striatumR], syn_type=DA_ex, weight_coef=0.005)
connect(nts[nts_a2], bnst[bed_nucleus_of_the_stria_terminalis], syn_type=DA_ex, weight_coef=0.005)
# Noradrenergic efferents from the locus coeruleus.
connect(lc[locus_coeruleus], pf[prefrontal_cortex], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], motor[motor_cortex], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], vta[vta_a1], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], rn[rn_a1], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], rn[rn_a2], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], ldt[LDT_a1], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], ldt[LDT_a2], syn_type=DA_ex, weight_coef=0.005)
connect(lc[locus_coeruleus], striatum[striatumR], syn_type=DA_ex, weight_coef=0.005)
connect(vta[ventral_tegmental_area], lc[lc_D1], syn_type=DA_in, weight_coef=0.005)
connect(vta[ventral_tegmental_area], lc[lc_D2], syn_type=DA_in, weight_coef=0.005)
# The triple-quoted strings below hold disabled (commented-out) code kept
# for reference; they are not executed.
"""
# Connect the volume transmitter to the parts
vt_ex = nest.Create('volume_transmitter')
vt_in = nest.Create('volume_transmitter')
NORA_synparams_ex['vt'] = vt_ex[0]
NORA_synparams_in['vt'] = vt_in[0]
nest.CopyModel('stdp_dopamine_synapse', nora_model_ex, NORA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', nora_model_in, NORA_synparams_in)
nest.Connect(snc[snc_DA][k_IDs], vt_ex)
nest.Connect(snc[snc_DA][k_IDs], vt_in)
nest.Connect(vta[vta_DA0][k_IDs], vt_ex)
nest.Connect(vta[vta_DA1][k_IDs], vt_ex)
# NIGROSTRIATAL
connect(snc[snc_DA], striatum[D1], syn_type=NR_ex)
connect(snc[snc_DA], gpe[gpe_GABA], syn_type=NR_ex)
connect(snc[snc_DA], stn[stn_Glu], syn_type=NR_ex)
connect(snc[snc_DA], nac[nac_GABA0], syn_type=NR_ex)
connect(snc[snc_DA], nac[nac_GABA1], syn_type=NR_ex)
connect(snc[snc_DA], striatum[D2], syn_type=DA_in)
connect(snc[snc_DA], striatum[tan], syn_type=DA_in)
# MESOCORTICOLIMBIC
connect(vta[vta_DA0], striatum[D1], syn_type=NR_ex)
connect(vta[vta_DA0], striatum[D2], syn_type=DA_in)
connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=NR_ex)
connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=NR_ex)
connect(vta[vta_DA1], nac[nac_GABA0], syn_type=NR_ex)
connect(vta[vta_DA1], nac[nac_GABA1], syn_type=NR_ex)
"""
logger.debug("* * * Creating spike generators...")
if generator_flag:
"""
connect_generator(motor[motor_Glu0], rate=300, coef_part=1)
connect_generator(pptg[pptg_GABA], 400., 600., rate=250, coef_part=1)
connect_generator(pptg[pptg_Glu], 400., 600., rate=250, coef_part=1)
connect_generator(pptg[pptg_ACh], 400., 600., rate=250, coef_part=1)
connect_generator(amygdala[amygdala_Glu], 400., 600., rate=250, coef_part=1)
connect_generator(snc[snc_DA], 400., 600., rate=250, coef_part=1)
connect_generator(vta[vta_DA0], 400., 600., rate=250, coef_part=1)
"""
logger.debug("* * * Attaching spikes detector")
# Record spikes from the two populations of interest.
connect_detector(lc[locus_coeruleus])
connect_detector(motor[motor_cortex])
"""
connect_detector(gpe[gpe_GABA])
connect_detector(stn[stn_Glu])
connect_detector(snc[snc_DA])
connect_detector(thalamus[thalamus_Glu])
connect_detector(striatum[tan])
connect_detector(striatum[D1])
connect_detector(striatum[D2])
connect_detector(motor[motor_Glu1])
connect_detector(motor[motor_Glu0])
connect_detector(prefrontal[pfc_Glu0])
connect_detector(vta[vta_DA0])
connect_detector(vta[vta_DA1])
connect_detector(snc[snc_DA])
"""
logger.debug("* * * Attaching multimeters")
# Record membrane traces from the same two populations.
connect_multimeter(lc[locus_coeruleus])
connect_multimeter(motor[motor_cortex])
"""
connect_multimeter(gpe[gpe_GABA])
connect_multimeter(stn[stn_Glu])
connect_multimeter(snc[snc_DA])
connect_multimeter(thalamus[thalamus_Glu])
connect_multimeter(striatum[tan])
connect_multimeter(striatum[D1])
connect_multimeter(striatum[D2])
connect_multimeter(motor[motor_Glu1])
connect_multimeter(motor[motor_Glu0])
connect_multimeter(prefrontal[pfc_Glu0])
connect_multimeter(vta[vta_DA0])
connect_multimeter(vta[vta_DA1])
connect_multimeter(snc[snc_DA])
"""
# Release builder helpers before the (potentially long) simulation run.
del generate_neurons, connect, connect_generator, connect_detector, connect_multimeter
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(GUI=statusGUI)
"NEURON"
] | 89d116676ecc9f7ef366d1569a6e735bc445a2b93904f25ad4e1bbdd98ca4d2e |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 10:34:10 2019
@author: cwhanse
"""
import numpy as np
def fit_sdm_cec_sam(celltype, v_mp, i_mp, v_oc, i_sc, alpha_sc, beta_voc,
                    gamma_pmp, cells_in_series, temp_ref=25):
    """
    Estimates parameters for the CEC single diode model (SDM) using the SAM
    SDK.

    Parameters
    ----------
    celltype : str
        One of 'monoSi', 'multiSi', 'polySi', 'cis', 'cigs', 'cdte',
        'amorphous'
    v_mp : float
        Voltage at maximum power point [V]
    i_mp : float
        Current at maximum power point [A]
    v_oc : float
        Open circuit voltage [V]
    i_sc : float
        Short circuit current [A]
    alpha_sc : float
        Temperature coefficient of short circuit current [A/C]
    beta_voc : float
        Temperature coefficient of open circuit voltage [V/C]
    gamma_pmp : float
        Temperature coefficient of power at maximum power point [%/C]
    cells_in_series : int
        Number of cells in series
    temp_ref : float, default 25
        Reference temperature condition [C]

    Returns
    -------
    tuple
        ``(I_L_ref, I_o_ref, R_sh_ref, R_s, a_ref, Adjust)`` -- photocurrent
        [A], diode reverse saturation current [A], shunt resistance [ohms],
        series resistance [ohms], modified ideality factor [V], and the
        adjustment to the short-circuit current temperature coefficient [%],
        all at the reference conditions.

    Raises
    ------
    ImportError
        If NREL-PySAM is not installed.
    RuntimeError
        If parameter extraction is not successful.

    Notes
    -----
    The IV-curve quantities are assumed to come from a single curve at
    constant irradiance and cell temperature; together with ``temp_ref`` they
    define the reference conditions of the returned parameters.

    References
    ----------
    .. [1] A. Dobos, "An Improved Coefficient Calculator for the California
       Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
       Solar Energy Engineering, vol 134, 2012.
    """
    try:
        from PySAM import PySSC
    except ImportError:
        raise ImportError("Requires NREL's PySAM package at "
                          "https://pypi.org/project/NREL-PySAM/.")

    # Assemble the 6-parameter-solver request for the SAM simulation core.
    request = {'tech_model': '6parsolve', 'financial_model': None,
               'celltype': celltype, 'Vmp': v_mp,
               'Imp': i_mp, 'Voc': v_oc, 'Isc': i_sc, 'alpha_isc': alpha_sc,
               'beta_voc': beta_voc, 'gamma_pmp': gamma_pmp,
               'Nser': cells_in_series, 'Tref': temp_ref}
    result = PySSC.ssc_sim_from_dict(request)

    if result['cmod_success'] != 1:
        raise RuntimeError('Parameter estimation failed')
    # Unpack the solver outputs in the documented return order.
    output_keys = ('Il', 'Io', 'Rsh', 'Rs', 'a', 'Adj')
    return tuple(result[key] for key in output_keys)
def fit_sde_sandia(voltage, current, v_oc=None, i_sc=None, v_mp_i_mp=None,
                   vlim=0.2, ilim=0.1):
    r"""
    Fits the single diode equation (SDE) to an IV curve.

    Parameters
    ----------
    voltage : ndarray
        1D array of `float` containing voltage at each point on the IV
        curve, increasing from 0 to ``v_oc`` inclusive [V]
    current : ndarray
        1D array of `float` containing current at each point on the IV
        curve, from ``i_sc`` to 0 inclusive [A]
    v_oc : float, default None
        Open circuit voltage [V]. Defaults to the last element of
        ``voltage``.
    i_sc : float, default None
        Short circuit current [A]. Defaults to the first element of
        ``current``.
    v_mp_i_mp : tuple of float, default None
        Voltage, current at the maximum power point [V], [A]. If not
        provided, the maximum power point is found at the maximum of
        ``voltage`` \times ``current``.
    vlim : float, default 0.2
        Defines the portion of the IV curve (``voltage <= vlim * v_oc``)
        where the exponential term of the single diode equation can be
        neglected.
    ilim : float, default 0.1
        Defines the portion of the IV curve, approximately
        ``current < (1 - ilim) * i_sc``, where the exponential term of the
        single diode equation is significant.

    Returns
    -------
    tuple
        ``(photocurrent [A], saturation_current [A], resistance_shunt
        [ohms], resistance_series [ohms], nNsVth [V])`` where ``nNsVth`` is
        the product of thermal voltage, diode ideality factor and number of
        series cells.

    Raises
    ------
    RuntimeError
        If parameter extraction is not successful.

    Notes
    -----
    All inputs are assumed to come from a single IV curve at constant
    irradiance and cell temperature. The procedure fits a line to the
    low-voltage portion of the curve, a log-linear regression to the
    exponential portion, and converts the regression coefficients to the
    five SDE parameters; see [2]_ for the derivation and [1]_ for the
    single diode equation itself.

    References
    ----------
    .. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
       0 86758 909 4
    .. [2] C. B. Jones, C. W. Hansen, Single Diode Parameter Extraction from
       In-Field Photovoltaic I-V Curves on a Single Board Computer, 46th IEEE
       Photovoltaic Specialist Conference, Chicago, IL, 2019
    """
    # Fill in any IV-curve landmarks the caller did not supply.
    if v_oc is None:
        v_oc = voltage[-1]
    if i_sc is None:
        i_sc = current[0]
    if v_mp_i_mp is None:
        v_mp, i_mp = _find_mp(voltage, current)
    else:
        v_mp, i_mp = v_mp_i_mp

    # Line fit over the low-voltage (linear) portion of the curve.
    beta0, beta1 = _find_beta0_beta1(voltage, current, vlim, v_oc)

    # Log-linear regression over the exponential portion of the curve.
    beta3, beta4 = _find_beta3_beta4(voltage, current, beta0, beta1, ilim,
                                     i_sc)

    # Translate the regression coefficients into the five SDE parameters.
    return _calculate_sde_parameters(beta0, beta1, beta3, beta4, v_mp, i_mp,
                                     v_oc)
def fit_sdm_desoto(v_mp, i_mp, v_oc, i_sc, alpha_sc, beta_voc,
                   cells_in_series, EgRef=1.121, dEgdT=-0.0002677,
                   temp_ref=25, irrad_ref=1000, root_kwargs=None):
    """
    Calculates the parameters for the De Soto single diode model.

    This procedure (described in [1]_) has the advantage of using common
    specifications given by manufacturers in the datasheets of PV modules.

    The solution is found using scipy.optimize.root(), with the
    corresponding default solver method 'hybr'. No restriction is put on
    the fit variables, i.e. series or shunt resistance could go negative.
    Nevertheless, if it happens, check carefully the inputs and their
    units; alpha_sc and beta_voc are often given in %/K in manufacturers
    datasheets and should be given in A/K and V/K here.

    The parameters returned by this function can be used by
    pvsystem.calcparams_desoto to calculate the values at different
    irradiance and cell temperature.

    Parameters
    ----------
    v_mp: float
        Module voltage at the maximum-power point at reference conditions [V].
    i_mp: float
        Module current at the maximum-power point at reference conditions [A].
    v_oc: float
        Open-circuit voltage at reference conditions [V].
    i_sc: float
        Short-circuit current at reference conditions [A].
    alpha_sc: float
        The short-circuit current (i_sc) temperature coefficient of the
        module [A/K].
    beta_voc: float
        The open-circuit voltage (v_oc) temperature coefficient of the
        module [V/K].
    cells_in_series: integer
        Number of cell in the module.
    EgRef: float, default 1.121 eV - value for silicon
        Energy of bandgap of semi-conductor used [eV]
    dEgdT: float, default -0.0002677 - value for silicon
        Variation of bandgap according to temperature [eV/K]
    temp_ref: float, default 25
        Reference temperature condition [C]
    irrad_ref: float, default 1000
        Reference irradiance condition [W/m2]
    root_kwargs: dictionary, optional
        Dictionary of arguments to pass onto scipy.optimize.root().
        Defaults to an empty dictionary.

    Returns
    -------
    Dictionary with the following elements:

        * ``I_L_ref`` (float) --
          Light-generated current at reference conditions [A]
        * ``I_o_ref`` (float) --
          Diode saturation current at reference conditions [A]
        * ``R_s`` (float) --
          Series resistance [ohms]
        * ``R_sh_ref`` (float) --
          Shunt resistance at reference conditions [ohms].
        * ``a_ref`` (float) --
          Modified ideality factor at reference conditions.
          The product of the usual diode ideality factor (n, unitless),
          number of cells in series (Ns), and cell thermal voltage at
          specified effective irradiance and cell temperature.
        * ``alpha_sc`` (float) --
          The short-circuit current (i_sc) temperature coefficient of the
          module [A/K].
        * ``EgRef`` (float) --
          Energy of bandgap of semi-conductor used [eV]
        * ``dEgdT`` (float) --
          Variation of bandgap according to temperature [eV/K]
        * ``irrad_ref`` (float) --
          Reference irradiance condition [W/m2]
        * ``temp_ref`` (float) --
          Reference temperature condition [C]

    scipy.optimize.OptimizeResult
        Optimization result of scipy.optimize.root().
        See scipy.optimize.OptimizeResult for more details.

    Raises
    ------
    ImportError
        If scipy is not installed.
    RuntimeError
        If the root finder does not converge.

    References
    ----------
    .. [1] W. De Soto et al., "Improvement and validation of a model for
       photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
       2006.
    .. [2] John A Duffie, William A Beckman, "Solar Engineering of Thermal
       Processes", Wiley, 2013
    """
    try:
        from scipy.optimize import root
        from scipy import constants
    except ImportError:
        raise ImportError("The fit_sdm_desoto function requires scipy.")

    # BUG FIX: the default used to be the mutable literal ``{}``, which is
    # shared across calls; use a None sentinel instead.
    if root_kwargs is None:
        root_kwargs = {}

    # Constants
    k = constants.value('Boltzmann constant in eV/K')
    Tref = temp_ref + 273.15  # [K]

    # initial guesses of variables for computing convergence:
    # Values are taken from [2], p753
    Rsh_0 = 100.0
    a_0 = 1.5 * k * Tref * cells_in_series
    IL_0 = i_sc
    Io_0 = i_sc * np.exp(-v_oc / a_0)
    Rs_0 = (a_0 * np.log1p((IL_0 - i_mp) / Io_0) - v_mp) / i_mp
    # params_i : initial values vector
    params_i = np.array([IL_0, Io_0, a_0, Rsh_0, Rs_0])

    # specs of module
    specs = (i_sc, v_oc, i_mp, v_mp, beta_voc, alpha_sc, EgRef, dEgdT,
             Tref, k)

    # computing with system of equations described in [1]
    optimize_result = root(_system_of_equations_desoto, x0=params_i,
                           args=(specs,), **root_kwargs)

    if optimize_result.success:
        sdm_params = optimize_result.x
    else:
        raise RuntimeError(
            'Parameter estimation failed:\n' + optimize_result.message)

    # results
    return ({'I_L_ref': sdm_params[0],
             'I_o_ref': sdm_params[1],
             'a_ref': sdm_params[2],
             'R_sh_ref': sdm_params[3],
             'R_s': sdm_params[4],
             'alpha_sc': alpha_sc,
             'EgRef': EgRef,
             'dEgdT': dEgdT,
             'irrad_ref': irrad_ref,
             'temp_ref': temp_ref},
            optimize_result)
def _find_mp(voltage, current):
"""
Finds voltage and current at maximum power point.
Parameters
----------
voltage : ndarray
1D array containing voltage at each point on the IV curve, increasing
from 0 to v_oc inclusive, of `float` type [V]
current : ndarray
1D array containing current at each point on the IV curve, decreasing
from i_sc to 0 inclusive, of `float` type [A]
Returns
-------
v_mp, i_mp : tuple
voltage ``v_mp`` and current ``i_mp`` at the maximum power point [V],
[A]
"""
p = voltage * current
idx = np.argmax(p)
return voltage[idx], current[idx]
def _calc_I0(IL, I, V, Gp, Rs, nNsVth):
return (IL - I - Gp * V - Gp * Rs * I) / np.exp((V + Rs * I) / nNsVth)
def _find_beta0_beta1(v, i, vlim, v_oc):
# Get intercept and slope of linear portion of IV curve.
# Start with V =< vlim * v_oc, extend by adding points until slope is
# negative (downward).
beta0 = np.nan
beta1 = np.nan
first_idx = np.searchsorted(v, vlim * v_oc)
for idx in range(first_idx, len(v)):
coef = np.polyfit(v[:idx], i[:idx], deg=1)
if coef[0] < 0:
# intercept term
beta0 = coef[1].item()
# sign change of slope to get positive parameter value
beta1 = -coef[0].item()
break
if any(np.isnan([beta0, beta1])):
raise RuntimeError("Parameter extraction failed: beta0={}, beta1={}"
.format(beta0, beta1))
else:
return beta0, beta1
def _find_beta3_beta4(voltage, current, beta0, beta1, ilim, i_sc):
# Subtract the IV curve from the linear fit.
y = beta0 - beta1 * voltage - current
x = np.array([np.ones_like(voltage), voltage, current]).T
# Select points where y > ilim * i_sc to regress log(y) onto x
idx = (y > ilim * i_sc)
result = np.linalg.lstsq(x[idx], np.log(y[idx]), rcond=None)
coef = result[0]
beta3 = coef[1].item()
beta4 = coef[2].item()
if any(np.isnan([beta3, beta4])):
raise RuntimeError("Parameter extraction failed: beta3={}, beta4={}"
.format(beta3, beta4))
else:
return beta3, beta4
def _calculate_sde_parameters(beta0, beta1, beta3, beta4, v_mp, i_mp, v_oc):
    """Converts the four regression coefficients into the five single diode
    equation parameters ``(IL, I0, Rsh, Rs, nNsVth)``."""
    nNsVth = 1.0 / beta3
    Rs = beta4 / beta3
    Gp = beta1 / (1.0 - Rs * beta1)
    Rsh = 1.0 / Gp
    IL = (1 + Gp * Rs) * beta0
    # Evaluate I0 at both the maximum power point and open circuit; use the
    # average when both are positive, otherwise whichever one is.
    I0_vmp = _calc_I0(IL, i_mp, v_mp, Gp, Rs, nNsVth)
    I0_voc = _calc_I0(IL, 0, v_oc, Gp, Rs, nNsVth)
    if (np.isnan(I0_vmp) or np.isnan(I0_voc)
            or (I0_vmp <= 0 and I0_voc <= 0)):
        raise RuntimeError("Parameter extraction failed: I0 is undetermined.")
    if I0_vmp > 0 and I0_voc > 0:
        I0 = 0.5 * (I0_vmp + I0_voc)
    elif I0_vmp > 0:
        I0 = I0_vmp
    else:  # only I0_voc is positive
        I0 = I0_voc
    return (IL, I0, Rsh, Rs, nNsVth)
def _system_of_equations_desoto(params, specs):
"""Evaluates the systems of equations used to solve for the single
diode equation parameters. Function designed to be used by
scipy.optimize.root() in fit_sdm_desoto().
Parameters
----------
params: ndarray
Array with parameters of the De Soto single diode model. Must be
given in the following order: IL, Io, a, Rsh, Rs
specs: tuple
Specifications of pv module given by manufacturer. Must be given
in the following order: Isc, Voc, Imp, Vmp, beta_oc, alpha_sc
Returns
-------
system of equations to solve with scipy.optimize.root().
References
----------
.. [1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
.. [2] John A Duffie, William A Beckman, "Solar Engineering of Thermal
Processes", Wiley, 2013
"""
# six input known variables
Isc, Voc, Imp, Vmp, beta_oc, alpha_sc, EgRef, dEgdT, Tref, k = specs
# five parameters vector to find
IL, Io, a, Rsh, Rs = params
# five equation vector
y = [0, 0, 0, 0, 0]
# 1st equation - short-circuit - eq(3) in [1]
y[0] = Isc - IL + Io * np.expm1(Isc * Rs / a) + Isc * Rs / Rsh
# 2nd equation - open-circuit Tref - eq(4) in [1]
y[1] = -IL + Io * np.expm1(Voc / a) + Voc / Rsh
# 3rd equation - Imp & Vmp - eq(5) in [1]
y[2] = Imp - IL + Io * np.expm1((Vmp + Imp * Rs) / a) \
+ (Vmp + Imp * Rs) / Rsh
# 4th equation - Pmp derivated=0 - eq23.2.6 in [2]
# caution: eq(6) in [1] has a sign error
y[3] = Imp \
- Vmp * ((Io / a) * np.exp((Vmp + Imp * Rs) / a) + 1.0 / Rsh) \
/ (1.0 + (Io * Rs / a) * np.exp((Vmp + Imp * Rs) / a) + Rs / Rsh)
# 5th equation - open-circuit T2 - eq (4) at temperature T2 in [1]
T2 = Tref + 2
Voc2 = (T2 - Tref) * beta_oc + Voc # eq (7) in [1]
a2 = a * T2 / Tref # eq (8) in [1]
IL2 = IL + alpha_sc * (T2 - Tref) # eq (11) in [1]
Eg2 = EgRef * (1 + dEgdT * (T2 - Tref)) # eq (10) in [1]
Io2 = Io * (T2 / Tref)**3 * np.exp(1 / k * (EgRef/Tref - Eg2/T2)) # eq (9)
y[4] = -IL2 + Io2 * np.expm1(Voc2 / a2) + Voc2 / Rsh # eq (4) at T2
return y
| anomam/pvlib-python | pvlib/ivtools.py | Python | bsd-3-clause | 19,237 | [
"pysam"
] | d5296d39da0a9a5efc8f27d3b2abb339fe0467e99f6ed002341a6d7dd3e24447 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DrugCoverage(object):
    """
    Swagger-generated model describing how a health plan covers a drug.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, plan_id=None, drug_package_id=None, med_id=None,
                 quantity_limit=None, prior_authorization=None,
                 step_therapy=None, tier=None):
        """
        DrugCoverage - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type; drives to_dict().
        self.swagger_types = {
            'plan_id': 'str',
            'drug_package_id': 'str',
            'med_id': 'int',
            'quantity_limit': 'bool',
            'prior_authorization': 'bool',
            'step_therapy': 'bool',
            'tier': 'str'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'plan_id': 'plan_id',
            'drug_package_id': 'drug_package_id',
            'med_id': 'med_id',
            'quantity_limit': 'quantity_limit',
            'prior_authorization': 'prior_authorization',
            'step_therapy': 'step_therapy',
            'tier': 'tier'
        }
        self._plan_id = plan_id
        self._drug_package_id = drug_package_id
        self._med_id = med_id
        self._quantity_limit = quantity_limit
        self._prior_authorization = prior_authorization
        self._step_therapy = step_therapy
        self._tier = tier

    @property
    def plan_id(self):
        """
        Gets the plan_id of this DrugCoverage.
        Health Insurance Oversight System id

        :return: The plan_id of this DrugCoverage.
        :rtype: str
        """
        return self._plan_id

    @plan_id.setter
    def plan_id(self, plan_id):
        """
        Sets the plan_id of this DrugCoverage.
        Health Insurance Oversight System id

        :param plan_id: The plan_id of this DrugCoverage.
        :type: str
        """
        self._plan_id = plan_id

    @property
    def drug_package_id(self):
        """
        Gets the drug_package_id of this DrugCoverage.
        NDC package code

        :return: The drug_package_id of this DrugCoverage.
        :rtype: str
        """
        return self._drug_package_id

    @drug_package_id.setter
    def drug_package_id(self, drug_package_id):
        """
        Sets the drug_package_id of this DrugCoverage.
        NDC package code

        :param drug_package_id: The drug_package_id of this DrugCoverage.
        :type: str
        """
        self._drug_package_id = drug_package_id

    @property
    def med_id(self):
        """
        Gets the med_id of this DrugCoverage.
        Med ID

        :return: The med_id of this DrugCoverage.
        :rtype: int
        """
        return self._med_id

    @med_id.setter
    def med_id(self, med_id):
        """
        Sets the med_id of this DrugCoverage.
        Med ID

        :param med_id: The med_id of this DrugCoverage.
        :type: int
        """
        self._med_id = med_id

    @property
    def quantity_limit(self):
        """
        Gets the quantity_limit of this DrugCoverage.
        Quantity limit exists

        :return: The quantity_limit of this DrugCoverage.
        :rtype: bool
        """
        return self._quantity_limit

    @quantity_limit.setter
    def quantity_limit(self, quantity_limit):
        """
        Sets the quantity_limit of this DrugCoverage.
        Quantity limit exists

        :param quantity_limit: The quantity_limit of this DrugCoverage.
        :type: bool
        """
        self._quantity_limit = quantity_limit

    @property
    def prior_authorization(self):
        """
        Gets the prior_authorization of this DrugCoverage.
        Prior authorization required

        :return: The prior_authorization of this DrugCoverage.
        :rtype: bool
        """
        return self._prior_authorization

    @prior_authorization.setter
    def prior_authorization(self, prior_authorization):
        """
        Sets the prior_authorization of this DrugCoverage.
        Prior authorization required

        :param prior_authorization: The prior_authorization of this DrugCoverage.
        :type: bool
        """
        self._prior_authorization = prior_authorization

    @property
    def step_therapy(self):
        """
        Gets the step_therapy of this DrugCoverage.
        Step Treatment required

        :return: The step_therapy of this DrugCoverage.
        :rtype: bool
        """
        return self._step_therapy

    @step_therapy.setter
    def step_therapy(self, step_therapy):
        """
        Sets the step_therapy of this DrugCoverage.
        Step Treatment required

        :param step_therapy: The step_therapy of this DrugCoverage.
        :type: bool
        """
        self._step_therapy = step_therapy

    @property
    def tier(self):
        """
        Gets the tier of this DrugCoverage.
        Tier Name

        :return: The tier of this DrugCoverage.
        :rtype: str
        """
        return self._tier

    @tier.setter
    def tier(self, tier):
        """
        Sets the tier of this DrugCoverage.
        Tier Name

        :param tier: The tier of this DrugCoverage.
        :type: str
        """
        self._tier = tier

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() works on both Python 2 and 3; the original used
        # six.iteritems, which added a third-party dependency for no benefit.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        BUG FIX: comparing against an object of a different type (e.g. a str,
        which has no __dict__) used to raise AttributeError; it now returns
        False.
        """
        if not isinstance(other, DrugCoverage):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| vericred/vericred-python | vericred_client/models/drug_coverage.py | Python | apache-2.0 | 16,284 | [
"VisIt"
] | 78ac3e87656bccf7e157097ff9798553f1b154ea996308dccb973458a2baccad |
# A Python port of the C++ port of the NTIA ITS ITM FORTRAN code.
#
# Original FORTRAN code documentation is at:
# http://www.its.bldrdoc.gov/media/50674/itm.pdf
#
# Section numbers referenced here correspond to the FORTRAN code document.
#
# Andrew Clegg
# October 2016
# Last update: Nov 5, 2016
import math
# Static function variables in C++ implemented via global variables in Python.
# NOTE: a ``global`` statement at module level is a no-op in Python; the lines
# below serve purely as documentation of which function owns which piece of
# module-level state (the functions themselves re-declare ``global`` locally).
global wd1, xd1, afo, qk, aht, xht # Function adiff
global ad, rr, etq, h0s # Function ascat
global wls # Function alos
global wlos, wscat, dmin, xae # Function lrprop
global kdv # Function avar
global dexa, de, vmd, vs0, sgl, sgtm, sgtp, sgtd, tgtd # Function avar
global gm, gp, cv1, cv2, yv1, yv2, yv3, csm1, csm2, ysm1 # Function avar
global ysm2, ysm3, csp1, csp2, ysp1, ysp2, ysp3, csd1, zd # Function avar
global cfm1, cfm2, cfm3, cfp1, cfp2, cfp3 # Function avar
global ws, w1 # Function avar
# Initialize the static function variables.  Each function's "set-up" call
# (d == 0 / lvar > 0) overwrites these before they are read.
wd1 = xd1 = afo = qk = aht = xht = 0.
ad = rr = etq = h0s = 0.
wls = 0.
wlos = wscat = False
dmin = xae = 0.
kdv = 0
dexa = de = vmd = vs0 = sgl = sgtm = sgtp = sgtd = tgtd = 0.0
gm = gp = cv1 = cv2 = yv1 = yv2 = yv3 = csm1 = csm2 = ysm1 = 0.0
ysm2 = ysm3 = csp1 = csp2 = ysp1 = ysp2 = ysp3 = csd1 = zd = 0.0
cfm1 = cfm2 = cfm3 = cfp1 = cfp2 = cfp3 = 0.0
ws = w1 = False
class PropType:
    """Primary propagation parameters — ITM <Primary parameters 2> (Section 2)."""

    def __init__(self):
        # Reference attenuation (dB) and path distance (m).
        self.aref = 0.0
        self.dist = 0.0
        # Antenna structural heights, m (one entry per terminal).
        self.hg = [0.0, 0.0]
        # Wave number (inverse length), 1/m.
        self.wn = 0.0
        # Terrain irregularity parameter, m.
        self.dh = 0.0
        # Surface refractivity, N-units.
        self.ens = 0.0
        # Earth's effective curvature (inverse length), 1/m.
        self.gme = 0.0
        # Surface transfer impedance, real and imaginary parts.
        self.zgndreal = 0.0
        self.zgndimag = 0.0
        # Antenna effective heights (m), horizon distances (m),
        # horizon elevation angles (one entry per terminal).
        self.he = [0.0, 0.0]
        self.dl = [0.0, 0.0]
        self.the = [0.0, 0.0]
        # Error indicator and controlling mode.
        self.kwx = 0
        self.mdp = 0
class PropvType:
    """Variability parameters — ITM <Variability parameters 27> (Section 27)."""

    def __init__(self):
        # Standard deviation of situation variability (confidence).
        self.sgc = 0.0
        # Control switch, mode of variability, climate indicator.
        self.lvar = 0
        self.mdvar = 0
        self.klim = 0
class PropaType:
    """Secondary parameters — ITM <Secondary parameters 3> (Section 3)."""

    def __init__(self):
        # Line-of-sight distance and scatter distance (m).
        self.dlsa = 0.0
        self.dx = 0.0
        # Line-of-sight coefficients.
        self.ael = 0.0
        self.ak1 = 0.0
        self.ak2 = 0.0
        # Diffraction coefficients.
        self.aed = 0.0
        self.emd = 0.0
        # Scatter coefficients.
        self.aes = 0.0
        self.ems = 0.0
        # Smooth-earth horizon distances (per terminal), total horizon
        # distance, and total bending angle.
        self.dls = [0.0, 0.0]
        self.dla = 0.0
        self.tha = 0.0
# Integer and double functions 'mymin' replaced by Python's min() function
# Integer and double functions 'mymax' replaced by Python's max() function
def fortran_dim(x, y):
    """
    Python equivalent of the FORTRAN DIM intrinsic.

    Returns x - y when x exceeds y, otherwise 0.0 (i.e. max(x - y, 0.0)).
    """
    diff = x - y
    return diff if diff > 0.0 else 0.0
def aknfe(v2):
    """
    Attenuation (dB) due to a single knife edge — the Fresnel integral as a
    function of v**2, using the approximation of [Alg 6.1].  (Section 13)
    """
    # Two-piece approximation split at v**2 = 5.76 (v = 2.4).
    if v2 >= 5.76:
        return 12.953 + 4.343 * math.log(v2)
    return 6.02 + 9.11 * v2 ** 0.5 - 1.27 * v2
def fht(x, pk):
    """
    Height-gain over a smooth spherical earth for the "three-radii" method,
    using the approximation of [Alg 6.4].  (Section 14)
    """
    if x >= 200.0:
        # Large-argument form, blended toward the asymptotic form below 2000.
        fhtv = 0.05751 * x - 4.343 * math.log(x)
        if x < 2000.0:
            w = 0.0134 * x * math.exp(-0.005 * x)
            fhtv = (1.0 - w) * fhtv + w * (17.372 * math.log(x) - 117.0)
        return fhtv
    # Small-argument forms.
    w = -math.log(pk)
    if pk < 1.e-5 or x * w ** 3.0 > 5495.0:
        # Deep-shadow limit.
        fhtv = -117.0
        if x > 1.0:
            fhtv = 17.372 * math.log(x) + fhtv
        return fhtv
    return 2.5e-5 * x * x / pk - 8.686 * w - 15.0
def h0f(r, et):
    """
    The H01 function for scatter fields as defined in [Alg Section 6].
    (Section 25)

    r  -- normalized distance argument; only 1/r**2 is used.
    et -- curve selector; clamped to [1, 5], with linear interpolation
          between adjacent curves for non-integer values.

    BUG FIX: the Python-2-only ``<>`` operator was replaced with ``!=`` so
    the module also parses under Python 3 (behavior is identical).
    """
    # Coefficient tables for the five H01 curves.
    a = [25.0, 80.0, 177.0, 395.0, 705.0]
    b = [24.0, 45.0, 68.0, 80.0, 105.0]
    it = int(et)
    if it <= 0:
        it = 1
        q = 0.0
    elif it >= 5:
        it = 5
        q = 0.0
    else:
        # Fractional part used to interpolate between curves it and it+1.
        q = et - it
    x = (1.0/r)**2.0
    h0fv = 4.343*math.log((a[it-1]*x + b[it-1])*x + 1.0)
    if q != 0.0:
        h0fv = (1.0 - q)*h0fv + q*4.343*math.log((a[it]*x + b[it])*x + 1.0)
    return h0fv
def ahd(td):
    """
    The F(theta*d) function for scatter fields.  (Section 26)

    Piecewise fit a + b*td + c*log(td) with breakpoints at 10 km and 70 km.
    """
    coeffs = (
        (133.4, 0.332e-3, -4.343),   # td <= 10 km
        (104.6, 0.212e-3, -1.086),   # 10 km < td <= 70 km
        (71.8, 0.157e-3, 2.171),     # td > 70 km
    )
    if td <= 10.e3:
        ca, cb, cc = coeffs[0]
    elif td <= 70.e3:
        ca, cb, cc = coeffs[1]
    else:
        ca, cb, cc = coeffs[2]
    return ca + cb * td + cc * math.log(td)
def adiff(d, prop, propa):
    """
    The function adiff finds the "diffraction attenuation" at the distance d. It
    uses a convex combination of smooth earth diffraction and double knife-
    edge diffraction. A call with d = 0 sets up initial constants.
    (Section 10)

    Stateful: the d == 0 call stores wd1/xd1/afo/qk/aht/xht at module level;
    subsequent calls with d > 0 read them.  Callers must make the set-up call
    first (lrprop does).
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize these variables.
    global wd1, xd1, afo, qk, aht, xht
    prop_zgnd = prop.zgndreal + prop.zgndimag * 1j
    if d == 0:
        # --- set-up pass: precompute the path-dependent constants ---
        q = prop.hg[0]*prop.hg[1]
        qk = prop.he[0]*prop.he[1] - q
        if prop.mdp < 0.0:
            # point-to-point mode (mdp < 0)
            q += 10.0
        wd1 = (1.0 + qk/q)**0.5
        xd1 = propa.dla + propa.tha/prop.gme
        q = (1.0 - 0.8*math.exp(-propa.dlsa/50e3))*prop.dh
        q *= 0.78*math.exp(- (q/16.)**0.25)
        # afo: clutter/absorption term, capped at 15 dB.
        afo = min(15.0, \
            2.171*math.log(1.0 + 4.77e-4*prop.hg[0]*prop.hg[1]*prop.wn*q))
        # qk is reused here for the surface-impedance magnitude term.
        qk = 1.0/abs(prop_zgnd)
        aht = 20.0
        xht = 0.0
        # Accumulate the height-gain contribution of each terminal.
        for j in range(2):
            a = (0.5*prop.dl[j]**2.0)/prop.he[j]
            wa = (a*prop.wn)**(1./3.)
            pk = qk/wa
            q = (1.607 - pk)*151.0*wa*prop.dl[j]/a
            xht += q
            aht += fht(q,pk)
        adiffv = 0.0
    else:
        # --- evaluation pass at distance d ---
        th = propa.tha + d*prop.gme
        ds = d - propa.dla
        q = 0.0795775*prop.wn*ds*(th**2.0)
        # Double knife-edge attenuation, one edge per horizon.
        adiffv = aknfe(q*prop.dl[0]/(ds+prop.dl[0])) \
            + aknfe(q*prop.dl[1]/(ds+prop.dl[1]))
        a = ds/th
        wa = (a*prop.wn)**(1./3.)
        pk = qk/wa
        # Smooth-earth ("rounded earth") diffraction attenuation.
        q = (1.607 - pk)*151.0*wa*th+xht
        ar = 0.05751*q - 4.343*math.log(q) - aht
        # wd: weight for the convex combination of the two mechanisms.
        q = (wd1 + xd1/d) \
            *min(((1.0 - 0.8*math.exp(-d/50e3))*prop.dh*prop.wn),6283.2)
        wd = 25.1/(25.1 + q**0.5)
        adiffv = ar*wd+(1.0 - wd)*adiffv + afo
    return adiffv
def ascat(d, prop, propa):
    """
    The function ascat finds the "scatter attenuation" at the distance d. It uses
    an approximation to the methods of NBS Tech Note 101 with checks for inadmissable
    situations. For proper operation, the larger distance (d = d_6) must be the
    first called. A call with d = 0 sets up initial constants.
    (Section 22)

    Stateful: the d == 0 call stores ad/rr/etq/h0s at module level; h0s also
    carries the frequency-gain value between the two evaluation calls (which
    is why the larger distance must be evaluated first).
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize.
    global ad, rr, etq, h0s
    prop_zgnd = prop.zgndreal + prop.zgndimag*1j
    if d == 0.0:
        # --- set-up pass ---
        ad = prop.dl[0] - prop.dl[1]
        rr = prop.he[1]/prop.he[0]
        # Normalize so that ad >= 0 and rr refers to the matching order.
        if ad < 0.0:
            ad = -ad
            rr = 1.0/rr
        etq = (5.67e-6*prop.ens - 2.32e-3)*prop.ens + 0.031
        h0s = -15.0   # sentinel: "no previous H0 value"
        ascatv = 0.0
    else:
        # --- evaluation pass at distance d ---
        if h0s > 15.0:
            # Reuse the capped value from the previous (larger-d) call.
            h0 = h0s
        else:
            th = prop.the[0] + prop.the[1] + d*prop.gme
            r2 = 2.0*prop.wn*th
            r1 = r2*prop.he[0]
            r2 *= prop.he[1]
            if r1 < 0.2 and r2 < 0.2:
                # Inadmissable situation — signal with a huge attenuation.
                # Early return
                return 1001.0
            ss = (d - ad)/(d + ad)
            q = rr/ss
            ss = max(0.1, ss)
            q = min(max(0.1, q), 10.0)
            z0 = (d - ad)*(d + ad)*th*0.25/d
            et=(etq*math.exp(-pow(min(1.7,z0/8.0e3),6.0))+1.0)*z0/1.7556e3
            ett = max(et, 1.0)
            # Frequency-gain function H0 (average of both terminals).
            h0 = (h0f(r1, ett) + h0f(r2, ett))*0.5
            h0 += min(h0, (1.38 - math.log(ett))*math.log(ss)*math.log(q)*0.49)
            h0 = fortran_dim(h0, 0.0)
            if et < 1.0:
                # Low effective-height correction.
                h0 = et*h0+(1.0-et)*4.343* \
                    math.log(pow((1.0+1.4142/r1)*(1.0+1.4142/r2),2.0)*(r1+r2)/(r1+r2+2.8284))
            if h0 > 15.0 and h0s >= 0.0:
                # Cap growth using the value carried from the previous call.
                h0 = h0s
        h0s = h0
        th = propa.tha+d*prop.gme
        # F(theta*d) term plus frequency/refractivity corrections.
        ascatv = ahd(th*d)+4.343*math.log(47.7*prop.wn*pow(th,4.0)) - 0.1 \
            * (prop.ens-301.0)*math.exp(-th*d/40e3) + h0
    return ascatv
def qerfi(q):
    """
    Inverse of qerf — solves q = Q(x) for x.

    Uses the rational approximation due to C. Hastings, Jr. ("Approximations
    for digital computers," Princeton Univ. Press, 1955); the maximum error
    should be 4.5e-4.  (Section 51)
    """
    c0, c1, c2 = 2.515516698, 0.802853, 0.010328
    d1, d2, d3 = 1.432788, 0.189269, 0.001308
    x = 0.5 - q
    # Clamp away from zero so the log below is defined.
    t = max(0.5 - abs(x), 0.000001)
    t = (-2.0 * math.log(t)) ** 0.5
    v = t - ((c2 * t + c1) * t + c0) / (((d3 * t + d2) * t + d1) * t + 1.0)
    return -v if x < 0.0 else v
def qlrps(fmhz, zsys, en0, ipol, eps, sgm, prop):
    """
    This routine converts the frequency fmhz, the surface refractivity reduced to
    sea level en0, and general system elevation zsys, and the polarization and ground
    constants eps, sgm, to wave number wn, surface refractivity ens, effective earth
    curvature gme, and surface impedance zgnd. It may be used with either the area
    prediction or the point-to-point mode.
    (Section 41)

    Results are stored into prop (wn, ens, gme, zgndreal, zgndimag).
    ipol: 0 = horizontal, nonzero = vertical polarization.

    FIXES: Python-2-only ``<>`` operators replaced with ``!=``; a dead read
    of the previous impedance into a local (immediately overwritten) removed.
    """
    gma = 157e-9   # reciprocal effective earth radius, 1/m
    prop.wn = fmhz/47.7
    prop.ens = en0
    if zsys != 0.0:
        # Reduce sea-level refractivity to the system elevation.
        prop.ens *= math.exp(-zsys/9460.0)
    prop.gme = gma*(1.0 - 0.04665*math.exp(prop.ens/179.3))
    # Complex relative permittivity; imaginary part from conductivity.
    zq = eps + 376.62*sgm/prop.wn * 1j
    prop_zgnd = (zq - 1.0)**0.5
    if ipol != 0.0:
        # Vertical polarization.
        prop_zgnd = prop_zgnd/zq
    prop.zgndreal = prop_zgnd.real
    prop.zgndimag = prop_zgnd.imag
def abq_alos(r):
    """Squared magnitude |r|**2 of a complex value (avoids a sqrt)."""
    return r.real ** 2 + r.imag ** 2
def alos(d, prop, propa):
    """
    The function alos finds the "line-of-sight" attenuation at the distance d. It
    uses a convex combination of plane earth fields and diffracted fields. A call
    with d = 0 sets up initial constants.
    (Section 17)

    Stateful: the d == 0 call stores the weight wls at module level; callers
    must make the set-up call first (lrprop does).
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize.
    global wls
    prop_zgnd = prop.zgndreal + prop.zgndimag*1j
    if d == 0.0:
        # --- set-up pass: weighting factor for the convex combination ---
        wls = 0.021/(0.021+prop.wn*prop.dh/max(10e3,propa.dlsa))
        alosv = 0.0
    else:
        # --- evaluation pass at distance d ---
        q = (1.0-0.8*math.exp(-d/50.e3))*prop.dh
        s = 0.78*q*math.exp(-pow(q/16.0,0.25))
        q = prop.he[0] + prop.he[1]
        # sine of the grazing angle
        sps = q/(d*d + q*q)**0.5
        # Plane-earth reflection coefficient with roughness attenuation.
        r = (sps - prop_zgnd)/(sps + prop_zgnd)*math.exp(-min(10.0,prop.wn*s*sps))
        q = abq_alos(r)
        if q < 0.25 or q < sps:
            r = r*(sps/q)**0.5
        # Extended-diffraction attenuation at this distance.
        alosv = propa.emd*d + propa.aed
        # Two-ray path-difference phase argument.
        q = prop.wn*prop.he[0]*prop.he[1]*2.0/d
        if q > 1.57:
            q = 3.14-2.4649/q
        # Blend the two-ray field with the diffracted field using wls.
        alosv = (-4.343*math.log(abq_alos((math.cos(q) - math.sin(q)*1j) + r)) - alosv) * wls \
            + alosv
    return alosv
def qlra(kst, klimx, mdvarx, prop, propv):
    """
    This is used to prepare the model in the area prediction mode. Normally,
    one first calls qlrps and then qlra. Before calling the latter, one should
    have defined in the <Primary Parameters 2> the antenna heights hg, the
    terrain irregularity dh, and (probably through qlrps) the variables wn,
    ens, gme, and zgnd. The input kst will define siting criteria for the
    terminals, klimx the climate, and mdvarx the mode of variability. If
    klimx <= 0 or mdvarx < 0 the associated parameters remain unchanged.
    (Section 42)

    BUG FIXES: the original called bare ``sin(...)`` (a NameError — only the
    ``math`` module is imported) instead of ``math.sin``; Python-2-only ``<>``
    replaced with ``!=``; a dead local impedance assignment removed.
    """
    for j in range(2):
        if kst[j] <= 0:
            # Random siting: effective height equals structural height.
            prop.he[j] = prop.hg[j]
        else:
            q = 4.0
            if kst[j] != 1:
                # "Very careful" siting gets the larger siting gain.
                q = 9.0
            if prop.hg[j] < 5.0:
                # Taper the siting gain for low antennas.
                q *= math.sin(0.3141593*prop.hg[j])
            prop.he[j] = prop.hg[j] + (1.0 + q) \
                * math.exp(-min(20.0, 2.0*prop.hg[j]/max(1e-3, prop.dh)))
        # Smooth-earth horizon distance, reduced for terrain irregularity.
        q = (2.0*prop.he[j]/prop.gme)**0.5
        prop.dl[j] = q*math.exp(-0.07*(prop.dh/max(prop.he[j], 5.0))**0.5)
        prop.the[j] = (0.65*prop.dh*(q/prop.dl[j]-1.0)-2.0*prop.he[j])/q
    prop.mdp = 1   # area prediction mode
    propv.lvar = max(propv.lvar, 3)
    if mdvarx >= 0:
        propv.mdvar = mdvarx
        propv.lvar = max(propv.lvar, 4)
    if klimx > 0:
        propv.klim = klimx
        propv.lvar = 5
    return 0
def lrprop(d, prop, propa):  # // PaulM_lrprop
    """
    The Longley Rice propagation program. This is the basic program; it
    computes the reference attenuation prop.aref at distance d.
    (Section 4)

    With prop.mdp != 0 it first runs the set-up pass (secondary parameters,
    range checks that raise prop.kwx, and the diffraction-line coefficients),
    then selects the line-of-sight, diffraction or scatter region for the
    requested distance.  Module-level state wlos/wscat/dmin/xae carries the
    lazily-computed region coefficients between calls.

    FIX: Python-2-only ``<>`` replaced with ``!=`` (behavior unchanged).
    """
    global wlos, wscat
    global dmin, xae
    prop_zgnd = prop.zgndreal + prop.zgndimag * 1j
    if prop.mdp != 0:
        # --- set-up pass ---
        for j in range(2):
            propa.dls[j] = (2.0*prop.he[j]/prop.gme)**0.5
        propa.dlsa = propa.dls[0] + propa.dls[1]
        propa.dla = prop.dl[0] + prop.dl[1]
        propa.tha = max(prop.the[0]+prop.the[1], -propa.dla*prop.gme)
        wlos = False
        wscat = False
        # Parameter range checks; kwx escalates from warnings to errors.
        if prop.wn < 0.838 or prop.wn > 210.0:
            prop.kwx = max(prop.kwx, 1)
        for j in range(2):
            if prop.hg[j] < 1.0 or prop.hg[j] > 1000.0:
                prop.kwx = max(prop.kwx, 1)
        for j in range(2):
            if (abs(prop.the[j]) > 200e-3
                    or prop.dl[j] < 0.1*propa.dls[j]
                    or prop.dl[j] > 3.0*propa.dls[j]):
                prop.kwx = max(prop.kwx, 3)
        if (prop.ens < 250.0 or prop.ens > 400.0
                or prop.gme < 75e-9 or prop.gme > 250e-9
                or prop_zgnd.real <= abs(prop_zgnd.imag)
                or prop.wn < 0.419 or prop.wn > 420.0):
            prop.kwx = 4
        for j in range(2):
            if prop.hg[j] < 0.5 or prop.hg[j] > 3000.0:
                prop.kwx = 4
        dmin = abs(prop.he[0] - prop.he[1])/200e-3
        # Initialize adiff's static state, then fit the diffraction line
        # aed + emd*d through two points beyond the smooth-earth horizon.
        q = adiff(0.0, prop, propa)
        xae = pow(prop.wn*pow(prop.gme, 2), -(1.0/3.0))
        d3 = max(propa.dlsa, 1.3787*xae + propa.dla)
        d4 = d3 + 2.7574*xae
        a3 = adiff(d3, prop, propa)
        a4 = adiff(d4, prop, propa)
        propa.emd = (a4 - a3)/(d4 - d3)
        propa.aed = a3 - propa.emd*d3
    if prop.mdp >= 0:
        prop.mdp = 0
        prop.dist = d
    if prop.dist > 0.0:
        # Distance range checks.
        if prop.dist > 1000e3:
            prop.kwx = max(prop.kwx, 1)
        if prop.dist < dmin:
            prop.kwx = max(prop.kwx, 3)
        if prop.dist < 1e3 or prop.dist > 2000e3:
            prop.kwx = 4
    if prop.dist < propa.dlsa:
        # --- line-of-sight region ---
        if not wlos:
            # Fit the LOS curve ael + ak1*d + ak2*log(d) (lazy, once).
            q = alos(0.0, prop, propa)
            d2 = propa.dlsa
            a2 = propa.aed + d2*propa.emd
            d0 = 1.908*prop.wn*prop.he[0]*prop.he[1]
            if propa.aed >= 0.0:
                d0 = min(d0, 0.5*propa.dla)
                d1 = d0 + 0.25*(propa.dla - d0)
            else:
                d1 = max(-propa.aed/propa.emd, 0.25*propa.dla)
            a1 = alos(d1, prop, propa)
            wq = False
            if d0 < d1:
                a0 = alos(d0, prop, propa)
                q = math.log(d2/d0)
                propa.ak2 = max(0.0, ((d2 - d0)*(a1 - a0)-(d1 - d0)*(a2 - a0)) \
                    / ((d2-d0)*math.log(d1/d0)-(d1-d0)*q))
                wq = (propa.aed >= 0.0 or propa.ak2 > 0.0)
                if wq:
                    propa.ak1 = (a2 - a0 - propa.ak2*q)/(d2 - d0)
                    if propa.ak1 < 0.0:
                        propa.ak1 = 0.0
                        propa.ak2 = fortran_dim(a2, a0)/q
                        if propa.ak2 == 0.0:
                            propa.ak1 = propa.emd
            if not wq:
                propa.ak1 = fortran_dim(a2, a1)/(d2 - d1)
                propa.ak2 = 0.0
                if propa.ak1 == 0.0:
                    propa.ak1 = propa.emd
            propa.ael = a2 - propa.ak1*d2 - propa.ak2*math.log(d2)
            wlos = True
        if prop.dist > 0.0:
            prop.aref = propa.ael + propa.ak1*prop.dist \
                + propa.ak2*math.log(prop.dist)
    if prop.dist <= 0.0 or prop.dist >= propa.dlsa:
        # --- beyond the horizon: diffraction or scatter ---
        if not wscat:
            # Fit the scatter line aes + ems*d (lazy, once; larger d first).
            q = ascat(0.0, prop, propa)
            d5 = propa.dla + 200e3
            d6 = d5 + 200e3
            a6 = ascat(d6, prop, propa)
            a5 = ascat(d5, prop, propa)
            if a5 < 1000.0:
                propa.ems = (a6 - a5)/200e3
                # dx: crossover distance between diffraction and scatter.
                propa.dx = max(propa.dlsa, max(propa.dla+0.3*xae \
                    *math.log(47.7*prop.wn), (a5-propa.aed-propa.ems*d5) \
                    /(propa.emd-propa.ems)))
                propa.aes = (propa.emd-propa.ems)*propa.dx+propa.aed
            else:
                # Scatter inadmissible: fall back to the diffraction line.
                propa.ems = propa.emd
                propa.aes = propa.aed
                propa.dx = 10.e6
            wscat = True
        if prop.dist > propa.dx:
            prop.aref = propa.aes + propa.ems*prop.dist
        else:
            prop.aref = propa.aed + propa.emd*prop.dist
    # Reference attenuation is never negative.
    prop.aref = max(prop.aref, 0.0)
    return 0
def curve(c1, c2, x1, x2, x3, de):
    """
    Climate-curve evaluator used by avar for the variability quantiles.
    (Section 30)
    """
    u = (de / x1) ** 2.0
    peak = c1 + c2 / (1.0 + ((de - x2) / x3) ** 2.0)
    return peak * u / (1.0 + u)
def avar(zzt, zzl, zzc, prop, propv):
    """
    When in the area prediction mode, one needs a threefold quantile of
    attenuation which corresponds to the fraction q_T of time, the fraction
    q_L of locations, and the fraction q_S of "situations." In the point to
    point mode, one needs only q_T and q_S. For efficiency, avar is written as
    a function of the "standard normal deviates" z_T, z_L, and z_S corresponding
    to the requested fractions. Thus, for example, q_T = Q(z_T) where Q(z) is
    the "complementary standard normal distribution." For the point to point
    mode one sets z_L = 0 which corresponds to the median q_L = 0.50.
    The subprogram is written trying to reduce duplicate calculations. This is
    done through the switch lvar. On first entering, set lvar = 5. Then all
    parameters will be initialized, and lvar will be changed to 0. If the
    program is to be used to find several quantiles with different values of
    z_T, z_L, or z_S, then lvar should be 0, as it is. If the distance is
    changed, set lvar = 1 and parameters that depend on the distance will be
    recomputed. If antenna heights are changed, set lvar = 2; if the frequency,
    lvar = 3; if the mode of variability mdvar, set lvar = 4; and finally, if
    the climate is changed, set lvar = 5. The higher the value of lvar, the
    more parameters will be recomputed.
    (Section 28)

    Implementation note: the chain of ``propv.lvar == .. or propv.lvar not in
    [1, 2, 3, 4]`` conditions below reproduces the fall-through of the C++
    switch statement (case 5/default falls through 4, 3, 2 and 1).
    """
    global kdv
    global dexa, de, vmd, vs0, sgl, sgtm, sgtp, sgtd, tgtd
    global gm, gp, cv1, cv2, yv1, yv2, yv3, csm1, csm2, ysm1
    global ysm2, ysm3, csp1, csp2, ysp1, ysp2, ysp3, csd1, zd
    global cfm1, cfm2, cfm3, cfp1, cfp2, cfp3
    global ws, w1
    # Climate-indexed coefficient tables (7 entries, indexed by klim - 1).
    bv1 = [-9.67,-0.62,1.26,-9.21,-0.62,-0.39,3.15]
    bv2 = [12.7,9.19,15.5,9.05,9.19,2.86,857.9]
    xv1 = [144.9e3,228.9e3,262.6e3,84.1e3,228.9e3,141.7e3,2222.e3]
    xv2 = [190.3e3,205.2e3,185.2e3,101.1e3,205.2e3,315.9e3,164.8e3]
    xv3 = [133.8e3,143.6e3,99.8e3,98.6e3,143.6e3,167.4e3,116.3e3]
    bsm1 = [2.13,2.66,6.11,1.98,2.68,6.86,8.51]
    bsm2 = [159.5,7.67,6.65,13.11,7.16,10.38,169.8]
    xsm1 = [762.2e3,100.4e3,138.2e3,139.1e3,93.7e3,187.8e3,609.8e3]
    xsm2 = [123.6e3,172.5e3,242.2e3,132.7e3,186.8e3,169.6e3,119.9e3]
    xsm3 = [94.5e3,136.4e3,178.6e3,193.5e3,133.5e3,108.9e3,106.6e3]
    bsp1 = [2.11,6.87,10.08,3.68,4.75,8.58,8.43]
    bsp2 = [102.3,15.53,9.60,159.3,8.12,13.97,8.19]
    xsp1 = [636.9e3,138.7e3,165.3e3,464.4e3,93.2e3,216.0e3,136.2e3]
    xsp2 = [134.8e3,143.7e3,225.7e3,93.1e3,135.9e3,152.0e3,188.5e3]
    xsp3 = [95.6e3,98.6e3,129.7e3,94.2e3,113.4e3,122.7e3,122.9e3]
    bsd1 = [1.224,0.801,1.380,1.000,1.224,1.518,1.518]
    bzd1 = [1.282,2.161,1.282,20.,1.282,1.282,1.282]
    bfm1 = [1.0,1.0,1.0,1.0,0.92,1.0,1.0]
    bfm2 = [0.0,0.0,0.0,0.0,0.25,0.0,0.0]
    bfm3 = [0.0,0.0,0.0,0.0,1.77,0.0,0.0]
    bfp1 = [1.0,0.93,1.0,0.93,0.93,1.0,1.0]
    bfp2 = [0.0,0.31,0.0,0.19,0.31,0.0,0.0]
    bfp3 = [0.0,2.00,0.0,1.79,2.00,0.0,0.0]
    rt = 7.8
    rl = 24.0
    temp_klim = propv.klim - 1
    if propv.lvar > 0:
        # lvar 5 (or anything outside 1..4): climate changed — reload tables.
        if propv.lvar not in [1, 2, 3, 4]:
            if propv.klim <= 0 or propv.klim > 7:
                # Out-of-range climate: substitute Continental Temperate.
                propv.klim = 5
                temp_klim = 4
                prop.kwx = max(prop.kwx,2)
            cv1 = bv1[temp_klim]
            cv2 = bv2[temp_klim]
            yv1 = xv1[temp_klim]
            yv2 = xv2[temp_klim]
            yv3 = xv3[temp_klim]
            csm1 = bsm1[temp_klim]
            csm2 = bsm2[temp_klim]
            ysm1 = xsm1[temp_klim]
            ysm2 = xsm2[temp_klim]
            ysm3 = xsm3[temp_klim]
            csp1 = bsp1[temp_klim]
            csp2 = bsp2[temp_klim]
            ysp1 = xsp1[temp_klim]
            ysp2 = xsp2[temp_klim]
            ysp3 = xsp3[temp_klim]
            csd1 = bsd1[temp_klim]
            zd = bzd1[temp_klim]
            cfm1 = bfm1[temp_klim]
            cfm2 = bfm2[temp_klim]
            cfm3 = bfm3[temp_klim]
            cfp1 = bfp1[temp_klim]
            cfp2 = bfp2[temp_klim]
            cfp3 = bfp3[temp_klim]
        # lvar >= 4: mode of variability changed — decode mdvar.
        if propv.lvar == 4 or propv.lvar not in [1, 2, 3, 4]:
            kdv = propv.mdvar
            ws = (kdv >= 20)   # +20: eliminate situation variability
            if ws:
                kdv -= 20
            w1 = (kdv >= 10)   # +10: eliminate location variability
            if w1:
                kdv -= 10
            if kdv < 0 or kdv > 3:
                kdv = 0
                prop.kwx = max(prop.kwx,2)
        # lvar >= 3: frequency changed — recompute frequency gains.
        if propv.lvar in [3, 4] or propv.lvar not in [1, 2, 3, 4]:
            q = math.log(0.133*prop.wn)
            gm = cfm1 + cfm2/(pow(cfm3*q, 2.0) + 1.0)
            gp = cfp1 + cfp2/(pow(cfp3*q, 2.0) + 1.0)
        # lvar >= 2: antenna heights changed — recompute dexa.
        if propv.lvar in [2, 3, 4] or propv.lvar not in [1, 2, 3, 4]:
            dexa = (18.e6*prop.he[0])**0.5 + (18.e6*prop.he[1])**0.5 \
                + pow((575.7e12/prop.wn), (1./3.))
        # lvar >= 1: distance changed — recompute effective distance.
        if propv.lvar in [1, 2, 3, 4] or propv.lvar not in [1, 2, 3, 4]:
            if prop.dist < dexa:
                de = 130.e3*prop.dist/dexa
            else:
                de = 130.e3+prop.dist-dexa
        # Quantile curves evaluated at the effective distance.
        vmd = curve(cv1, cv2, yv1, yv2, yv3, de)
        sgtm = curve(csm1,csm2,ysm1,ysm2,ysm3,de) * gm
        sgtp = curve(csp1,csp2,ysp1,ysp2,ysp3,de) * gp
        sgtd = sgtp*csd1
        tgtd = (sgtp - sgtd)*zd
        # Location variability (zero when suppressed via w1).
        if w1:
            sgl = 0.0
        else:
            q = (1.0 - 0.8*math.exp(-prop.dist/50.e3))*prop.dh*prop.wn
            sgl = 10.0*q/(q + 13.0)
        # Situation variability (zero when suppressed via ws).
        if ws:
            vs0 = 0.0
        else:
            vs0 = pow(5.0 + 3.0*math.exp(-de/100.e3), 2.0)
        propv.lvar = 0
    # Collapse deviates according to the mode of variability kdv.
    zt = zzt
    zl = zzl
    zc = zzc
    if kdv == 0:
        zt = zc
        zl = zc
    elif kdv == 1:
        zl = zc
    elif kdv == 2:
        zl = zt
    if abs(zt) > 3.1 or abs(zl) > 3.1 or abs(zc) > 3.1:
        prop.kwx = max(prop.kwx, 1)
    # Time-variability standard deviation (asymmetric about the median).
    if zt < 0.0:
        sgt = sgtm
    elif zt <= zd:
        sgt = sgtp
    else:
        sgt = sgtd + tgtd/zt
    vs = vs0 + pow(sgt*zt,2.0)/(rt + zc*zc) + pow(sgl*zl, 2.0)/(rl + zc*zc)
    # Combine the variabilities according to the mode.
    if kdv == 0:
        yr = 0.0
        propv.sgc = (sgt*sgt + sgl*sgl + vs)**0.5
    elif kdv == 1:
        yr = sgt*zt
        propv.sgc = (sgl*sgl + vs)**0.5
    elif kdv == 2:
        yr = zt * (sgt*sgt + sgl*sgl)**0.5
        propv.sgc = vs**0.5
    else:
        yr = sgt*zt + sgl*zl
        propv.sgc = vs**0.5
    avarv = prop.aref - vmd - yr - propv.sgc*zc
    if avarv < 0.0:
        # Ease negative values toward zero ("free-space hugging" curve).
        avarv = avarv*(29.0 - avarv)/(29.0 - 10.0*avarv)
    return avarv
def hzns(pfl, prop):
    """
    Here we use the terrain profile pfl to find the two horizons. Output consists
    of the horizon distances dl and the horizon take-off angles the. If the path is
    line-of-sight, the routine sets both horizon distances equal to dist.
    (Section 47)

    pfl format: pfl[0] = number of intervals np, pfl[1] = interval length xi,
    pfl[2..np+2] = elevations.  Results are written into prop.dl and prop.the.
    """
    np = int(pfl[0])
    xi = pfl[1]
    # Terminal elevations = terrain + structural antenna heights.
    za = pfl[2] + prop.hg[0]
    zb = pfl[np+2] + prop.hg[1]
    qc = 0.5*prop.gme
    q = qc*prop.dist
    # Initialize to the line-of-sight case: mutual take-off angles,
    # horizon distances equal to the full path distance.
    prop.the[1] = (zb-za)/prop.dist
    prop.the[0] = prop.the[1] - q
    prop.the[1] = -prop.the[1] - q
    prop.dl[0] = prop.dist
    prop.dl[1] = prop.dist
    if np >= 2:
        sa = 0.0            # distance from terminal 0
        sb = prop.dist      # distance from terminal 1
        wq = True           # True while the path is still line-of-sight
        for i in range(1, np):
            sa += xi
            sb -= xi
            # Obstruction height above the current ray from terminal 0.
            q = pfl[i+2] - (qc*sa + prop.the[0])*sa - za
            if q > 0.0:
                prop.the[0] += q/sa
                prop.dl[0] = sa
                wq = False
            # Once obstructed, also track the horizon seen from terminal 1.
            if not wq:
                q = pfl[i+2] - (qc*sb + prop.the[1])*sb - zb
                if q > 0.0:
                    prop.the[1] += q/sb
                    prop.dl[1] = sb
def z1sq1 (z, x1, x2, z0, zn):
    """
    A linear least squares fit between x1, x2 to the function described by the
    array z. This array must have a special format: z(1) = en, the number of
    equally large intervals, z(2) = epsilon, the interval length, and z(j+3),
    j = 0, ..., n, function values. The output consists of values of the required
    line, z0 at 0, zn at xt = n*epsilon.
    (Section 53)

    [Note: Changed to a function that returns z0 and zn, since Python functions
    cannot return modified parameters that are immutable objects. Because of this
    change, the code has been changed elsewhere, wherever z1sq1 is called. -- AWC]

    The z0/zn parameters are retained only for signature compatibility with
    the original port; their incoming values are ignored.
    """
    xn = z[0]
    # Clip the fit window [x1, x2] to the available profile indices.
    xa = int(fortran_dim(x1/z[1], 0.0))
    xb = xn - int(fortran_dim(xn, x2/z[1]))
    if xb <= xa:
        # Degenerate window: widen by one interval on each side.
        xa = fortran_dim(xa, 1.0)
        xb = xn - fortran_dim(xn, xb+1.0)
    ja = int(xa)
    jb = int(xb)
    n = jb - ja
    # Center the abscissa so the normal equations decouple (mean and slope
    # accumulate independently in a and b).
    xa = xb - xa
    x = -0.5*xa
    xb += x
    a = 0.5*(z[ja+2] + z[jb+2])
    b = 0.5*(z[ja+2] - z[jb+2])*x
    for i in range(2, n+1):
        ja = ja + 1
        x += 1.0
        a += z[ja + 2]
        b += z[ja + 2]*x
    a /= xa
    b = b*12.0/((xa*xa + 2.0)*xa)
    # Evaluate the fitted line at x = 0 and x = xn.
    z0 = a - b*xb
    zn = a + b*(xn-xb)
    return z0, zn
def qtile(nn, a, ir):
    """
    This routine provides a quantile. It reorders the array a so that a(j),
    j = 1...i_r are all greater than or equal to all a(i), i = i_r ... nn. In
    particular, a(i_r) will have the same value it would have if a were completely
    sorted in descending order. The returned value is qtile = a(i_r).
    (Section 52)

    This is a Hoare-style selection; ``goto10``/``done`` emulate the FORTRAN
    GOTO control flow.  The list a is partially reordered in place.

    FIX: removed dead code — m/n/j1/i0 and q were assigned twice before the
    loop, and the first iteration (goto10 == True) unconditionally recomputes
    q, i0 and j1, so the earlier assignments could never be observed.
    """
    m = 0
    n = nn
    k = min(max(0, ir), n)
    done = False
    goto10 = True
    while not done:
        if goto10:
            # (Re)start a partition pass around the pivot a[k].
            q = a[k]
            i0 = m
            j1 = n
        # Scan right for an element smaller than the pivot ...
        i = i0
        while i <= n and a[i] >= q:
            i += 1
        if i > n:
            i = n
        # ... and left for an element larger than the pivot.
        j = j1
        while j >= m and a[j] <= q:
            j -= 1
        if j < m:
            j = m
        if i < j:
            # Out-of-order pair: swap and continue the same pass.
            r = a[i]
            a[i] = a[j]
            a[j] = r
            i0 = i+1
            j1 = j-1
            goto10 = False
        elif i < k:
            # Pivot belongs further right: narrow to the right part.
            a[k] = a[i]
            a[i] = q
            m = i+1
            goto10 = True
        elif j > k:
            # Pivot belongs further left: narrow to the left part.
            a[k] = a[j]
            a[j] = q
            n = j-1
            goto10 = True
        else:
            # Pivot is in its final position.
            done = True
    return q
def qerf(z):
    """
    The standard normal complementary probability Q(z) =
    1/sqrt(2*pi) * int_z^inf exp(-t**2/2) dt.

    Uses the rational approximation due to C. Hastings, Jr. ("Approximations
    for digital computers," Princeton University Press, 1955); the maximum
    error should be 7.5E-8.  (Section 50)
    """
    b1, b2, b3, b4, b5 = (0.319381530, -0.356563782, 1.781477937,
                          -1.821255987, 1.330274429)
    rp = 4.317008
    rrt2pi = 0.398942280   # 1/sqrt(2*pi)
    t = abs(z)
    if t >= 10.0:
        # Tail is numerically zero for the positive side.
        qerfv = 0.0
    else:
        t = rp / (t + rp)
        qerfv = math.exp(-0.5 * z * z) * rrt2pi * \
            ((((b5 * t + b4) * t + b3) * t + b2) * t + b1) * t
    # Use symmetry Q(-z) = 1 - Q(z) for the negative half-line.
    return 1.0 - qerfv if z < 0.0 else qerfv
def d1thx(pfl, x1, x2):
    """
    Using the terrain profile pfl we find deltah, the interdecile range of
    elevations between the two points x1 and x2.
    (Section 48)

    Resamples the profile onto n points, removes the linear least-squares
    trend, then takes the difference between the ka-th and kb-th order
    statistics (approximately the 10th and 90th percentiles).
    """
    np = int(pfl[0])
    xa = x1/pfl[1]
    xb = x2/pfl[1]
    d1thxv = 0.0
    if xb - xa < 2.0: # exit out
        # Window too narrow to estimate irregularity.
        return d1thxv
    # ka/kb select the decile order statistics; n is the resample count.
    ka = int(0.1*(xb - xa + 8.0))
    ka = min(max(4, ka), 25)
    n = 10*ka - 5
    kb = n-ka + 1
    sn = n-1
    # s is built in the same [count, spacing, values...] format as pfl
    # so it can be handed to z1sq1.
    s = []
    s.append(sn)
    s.append(1.0)
    xb = (xb - xa)/sn
    k = int(xa + 1.0)
    xa -= float(k)
    # Resample the profile by linear interpolation onto n points.
    for j in range(n):
        while xa > 0.0 and k < np:
            xa -= 1.0
            k += 1
        s.append(pfl[k+2] + (pfl[k+2] - pfl[k+1])*xa)
        xa = xa + xb
    # Fit and subtract the linear trend (xa = intercept, xb = slope step).
    xa, xb = z1sq1(s,0.0,sn,xa,xb) # Revised call to z1sq1
    xb = (xb - xa)/sn
    for j in range(n):
        s[j+2] -= xa
        xa = xa + xb
    # Interdecile range of the detrended samples.
    spartial = s[2:]
    d1thxv = qtile(n-1, spartial, ka-1) - qtile(n-1, spartial, kb-1)
    # Distance-dependent normalization.
    d1thxv /= 1.0 - 0.8*math.exp(-(x2 - x1)/50.0e3)
    return d1thxv
def qlrpfl(pfl, klimx, mdvarx, prop, propa, propv):
    """
    This subroutine may be used to prepare for the point-to-point mode. Since the
    path is fixed, it has only one value of aref and therefore at the end of the
    routine there is a call to lrprop. To complete the process one needs to call avar
    for whatever quantiles are desired.
    (Section 43)

    pfl is the terrain profile in [count, spacing, elevations...] format;
    klimx/mdvarx update the climate and mode of variability when positive /
    non-negative respectively.
    """
    xl = []
    prop.dist = pfl[0] * pfl[1]
    np = int(pfl[0])
    # Horizon distances and take-off angles from the profile.
    hzns(pfl, prop)
    # xl: fit windows near each terminal for the effective-height fits.
    for j in range(2):
        xl.append(min(15.0*prop.hg[j], 0.1*prop.dl[j]))
    xl[1] = prop.dist - xl[1]
    # Terrain irregularity over the interior of the path.
    prop.dh = d1thx(pfl, xl[0], xl[1])
    if prop.dl[0] + prop.dl[1] > 1.5*prop.dist:
        # Line-of-sight (or nearly so): fit one line over the whole window
        # and rederive effective heights and horizons from smooth-earth
        # formulas.
        za = 0 # Must initialize before calling z1sq1
        zb = 0 # Must initialize before calling z1sq1
        za, zb = z1sq1(pfl, xl[0], xl[1], za, zb) # Revised call to z1sq1
        prop.he[0] = prop.hg[0] + fortran_dim(pfl[2], za)
        prop.he[1] = prop.hg[1] + fortran_dim(pfl[np+2], zb)
        for j in range(2):
            prop.dl[j] = (2.0*prop.he[j]/prop.gme)**0.5 \
                * math.exp(-0.07*(prop.dh/max(prop.he[j],5.0))**0.5)
        q = prop.dl[0] + prop.dl[1]
        if q <= prop.dist:
            # Scale effective heights so the derived horizons span the path.
            q = pow(prop.dist/q, 2.0)
            for j in range(2):
                prop.he[j] *= q
                prop.dl[j] = (2.0*prop.he[j]/prop.gme)**0.5 \
                    * math.exp(-0.07*(prop.dh/max(prop.he[j],5.0))**0.5)
        for j in range(2):
            q = (2.0*prop.he[j]/prop.gme)**0.5
            prop.the[j] = (0.65*prop.dh*(q/prop.dl[j]-1.0)-2.0 \
                * prop.he[j])/q
    else:
        # Trans-horizon: fit a line near each terminal separately.
        za = 0 # Must initialize before using in function call
        q = 0 # Must initialize before using in function call
        za, q = z1sq1(pfl, xl[0], 0.9*prop.dl[0], za, q) # Revised call to z1sq1
        zb = 0 # Must initialize before using in function call
        q, zb = z1sq1(pfl, prop.dist-0.9*prop.dl[1], xl[1], q, zb) # Revised call
        prop.he[0] = prop.hg[0] + fortran_dim(pfl[2], za)
        prop.he[1] = prop.hg[1] + fortran_dim(pfl[np+2], zb)
    prop.mdp = -1   # point-to-point mode
    propv.lvar = max(propv.lvar, 3)
    if mdvarx >= 0:
        propv.mdvar = mdvarx
        propv.lvar = max(propv.lvar, 4)
    if klimx > 0:
        propv.klim = klimx
        propv.lvar = 5
    # Run the set-up pass of lrprop for this fixed path.
    lrprop(0.0, prop, propa)
    return 0
def deg2rad(d):
    """Legacy helper: convert an angle from degrees to radians."""
    return math.radians(d)
#//********************************************************
#//* Point-To-Point Mode Calculations *
#//********************************************************
def point_to_point(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                   eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                   dbloss, strmode, errnum):
    """
    ITM point-to-point propagation loss along a terrain profile.

    The trailing dbloss/strmode/errnum parameters mirror the C reference
    implementation's output arguments: they are ignored on input and the
    computed values are returned as a tuple (dbloss, strmode, errnum).
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ##                4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ##                7-Maritime Temperate, Over Sea
    ## conf, rel: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## errnum: 0- No Error.
    ##         1- Warning: Some parameters are nearly out of range.
    ##            Results should be used with caution.
    ##         2- Note: Default parameters have been substituted for impossible ones.
    ##         3- Warning: A combination of parameters is out of range.
    ##            Results are probably invalid.
    ##         Other- Warning: Some parameters are out of range.
    ##            Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1  # point-to-point mode
    # Standard normal deviates for the requested confidence/reliability.
    zc = qerfi(conf)
    zr = qerfi(rel)
    np = int(elev[0])  # number of profile intervals
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # Average the elevation over the central portion of the profile;
        # used by qlrps to reduce the surface refractivity to sea level.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb):
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 13  # WinnForum mod. ORIGINAL CODE HAS mdvar = 12 ***
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity, prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop, propa, propv)
    # Free-space loss in dB for the path length found by qlrpfl.
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    # Classify the path by comparing its length with the horizon distances.
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        strmode = "Line-Of-Sight Mode"
    else:
        if int(q) == 0.0:
            strmode = "Single Horizon"
        elif int(q) > 0.0:
            strmode = "Double Horizon"
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            strmode += ", Diffraction Dominant"
        elif prop.dist > propa.dx:
            strmode += ", Troposcatter Dominant"
    dbloss = avar(zr, 0.0, zc, prop, propv) + fs
    errnum = prop.kwx
    return dbloss, strmode, errnum
def point_to_pointMDH(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                      eno_ns_surfref, frq_mhz, radio_climate, pol, timepct,
                      locpct, confpct, dbloss, propmode, deltaH, errnum):
    """
    ITM point-to-point loss with separate time/location/confidence
    variability, returning a numeric propagation-mode code instead of a
    string.  The trailing dbloss/propmode/deltaH/errnum parameters are
    C-style output placeholders; results come back as a tuple.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ##                4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ##                7-Maritime Temperate, Over Sea
    ## timepct, locpct, confpct: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## propmode:  Value   Mode
    ##             -1     mode is undefined
    ##              0     Line of Sight
    ##              5     Single Horizon, Diffraction
    ##              6     Single Horizon, Troposcatter
    ##              9     Double Horizon, Diffraction
    ##             10     Double Horizon, Troposcatter
    ## errnum: 0- No Error.
    ##         1- Warning: Some parameters are nearly out of range.
    ##            Results should be used with caution.
    ##         2- Note: Default parameters have been substituted for impossible ones.
    ##         3- Warning: A combination of parameters is out of range.
    ##            Results are probably invalid.
    ##         Other- Warning: Some parameters are out of range.
    ##            Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    propmode = -1  # mode is undefined
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1  # point-to-point mode
    # Standard normal deviates for the requested percentages.
    ztime = qerfi(timepct)
    zloc = qerfi(locpct)
    zconf = qerfi(confpct)
    np = int(elev[0])  # number of profile intervals
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # Average elevation of the central part of the path, used by
        # qlrps to reduce the surface refractivity.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb):
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 12
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity, prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop, propa, propv)
    # Free-space loss in dB.
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    deltaH = prop.dh
    # Classify the path; the numeric codes combine horizon count and
    # dominant mechanism (see table above).
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        propmode = 0  # Line-Of-Sight Mode
    else:
        if int(q) == 0.0:
            propmode = 4  # Single Horizon
        elif int(q) > 0.0:
            propmode = 8  # Double Horizon
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            propmode += 1  # Diffraction Dominant
        elif prop.dist > propa.dx:
            propmode += 2  # Troposcatter Dominant
    dbloss = avar(ztime, zloc, zconf, prop, propv) + fs  # avar(time,location,confidence)
    errnum = prop.kwx
    return dbloss, propmode, deltaH, errnum
def point_to_pointDH (elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                      eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                      dbloss, deltaH, errnum):
    """
    ITM point-to-point loss that additionally returns the terrain
    irregularity parameter deltaH computed from the profile.  The
    trailing dbloss/deltaH/errnum parameters are C-style output
    placeholders; results come back as a tuple.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ##                4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ##                7-Maritime Temperate, Over Sea
    ## conf, rel: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## errnum: 0- No Error.
    ##         1- Warning: Some parameters are nearly out of range.
    ##            Results should be used with caution.
    ##         2- Note: Default parameters have been substituted for impossible ones.
    ##         3- Warning: A combination of parameters is out of range.
    ##            Results are probably invalid.
    ##         Other- Warning: Some parameters are out of range.
    ##            Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1  # point-to-point mode
    # Standard normal deviates for confidence/reliability.
    zc = qerfi(conf)
    zr = qerfi(rel)
    np = int(elev[0])  # number of profile intervals
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # Average elevation of the central part of the path.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb):  # (i=ja-1;i<jb;++i)
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 12
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity, prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop, propa, propv)
    # Free-space loss in dB.
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    deltaH = prop.dh
    # Classify the path relative to the horizon distances.
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        strmode = "Line-Of-Sight Mode"
    else:
        if int(q) == 0.0:
            strmode = "Single Horizon"
        elif int(q) > 0.0:
            strmode = "Double Horizon"
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            strmode += ", Diffraction Dominant"
        elif prop.dist > propa.dx:
            strmode += ", Troposcatter Dominant"
    dbloss = avar(zr, 0.0, zc, prop, propv) + fs  # avar(time,location,confidence)
    errnum = prop.kwx
    return dbloss, deltaH, errnum, strmode  # Original routine never returns strmode
##//********************************************************
##//* Area Mode Calculations *
##//********************************************************
def area(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria, RSiteCriteria,
         eps_dielect, sgm_conductivity, eno_ns_surfref, frq_mhz, radio_climate,
         pol, pctTime, pctLoc, pctConf, dbloss, strmode, errnum):
    """
    ITM area-mode prediction: loss for a statistical path of length
    dist_km characterized by deltaH, rather than an explicit terrain
    profile.  The trailing dbloss/strmode/errnum parameters are C-style
    output placeholders; (dbloss, errnum) is returned as a tuple.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## TSiteCriteria, RSiteCriteria:
    ##		   0 - random, 1 - careful, 2 - very careful
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ##                4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ##                7-Maritime Temperate, Over Sea
    ## ModVar: 0 - Single: pctConf is "Time/Situation/Location", pctTime, pctLoc not used
    ##         1 - Individual: pctTime is "Situation/Location", pctConf is "Confidence", pctLoc not used
    ##         2 - Mobile: pctTime is "Time/Locations (Reliability)", pctConf is "Confidence", pctLoc not used
    ##         3 - Broadcast: pctTime is "Time", pctLoc is "Location", pctConf is "Confidence"
    ## pctTime, pctLoc, pctConf: .01 to .99
    ## errnum: 0- No Error.
    ##         1- Warning: Some parameters are nearly out of range.
    ##            Results should be used with caution.
    ##         2- Note: Default parameters have been substituted for impossible ones.
    ##         3- Warning: A combination of parameters is out of range.
    ##            Results are probably invalid.
    ##         Other- Warning: Some parameters are out of range.
    ##            Results are probably invalid.
    ## NOTE: strmode is not used at this time.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    # Siting criteria for transmitter and receiver.
    kst = [int(TSiteCriteria), int(RSiteCriteria)]
    # Standard normal deviates for the requested percentages.
    zt = qerfi(pctTime)
    zl = qerfi(pctLoc)
    zc = qerfi(pctConf)
    eps = eps_dielect
    sgm = sgm_conductivity
    eno = eno_ns_surfref
    prop.dh = deltaH
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = int(radio_climate)
    prop.ens = eno
    prop.kwx = 0
    ivar = int(ModVar)
    ipol = int(pol)
    qlrps(frq_mhz, 0.0, eno, ipol, eps, sgm, prop)
    qlra(kst, propv.klim, ivar, prop, propv)
    if propv.lvar < 1:
        propv.lvar = 1
    lrprop(dist_km * 1000.0, prop, propa)
    # Free-space loss in dB plus the variability-adjusted excess loss.
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    xlb = fs + avar(zt, zl, zc, prop, propv)
    dbloss = xlb
    if prop.kwx == 0:
        errnum = 0
    else:
        errnum = prop.kwx
    return dbloss, errnum
def ITMAreadBLoss(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria,
                  RSiteCriteria, eps_dielect, sgm_conductivity, eno_ns_surfref,
                  frq_mhz, radio_climate, pol, pctTime, pctLoc, pctConf):
    """
    Convenience wrapper around area() that hides its C-style output
    placeholder arguments and returns (dbloss, errnum).
    """
    # area() expects in/out placeholders for its last three arguments,
    # so seed them before the call.
    loss_db = 0.
    err_code = 0
    mode_str = ''
    loss_db, err_code = \
        area(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria,
             RSiteCriteria, eps_dielect, sgm_conductivity, eno_ns_surfref,
             frq_mhz, radio_climate, pol, pctTime, pctLoc, pctConf,
             loss_db, mode_str, err_code)
    return loss_db, err_code
def ITMDLLVersion():
    """Return the version number of this ITM implementation."""
    version = 7.0
    return version
# Test code:
# Set any of these to True to run the corresponding self-test driver at
# the bottom of this module when it is executed; all default to False so
# importing the module has no side effects.
p2pTest = False
p2pMDHtest = False
p2pDHtest = False
areaTest = False
def setElevation():
    """
    Returns an elevation profile for testing point-to-point prop mode.

    Format matches the ITM elev[] convention:
    [num points - 1, delta dist (meters), height 1 (m), ..., height n (m)].

    Andrew Clegg
    November 2016
    """
    # This is the GLOBE terrain profile from MSAM, from 39N 77W to 39N 77.5W
##    return [95, 454.7352316, 89., 92., 89., 92., 100., 104., 106., 108., 106.,
##            100., 88., 80., 75., 78., 80., 80., 86., 91., 98., 105., 110., 107.,
##            103., 97., 91., 89., 92., 87., 81., 79., 77., 75., 80., 85., 89., 98.,
##            105., 107., 107., 106., 102., 105., 112., 108., 99., 84., 61., 51.,
##            74., 86., 93., 97., 100., 102., 109., 114., 116., 117., 117., 112.,
##            113., 117., 122., 129., 138., 131., 119., 103., 93., 87., 83., 86., 97.,
##            99., 103., 111., 108., 101., 97., 95., 95., 94., 90., 85., 81., 78.,
##            78., 78., 78., 79., 83., 89., 89., 91., 96., 101.]
    # This is the Crystal Palace to Mursley, England path from the ITM test code
    return [156, 499, 96, 84, 65, 46, 46, 46, 61, 41, 33, 27, 23,
            19, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            17, 19, 21, 23, 25, 27, 29, 35, 46, 41, 35, 30, 33,
            35, 37, 40, 35, 30, 51, 62, 76, 46, 46, 46, 46, 46,
            46, 50, 56, 67, 106, 83, 95, 112, 137, 137, 76, 103, 122,
            122, 83, 71, 61, 64, 67, 71, 74, 77, 79, 86, 91, 83,
            76, 68, 63, 76, 107, 107, 107, 119, 127, 133, 135, 137, 142,
            148, 152, 152, 107, 137, 104, 91, 99, 120, 152, 152, 137, 168,
            168, 122, 137, 137, 170, 183, 183, 187, 194, 201, 192, 152, 152,
            166, 177, 198, 156, 127, 116, 107, 104, 101, 98, 95, 103, 91,
            97, 102, 107, 107, 107, 103, 98, 94, 91, 105, 122, 122, 122,
            122, 122, 137, 137, 137, 137, 137, 137, 137, 137, 140, 144, 147,
            150, 152, 159]
# Self-test drivers.  Each block runs only when its module-level flag is
# set to True (see the flags above).  Python 2 print statements.
if p2pTest:
    #================================
    # Example of running in p2p mode
    #================================
    elev = setElevation()
    ModVar = 3  # Broadcast
    deltaH = 91.
    tht_m = 10.0  # Tx height
    rht_m = 10.  # Rx height
    TSiteCriteria = 0  # Random
    RSiteCriteria = 0  # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.0
    radio_climate = 5  # Continental Temperate
    pol = 0  # Horizontal (NB: the original comment said "Vertical", but 0 is Horizontal)
    rel = 0.5
    conf = 0.5
    # Must initialize these variables since they are passed to the function
    dbloss = 0
    strmode = ''
    errnum = 0
    a_rel = [0.01, 0.1, 0.5, 0.9, 0.99]
    a_conf = [0.5, 0.9, 0.1]
    # Sweep reliability/confidence and print the resulting loss and mode.
    for rel in a_rel:
        for conf in a_conf:
            dbloss, strmode, errnum = \
                point_to_point(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                               eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                               dbloss, strmode, errnum)
            print rel, conf, dbloss, strmode, errnum

if p2pMDHtest:
    elev = setElevation()
    ModVar = 3  # Broadcast
    deltaH = 0.
    tht_m = 10.  # Tx height
    rht_m = 10.  # Rx height
    TSiteCriteria = 0  # Random
    RSiteCriteria = 0  # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5  # Continental Temperate
    pol = 1  # Vertical
    timepct = 0.5
    locpct = 0.5
    confpct = 0.5
    # Initialize before using in function call
    dbloss = propmode = deltaH = errnum = 0
    dbloss, propmode, deltaH, errnum = \
        point_to_pointMDH(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                          eno_ns_surfref, frq_mhz, radio_climate, pol, timepct,
                          locpct, confpct, dbloss, propmode, deltaH, errnum)
    print dbloss, propmode, deltaH, errnum

if p2pDHtest:
    elev = setElevation()
    ModVar = 3  # Broadcast
    deltaH = 0.
    tht_m = 10.  # Tx height
    rht_m = 10.  # Rx height
    TSiteCriteria = 0  # Random
    RSiteCriteria = 0  # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5  # Continental Temperate
    pol = 1  # Vertical
    conf = 0.5
    rel = 0.5
    # Initialize before using in function call
    dbloss = deltaH = errnum = 0
    strmode = ''
    dbloss, deltaH, errnum, strmode = \
        point_to_pointDH (elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                          eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                          dbloss, deltaH, errnum)
    print dbloss, deltaH, errnum, strmode

if areaTest:
    #================================
    # Example of running in area mode
    #================================
    ModVar = 3  # Broadcast
    deltaH = 0.
    tht_m = 10.  # Tx height
    rht_m = 10.  # Rx height
    TSiteCriteria = 0  # Random
    RSiteCriteria = 0  # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5  # Continental Temperate
    pol = 1  # Vertical
    pctTime = 0.5
    pctLoc = 0.5
    pctConf = 0.5
    # Sweep distance from 10 to 100 km and print the predicted loss.
    for dist_km in range(10, 101):
        temp = ITMAreadBLoss(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria,
                             RSiteCriteria, eps_dielect, sgm_conductivity, eno_ns_surfref,
                             frq_mhz, radio_climate, pol, pctTime, pctLoc, pctConf)
        print dist_km, temp[0]
#==================================
| krlinus/Spectrum-Access-System | src/prop_current/itm.py | Python | apache-2.0 | 49,748 | [
"CRYSTAL",
"VMD"
] | 86130ed8d93d64e3ff6e9bc3ca9c98ec827af547c3af18facf55379185cdb0e6 |
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import m5
import os
from m5 import internal
from m5.internal.stats import schedStatEvent as schedEvent
from m5.objects import Root
from m5.util import attrdict, fatal
# Try and include SQLAlchemy. If this is successful we allow it to be used,
# otherwise it is disabled.
try:
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
import sql as m5sql
SQL_ENABLED = True
except:
print "Failed to import sqlalchemy or m5.stats.sql. SQL will not be enabled."
SQL_ENABLED = False
# Global flag recording whether any statistics output (text or SQL) has
# been registered; dump() is a no-op while this is False.
STATS_OUTPUT_ENABLED = False

# Number of stats dumps performed so far.  Used by the SQL output to key
# each dump's values, and to write the stat metadata only on dump 0.
dump_count = 0
class OutputSQL(object):
    """ Class which outputs the stats to a database. """

    def __init__(self, filename):
        """ Create the database and add the tables used to store the stats. """
        self.filename = filename
        self.db = m5sql.create_database(self.filename)
        Session = sessionmaker(bind = self.db)
        m5sql.create_tables(self.db)
        self.session = Session()

    def visit(self, stat):
        """ Write the stats to the database.

        On the first dump we also write the information about the stats to the
        database. This is only done once.
        """
        # Relies on the module-level dump_count to distinguish the first
        # dump (metadata + values) from later dumps (values only).
        global dump_count
        if dump_count == 0:
            m5sql.add_stat_info(stat, self.session)
        m5sql.store_stat_value(stat, self.session, dump_count)

    def __call__(self, context):
        # Bulk variant of visit(): walks an entire stats context.
        global dump_count

        # On the first dump write the information about the stats to the
        # database.
        if dump_count == 0:
            for name,stat in context.iterate():
                m5sql.add_stat_info(stat, self.session)

        # Write the values to the database.
        for name,stat in context.iterate():
            m5sql.store_stat_value(stat, self.session, dump_count)

        # Commit our changes to the database. All changes are commited once
        # to allow SQLAlchemy to optimize the database accesses, resulting in
        # faster stats dumps.
        self.session.commit()

    def valid(self):
        """ Checks if the database file exists at the specified location. """
        return os.path.exists(self.filename)

    def begin(self, desc):
        # Record a description for this dump before the per-stat visits.
        m5sql.store_dump_desc(self.session, desc, dump_count)

    def end(self):
        """ Commits all the data at once. """
        self.session.commit()
# All registered stats outputs (text, SQL, ...); dump() visits each one.
outputList = []
def initText(filename, desc=True):
    """Register a plain-text statistics output writing to *filename*.

    *desc* controls whether stat descriptions are included.
    """
    global STATS_OUTPUT_ENABLED
    text_output = internal.stats.initText(filename, desc)
    outputList.append(text_output)
    STATS_OUTPUT_ENABLED = True
def initSimStats():
    """Create the simulator-wide statistics and register the Python-side
    stats handlers with the C++ framework."""
    internal.stats.initSimStats()
    internal.stats.registerPythonStatsHandlers()
def init_SQL(outputDirectory, filename):
    """ Add the stats database as an output and add it to outputList.

    Args:
        outputDirectory: The directory in which to store the database.
        filename: The filename to which the stats are written.

    Return:
        True if SQL support is available and the output was registered,
        False otherwise.
    """
    # Fixes: dropped the redundant function-local ``import os`` (os is
    # already imported at module scope) and the unnecessary
    # ``global SQL_ENABLED`` declaration (the flag is only read here).
    global STATS_OUTPUT_ENABLED

    if not SQL_ENABLED:
        return False

    # Take the supplied filename and prepend the output directory.
    output = OutputSQL(os.path.join(outputDirectory, filename))
    outputList.append(output)
    STATS_OUTPUT_ENABLED = True
    return True
def stats_output_enabled():
    """ Check that at least one statistics output format is enabled.

    Return:
        True if at least one output format is enabled, False otherwise
    """
    # The module-level flag is flipped by initText()/init_SQL().
    return True if STATS_OUTPUT_ENABLED else False
# Stat registries populated by enable():
names = []           # NOTE(review): appears unused in this module; verify
stats_dict = {}      # stat name -> stat object
stats_list = []      # stats sorted for display
raw_stats_list = []  # stats in original registration order
def enable():
    '''Enable the statistics package.  Before the statistics package is
    enabled, all statistics must be created and initialized and once
    the package is enabled, no more statistics can be created.'''
    # Collect the SWIG "dynamic_*" downcast helpers so we can recover
    # each stat's concrete Python type from the generic C++ pointer.
    __dynamic_cast = []
    for k, v in internal.stats.__dict__.iteritems():
        if k.startswith('dynamic_'):
            __dynamic_cast.append(v)

    for stat in internal.stats.statsList():
        for cast in __dynamic_cast:
            val = cast(stat)
            if val is not None:
                stats_list.append(val)
                raw_stats_list.append(val)
                break
        else:
            # for/else: no cast succeeded for this stat.
            fatal("unknown stat type %s", stat)

    for stat in stats_list:
        if not stat.check() or not stat.baseCheck():
            fatal("statistic '%s' (%d) was not properly initialized " \
                  "by a regStats() function\n", stat.name, stat.id)

        # Hide non-display stats behind a synthetic unique name.
        if not (stat.flags & flags.display):
            stat.name = "__Stat%06d" % stat.id

    # Sort the stats by their dotted name so related statistics are
    # grouped together in the output.
    #
    # Bug fix: the original passed a comparison function returning a
    # bool (``v1 < v2``) to list.sort(); a cmp function must return a
    # negative/zero/positive int, so the resulting order was wrong.
    # An explicit key is both correct and faster.
    stats_list.sort(key=lambda stat: stat.name.split('.'))

    for stat in stats_list:
        stats_dict[stat.name] = stat
        stat.enable()

    internal.stats.enable();
def prepare():
    '''Prepare all stats for data access.  This must be done before
    dumping and serialization.'''
    for statistic in stats_list:
        statistic.prepare()
# Tick of the most recent dump; used to suppress a second dump within
# the same simulation tick.
lastDump = 0
def dump(stats_desc=""):
    '''Dump all statistics data to the registered outputs.

    stats_desc: optional free-form description recorded with this dump
    (currently used by the SQL output's begin()).
    '''
    # Nothing to do until at least one output has been registered.
    if not STATS_OUTPUT_ENABLED:
        return

    curTick = m5.curTick()

    # Only dump once per tick.
    global lastDump
    assert lastDump <= curTick
    if lastDump == curTick:
        return
    lastDump = curTick

    internal.stats.processDumpQueue()

    prepare()

    for output in outputList:
        if output.valid():
            output.begin(stats_desc)
            for stat in stats_list:
                output.visit(stat)
            output.end()

    # Track how many dumps have happened; the SQL output keys its rows
    # on this counter.
    global dump_count
    dump_count = dump_count + 1
def reset():
    '''Reset all statistics to the base state'''
    # First ask every SimObject in the configuration hierarchy to reset
    # its own stats.
    root = Root.getInstance()
    if root:
        for obj in root.descendants():
            obj.resetStats()

    # Then reset the stats registered with this package and drain any
    # queued reset callbacks on the C++ side.
    for stat in stats_list:
        stat.reset()
    internal.stats.processResetQueue()
# Stat flag bit values; attribute access (e.g. flags.display) is
# provided by attrdict.
flags = attrdict({
    'none'    : 0x0000,
    'init'    : 0x0001,
    'display' : 0x0002,
    'total'   : 0x0010,
    'pdf'     : 0x0020,
    'cdf'     : 0x0040,
    'dist'    : 0x0080,
    'nozero'  : 0x0100,
    'nonan'   : 0x0200,
})
| andrewfu0325/gem5-aladdin | src/python/m5/stats/__init__.py | Python | bsd-3-clause | 7,968 | [
"VisIt"
] | 841842fdc9a17b554c86b65981cbc76bb0da0fecd74e33a308732c6c73e3885c |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
class RegionGrowing(ScriptedConfigModuleMixin, ModuleBase):
    """Region-growing filter module.

    Thresholds the input image around the mean intensity at the
    user-supplied seed points, then keeps only the thresholded voxels
    that are connected to those seeds (vtkImageSeedConnectivity).
    """

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        # Pipeline stage 1: binarize the input around the auto threshold.
        self._image_threshold = vtk.vtkImageThreshold()
        # seedconnect wants unsigned char at input
        self._image_threshold.SetOutputScalarTypeToUnsignedChar()
        self._image_threshold.SetInValue(1)
        self._image_threshold.SetOutValue(0)

        # Pipeline stage 2: keep only voxels connected to the seeds.
        self._seed_connect = vtk.vtkImageSeedConnectivity()
        self._seed_connect.SetInputConnectValue(1)
        self._seed_connect.SetOutputConnectedValue(1)
        self._seed_connect.SetOutputUnconnectedValue(0)
        self._seed_connect.SetInput(self._image_threshold.GetOutput())

        module_utils.setup_vtk_object_progress(self, self._seed_connect,
                                               'Performing region growing')
        module_utils.setup_vtk_object_progress(self, self._image_threshold,
                                               'Thresholding data')

        # we'll use this to keep a binding (reference) to the passed object
        self._input_points = None
        # this will be our internal list of points
        self._seed_points = []

        # Half-width of the automatic threshold window, in percent of the
        # input's scalar range.
        self._config._thresh_interval = 5

        config_list = [
            ('Auto threshold interval:', '_thresh_interval', 'base:float',
             'text',
             'Used to calculate automatic threshold (unit %).')]

        ScriptedConfigModuleMixin.__init__(
            self, config_list,
            {'Module (self)' : self,
             'vtkImageSeedConnectivity' : self._seed_connect,
             'vtkImageThreshold' : self._image_threshold})

        self.sync_module_logic_with_config()

    def close(self):
        ScriptedConfigModuleMixin.close(self)

        # get rid of our reference
        del self._image_threshold
        # Break the pipeline link before dropping our reference.
        self._seed_connect.SetInput(None)
        del self._seed_connect

        ModuleBase.close(self)

    def get_input_descriptions(self):
        return ('vtkImageData', 'Seed points')

    def set_input(self, idx, input_stream):
        if idx == 0:
            # will work for None and not-None
            self._image_threshold.SetInput(input_stream)
        else:
            if input_stream != self._input_points:
                self._input_points = input_stream

    def get_output_descriptions(self):
        return ('Region growing result (vtkImageData)',)

    def get_output(self, idx):
        return self._seed_connect.GetOutput()

    def logic_to_config(self):
        pass

    def config_to_logic(self):
        pass

    def execute_module(self):
        # Push any new seed points into the connectivity filter first.
        self._sync_to_input_points()

        # calculate automatic thresholds (we can only do this if we have
        # seed points and input data)
        ii = self._image_threshold.GetInput()
        if ii and self._seed_points:
            ii.Update()

            mins, maxs = ii.GetScalarRange()
            ranges = maxs - mins

            # Mean intensity over the seed points.
            sums = 0.0
            for seed_point in self._seed_points:
                # we assume 0'th component!
                v = ii.GetScalarComponentAsDouble(
                    seed_point[0], seed_point[1], seed_point[2], 0)
                sums = sums + v

            means = sums / float(len(self._seed_points))

            # Window of +/- thresh_interval percent of the scalar range
            # around the seed mean.
            lower_thresh = means - \
                float(self._config._thresh_interval / 100.0) * \
                float(ranges)
            upper_thresh = means + \
                float(self._config._thresh_interval / 100.0) * \
                float(ranges)

            print "Auto thresh: ", lower_thresh, " - ", upper_thresh

            self._image_threshold.ThresholdBetween(lower_thresh, upper_thresh)

        self._seed_connect.Update()

    def _sync_to_input_points(self):
        # extract a list from the input points
        temp_list = []
        if self._input_points:
            for i in self._input_points:
                temp_list.append(i['discrete'])

        # Only touch the VTK filter if the seed set actually changed.
        if temp_list != self._seed_points:
            self._seed_points = temp_list
            self._seed_connect.RemoveAllSeeds()
            # we need to call Modified() explicitly as RemoveAllSeeds()
            # doesn't.  AddSeed() does, but sometimes the list is empty at
            # this stage and AddSeed() isn't called.
            self._seed_connect.Modified()

            for seedPoint in self._seed_points:
                self._seed_connect.AddSeed(seedPoint[0], seedPoint[1],
                                           seedPoint[2])
| nagyistoce/devide | modules/filters/RegionGrowing.py | Python | bsd-3-clause | 4,889 | [
"VTK"
] | 05f024983e88ea17bf8b44adcb94c574d6d60b741848872ce56fb883381f963b |
"""
<description>
"""
import numpy as np
import pytest as pt
import teetool as tt
def test_basis():
    """
    Unit test for the Basis class: constructor validation, the rbf
    helper functions, and basis-matrix shapes for both basis types.
    """
    mpoints = 10
    x_test = np.linspace(0, 1, mpoints)

    mbasis = 5
    mdim = 3

    # test exception: unknown basis type must raise NotImplementedError
    with pt.raises(NotImplementedError) as testException:
        _ = tt.basis.Basis(basisType="Hello World!", nbasis=mbasis, ndim=mdim)

    myBasis = tt.basis.Basis(basisType="rbf", nbasis=mbasis, ndim=mdim)

    # test Gaussian radial basis function
    res = myBasis._funcRbf(x=0, mu1=0, sig1=1)
    assert(np.isfinite(res))

    res = myBasis._getBasisRbfVector(x_sca=0, nbasis=mbasis)
    assert(res.shape == (1, mbasis))
    #
    assert (np.any(np.isfinite(res)))

    # test bernstein
    res = myBasis._getBasisBernsteinVector(x_sca=0, nbasis=mbasis)
    assert(res.shape == (1, mbasis))
    #
    assert (np.any(np.isfinite(res)))

    # test assortment
    for mbasis in [5]:
        for mdim in [2]:
            for mtype in ["rbf", "bernstein"]:
                # settings
                myBasis = tt.basis.Basis(basisType=mtype, nbasis=mbasis, ndim=mdim)
                # obtain basis
                H = myBasis.get(x_test)
                # shape [mpoints x mgaus]
                assert (H.shape == (mpoints*mdim, mbasis*mdim))
                # all finite numbers
                # NOTE(review): this asserts on ``res`` from the earlier
                # section, not on H — probably meant np.isfinite(H); verify.
                assert (np.any(np.isfinite(res)))
| WillemEerland/teetool | test/test_basis.py | Python | mit | 1,365 | [
"Gaussian"
] | 67423dbf250ad3046609ce5a6428c41f63ec883a2b813803e21b58ca708d7eb9 |
'''
Created on 2013-05-29
@author: brian
'''
from src.util import Vector2
class Fallback(object):
    """AI task: when an enemy gets within 3 tiles, step one tile away
    from it along the axis of greatest separation (or the other axis if
    that tile is blocked)."""

    def __init__(self, actor):
        self.actor = actor
        self.target = None

    def condition(self):
        """Return True (and set self.direction) if there is a nearby
        enemy and a passable tile to retreat to."""
        self.target = None
        # NOTE(review): last match wins here — if several enemies are in
        # range this picks the last one iterated, not the nearest; verify
        # that is intended.
        for entity in self.actor.getEnemies():
            if self.actor.distance(entity.position) < 3:
                self.target = entity

        if self.target is None:
            return False

        # Offset from the threat to us; its dominant axis is the
        # preferred retreat direction, the other axis is the fallback.
        pos = Vector2(self.actor.position)
        pos -= self.target.position
        x, y = pos

        direction = (self.sign(x), 0) if abs(x) > abs(y) else (0, self.sign(y))
        alternative = (self.sign(x), 0) if abs(x) <= abs(y) else (0, self.sign(y))

        # Candidate destination tiles for each option.
        pos = Vector2(self.actor.position)
        pos += direction
        pos2 = Vector2(self.actor.position)
        pos2 += alternative

        if self.actor.world.isLocationPassable(pos):
            self.direction = direction
            return True
        elif self.actor.world.isLocationPassable(pos2):
            self.direction = alternative
            return True

        # Boxed in: no passable retreat tile.
        return False

    def do(self):
        # Execute the move chosen by condition().
        self.actor.move(self.direction)

    @staticmethod
    def sign(n):
        # Sign with sign(0) == 1 (never returns 0).
        return -1 if n < 0 else 1
| Greymerk/python-rpg | src/ai/task/fallback.py | Python | gpl-3.0 | 1,079 | [
"Brian"
] | d46f08a1f156df2c116d11a4e25404cf2cec22f727dc35b0cbb9a65c210329e5 |
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" a module for interacting with databases
"""
| strets123/rdkit | rdkit/Dbase/__init__.py | Python | bsd-3-clause | 340 | [
"RDKit"
] | e3ceda22e8ab851899c9459a2a6b9b902ac4e9d171c6f0612f37b35b319948f7 |
"""
N-ary Tree implementations.
@author: Lia Nemeth
"""
import weakref
class NaryTree(object):
    """
    A generic N-ary tree implementation, that uses a list to store
    its children.

    Parent links are held as weak references so a subtree does not keep
    its ancestors alive and no reference cycles are created.
    """
    def __init__(self, key=None, item=None, children=None, parent=None):
        self.key = key
        self.item = item
        self.children = children or []
        self._parent = weakref.ref(parent) if parent else None

    @property
    def parent(self):
        """Parent node, or None for a root (or a collected parent)."""
        if self._parent:
            return self._parent()

    def __getstate__(self):
        # Weak references cannot be pickled: drop the parent link here;
        # __setstate__ rebuilds the links from the children lists.
        self._parent = None
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__ = state
        for child in self.children:
            child._parent = weakref.ref(self)

    def __str__(self):
        return '<{type} - {key} : {item}>'.format(type=type(self).__name__,
                                                  key=self.key,
                                                  item=self.item)

    def is_leaf(self):
        """Return True if this node has no children."""
        return len(self.children) == 0

    def get_height(self):
        """Return the height of the subtree rooted here (a leaf is 1)."""
        heights = [child.get_height() for child in self.children]
        return max(heights) + 1 if heights else 1

    def traversal(self, visit=None, *args, **kwargs):
        """Pre-order traversal of the subtree.

        If *visit* is given, call ``visit(node, *args, **kwargs)`` on
        every node.  Returns the list of nodes in visiting order.

        Bug fix: the original called ``visit`` unconditionally even
        though it defaults to None, so ``traversal()`` with no arguments
        raised TypeError.
        """
        if visit is not None:
            visit(self, *args, **kwargs)
        nodes = [self]
        for child in self.children:
            nodes += child.traversal(visit, *args, **kwargs)
        return nodes

    def __iter__(self):
        """Iterate over the whole subtree in pre-order.

        Bug fix: the original yielded only the direct children instead
        of recursing, which was inconsistent with traversal().
        """
        yield self
        for child in self.children:
            for node in child:
                yield node

    def add_child(self, key=None, item=None):
        """Create a child node with *key*/*item*, attach it, return it."""
        child = NaryTree(key=key, item=item, parent=self)
        self.children.append(child)
        return child
| lucasnemeth/forest | forest/NaryTree.py | Python | bsd-3-clause | 1,678 | [
"VisIt"
] | 148c6765e83269a6bfb53cd08a7d144aa598d5b5fcf88078448f7168e7a099f9 |
import cgi
import os
import datetime
import HTMLParser
import json
import logging
import re
import ushlex as shlex
import urllib
from bson.objectid import ObjectId
from django.conf import settings
from django.contrib.auth import authenticate, login as user_login
from django.core.urlresolvers import reverse, resolve, get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.html import escape as html_escape
from django.utils.http import urlencode
from mongoengine.base import ValidationError
from operator import itemgetter
from crits.config.config import CRITsConfig
from crits.core.audit import AuditLog
from crits.core.bucket import Bucket
from crits.core.class_mapper import class_from_id, class_from_type, key_descriptor_from_obj_type, class_from_value
from crits.core.crits_mongoengine import Action, Releasability, json_handler
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.crits_mongoengine import EmbeddedPreferredAction
from crits.core.source_access import SourceAccess
from crits.core.data_tools import create_zip, format_file
from crits.core.mongo_tools import mongo_connector, get_file
from crits.core.sector import Sector
from crits.core.user import CRITsUser, EmbeddedSubscriptions
from crits.core.user import EmbeddedLoginAttempt
from crits.core.user_tools import user_sources, is_admin
from crits.core.user_tools import save_user_secret
from crits.core.user_tools import get_user_email_notification
from crits.actors.actor import Actor
from crits.backdoors.backdoor import Backdoor
from crits.campaigns.campaign import Campaign
from crits.certificates.certificate import Certificate
from crits.comments.comment import Comment
from crits.domains.domain import Domain
from crits.events.event import Event
from crits.exploits.exploit import Exploit
from crits.ips.ip import IP
from crits.notifications.handlers import get_user_notifications, generate_audit_notification
from crits.pcaps.pcap import PCAP
from crits.raw_data.raw_data import RawData
from crits.emails.email import Email
from crits.samples.sample import Sample
from crits.screenshots.screenshot import Screenshot
from crits.signatures.signature import Signature
from crits.targets.target import Target
from crits.indicators.indicator import Indicator
from crits.core.totp import valid_totp
logger = logging.getLogger(__name__)
def action_add(type_, id_, tlo_action, user=None, **kwargs):
    """
    Add an action to a TLO.

    :param type_: The class type of the top level object.
    :type type_: str
    :param id_: The ObjectId of the top level object to update.
    :type id_: str
    :param tlo_action: The information about the action.
    :type tlo_action: dict
    :param user: The user adding the action (recorded as the analyst).
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str) if failed,
              "object" (dict) if successful.
    """
    klass = class_from_type(type_)
    if klass is None:
        return {'success': False, 'message': 'Not a valid type: %s' % type_}
    # Only operate on objects the user's sources allow them to see.
    obj = klass.objects(id=id_, source__name__in=user_sources(user)).first()
    if obj is None:
        return {'success': False, 'message': 'Could not find TLO'}
    try:
        # Normalize any string dates into datetimes before use.
        tlo_action = datetime_parser(tlo_action)
        tlo_action['analyst'] = user
        obj.add_action(tlo_action['action_type'], tlo_action['active'],
                       tlo_action['analyst'], tlo_action['begin_date'],
                       tlo_action['end_date'], tlo_action['performed_date'],
                       tlo_action['reason'], tlo_action['date'])
        obj.save(username=user)
        return {'success': True, 'object': tlo_action}
    except (ValidationError, TypeError, KeyError) as e:
        return {'success': False, 'message': e}
def action_remove(type_, id_, date, user, **kwargs):
    """
    Remove an action from a TLO.

    :param type_: The class type of the top level object.
    :type type_: str
    :param id_: The ObjectId of the TLO to remove an action from.
    :type id_: str
    :param date: The date of the action to remove.
    :type date: datetime.datetime
    :param user: The user removing the action.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    klass = class_from_type(type_)
    if klass is None:
        return {'success': False, 'message': 'Not a valid type: %s' % type_}
    # Restrict the lookup to objects visible to this user's sources.
    obj = klass.objects(id=id_, source__name__in=user_sources(user)).first()
    if obj is None:
        return {'success': False, 'message': 'Could not find TLO'}
    try:
        # Actions are keyed by date, so parse before deleting.
        obj.delete_action(datetime_parser(date))
        obj.save(username=user)
        return {'success': True}
    except (ValidationError, TypeError) as e:
        return {'success': False, 'message': e}
def action_update(type_, id_, tlo_action, user=None, **kwargs):
    """
    Update an action for a TLO.

    :param type_: The class type of the top level object.
    :type type_: str
    :param id_: The ObjectId of the top level object to update.
    :type id_: str
    :param tlo_action: The information about the action.
    :type tlo_action: dict
    :param user: The user editing the action (recorded as the analyst).
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str) if failed,
              "object" (dict) if successful.
    """
    klass = class_from_type(type_)
    if klass is None:
        return {'success': False, 'message': 'Not a valid type: %s' % type_}
    # Only operate on objects the user's sources allow them to see.
    obj = klass.objects(id=id_, source__name__in=user_sources(user)).first()
    if obj is None:
        return {'success': False, 'message': 'Could not find TLO'}
    try:
        # Normalize any string dates into datetimes before use.
        tlo_action = datetime_parser(tlo_action)
        tlo_action['analyst'] = user
        obj.edit_action(tlo_action['action_type'], tlo_action['active'],
                        tlo_action['analyst'], tlo_action['begin_date'],
                        tlo_action['end_date'], tlo_action['performed_date'],
                        tlo_action['reason'], tlo_action['date'])
        obj.save(username=user)
        return {'success': True, 'object': tlo_action}
    except (ValidationError, TypeError) as e:
        return {'success': False, 'message': e}
def description_update(type_, id_, description, user, **kwargs):
    """
    Change the description of a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param description: The description to use.
    :type description: str
    :param user: The user setting the description.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    klass = class_from_type(type_)
    if klass is None:
        return {'success': False, 'message': 'Could not find object.'}
    # Source-controlled types must be filtered by the user's sources;
    # types without sources (e.g. user-level docs) are fetched directly.
    query = {'id': id_}
    if hasattr(klass, 'source'):
        query['source__name__in'] = user_sources(user)
    obj = klass.objects(**query).first()
    if obj is None:
        return {'success': False, 'message': 'Could not find object.'}
    # Have to unescape the submitted data. Use unescape() to escape
    # < and friends. Use urllib2.unquote() to escape %3C and friends.
    description = HTMLParser.HTMLParser().unescape(description)
    try:
        obj.description = description
        obj.save(username=user)
        return {'success': True, 'message': "Description set."}
    except ValidationError as e:
        return {'success': False, 'message': e}
def data_update(type_, id_, data, analyst):
    """
    Change the data of a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param data: The data to use.
    :type data: str
    :param analyst: The user setting the data.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    klass = class_from_type(type_)
    if klass is None:
        return {'success': False, 'message': 'Could not find object.'}
    # Source-controlled types must be filtered by the analyst's sources.
    query = {'id': id_}
    if hasattr(klass, 'source'):
        query['source__name__in'] = user_sources(analyst)
    obj = klass.objects(**query).first()
    if obj is None:
        return {'success': False, 'message': 'Could not find object.'}
    # Have to unescape the submitted data. Use unescape() to escape
    # < and friends. Use urllib2.unquote() to escape %3C and friends.
    data = HTMLParser.HTMLParser().unescape(data)
    try:
        obj.data = data
        obj.save(username=analyst)
        return {'success': True, 'message': "Data set."}
    except ValidationError as e:
        return {'success': False, 'message': e}
def get_favorites(analyst):
    """
    Get all favorites for a user.
    :param analyst: The username.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "results" (string)
    """
    user = CRITsUser.objects(username=analyst).first()
    if not user:
        return {'success': False, 'message': '<div id="favorites_results">Could not find user.</div>'}
    # favorites maps a TLO type name to a list of ObjectId strings.
    favorites = user.favorites.to_dict()
    if not favorites:
        return {'success': True, 'message': '<div id="favorites_results">You have no favorites.</div>'}
    # Map each TLO type to the attribute used as its display label.
    field_dict = {
        'Actor': 'name',
        'Backdoor': 'name',
        'Campaign': 'name',
        'Certificate': 'filename',
        'Comment': 'object_id',
        'Domain': 'domain',
        'Email': 'id',
        'Event': 'title',
        'Exploit': 'name',
        'Indicator': 'id',
        'IP': 'ip',
        'PCAP': 'filename',
        'RawData': 'title',
        'Sample': 'filename',
        'Screenshot': 'id',
        'Signature': 'title',
        'Target': 'email_address'
    }
    # Build the HTML table of favorites that the caller injects directly
    # into the page (one row per favorited object).
    results = '''
            <table>
                <tbody>
    '''
    for type_, attr in field_dict.iteritems():
        if type_ in favorites:
            ids = [ObjectId(s) for s in favorites[type_]]
            # Fetch only the label attribute for the favorited ids of this type.
            objs = class_from_type(type_).objects(id__in=ids).only(attr)
            for obj in objs:
                obj_attr = getattr(obj, attr)
                results += '<tr><td>%s</td><td><a href="%s">%s</a></td>' % (type_,
                                                                            reverse('crits.core.views.details',
                                                                                    args=(type_, str(obj.id))),
                                                                            obj_attr)
                # Trash icon carries the type/id so the UI can un-favorite inline.
                results += '<td><span class="ui-icon ui-icon-trash remove_favorite favorites_icon_active" '
                results += 'data-type="%s" data-id="%s"></span></td><td width="5px"></td></tr>' % (type_, str(obj.id))
    results += '</tbody></table>'
    return {'success': True, 'results': results}
def favorite_update(type_, id_, analyst):
    """
    Toggle the favorite of a top-level object in a user profile on or off.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param analyst: The user toggling the favorite.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    user = CRITsUser.objects(username=analyst).first()
    if not user:
        return {'success': False, 'message': 'Could not find user.'}
    # Toggle: remove the id if it is already a favorite, otherwise add it.
    if id_ in user.favorites[type_]:
        user.favorites[type_].remove(id_)
    else:
        user.favorites[type_].append(id_)
    try:
        user.save()
    except Exception as e:
        # Best-effort save: keep the original "always succeed" contract, but
        # log the failure instead of silently swallowing it (previously a
        # bare ``except: pass``).
        logger.warning('Failed to save favorite update for %s: %s', analyst, e)
    return {'success': True}
def status_update(type_, id_, value="In Progress", user=None, **kwargs):
    """
    Update the status of a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param value: The status to set it to.
    :type value: str
    :param user: The user setting the status.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': 'Could not find object.'}
    try:
        obj.set_status(value)
        # set_status() silently ignores unknown values, so compare the
        # resulting status to detect a rejected value.
        if obj.status != value:
            return {'success': False, 'message': 'Invalid status: %s.' % value}
        obj.save(username=user)
        return {'success': True, 'value': value}
    except ValidationError as e:
        return {'success': False, 'message': e}
def get_data_for_item(item_type, item_id):
    """
    Get a minimal amount of data for the passed item.
    Used by the clipboard to provide selected item information.

    :param item_type: Item type (Domain, Indicator, etc...)
    :type item_type: str
    :param item_id: Item database ID (_id)
    :type item_id: str
    :returns: dict -- Contains the item data ('OK' is 1 on success,
              'Msg' holds the error text on failure).
    """
    # Fields worth surfacing in the clipboard, per supported item type.
    type_to_fields = {
        'Actor': ['name', ],
        'Backdoor': ['name', ],
        'Campaign': ['name', ],
        'Certificate': ['filename', ],
        'Domain': ['domain', ],
        'Email': ['from_address', 'date', ],
        'Event': ['title', 'event_type', ],
        'Exploit': ['name', 'cve', ],
        'Indicator': ['value', 'ind_type', ],
        'IP': ['ip', 'type', ],
        'PCAP': ['filename', ],
        'RawData': ['title', ],
        'Sample': ['filename', ],
        'Signature': ['title', ],
        'Target': ['email_address', ],
    }
    response = {'OK': 0, 'Msg': ''}
    if not item_id or not item_type:
        response['Msg'] = "No item data provided"
        return response
    if item_type not in type_to_fields:
        response['Msg'] = "Invalid item type: %s" % item_type
        return response
    doc = class_from_id(item_type, item_id)
    if not doc:
        response['Msg'] = "Item not found"
        return response
    response['OK'] = 1
    response['data'] = {}
    for field in type_to_fields[item_type]:
        if field in doc:
            value = doc[field]
            # Keep clipboard entries short: elide the middle of long values.
            # NOTE(review): assumes the field value supports len()/slicing
            # (i.e. is string-like) — confirm for fields such as Email.date.
            if len(value) > 30:
                value = value[:15] + '...' + value[-15:]
            response['data'][field.title()] = value
    return response
def add_rfi(type_, id_, topic, analyst, source, date=None):
    """
    Adds a new RFI topic to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param topic: The topic of the RFI.
    :type topic: str
    :param analyst: The user adding this RFI topic.
    :type analyst: str
    :param source: The source of the user.
    :type source: str
    :param date: The date added.
    :type date: datetime
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        result = obj.new_rfi_topic(topic, analyst, source, date)
        if result['success']:
            obj.save(username=analyst)
            obj.reload()
            return {'success': True, 'obj': obj.to_dict()['rfi']}
        return {'success': False, 'message': result['message']}
    except Exception as e:
        return {'success': False, 'message': "Could not add new RFI: %s" % e}
def add_rfi_request(type_, id_, topic, rfi, analyst, source, date=None, status=None):
    """
    Adds a new RFI request to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param topic: The topic of the RFI.
    :type topic: str
    :param rfi: The request text of the RFI.
    :type rfi: str
    :param analyst: The user adding this RFI request.
    :type analyst: str
    :param source: The source of the user.
    :type source: str
    :param date: The date added.
    :type date: datetime
    :param status: The status of the RFI.
    :type status: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        result = obj.rfi_request(topic, rfi, analyst, source, date, status)
        if not result['success']:
            # The topic may not exist yet; create it and retry once.
            if obj.new_rfi_topic(topic, analyst, source, date)['success']:
                result = obj.rfi_request(topic, rfi, analyst, source, date, status)
        if not result['success']:
            return {'success': False, 'message': result['message']}
        obj.save(username=analyst)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['rfi']}
    except Exception as e:
        return {'success': False, 'message': "Could not add RFI Question: %s" % e}
def toggle_rfi_status(type_, id_, analyst, topic, question, response=None):
    """
    Toggles the RFI status of an item (OLD/NEW).

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param analyst: The user toggling the status.
    :type analyst: str
    :param topic: The topic of the RFI.
    :type topic: str
    :param question: The request of the RFI.
    :type question: str
    :param response: The response of the RFI.
    :type response: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        result = obj.toggle_rfi_status(topic, question, response)
        if result['success']:
            obj.save(username=analyst)
            obj.reload()
            return {'success': True, 'obj': obj.to_dict()['rfi']}
        return {'success': False, 'message': result['message']}
    except Exception as e:
        return {'success': False, 'message': "Could not toggle RFI Status: %s" % e}
def add_rfi_response(type_, id_, topic, response, question, analyst, source, date=None, status=None):
    """
    Adds a new RFI response to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param topic: The topic of the RFI.
    :type topic: str
    :param response: The response of the RFI.
    :type response: str
    :param question: The request of the RFI.
    :type question: str
    :param analyst: The user adding this RFI response.
    :type analyst: str
    :param source: The source of the user.
    :type source: str
    :param date: The date added.
    :type date: datetime
    :param status: The status of the RFI.
    :type status: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        result = obj.rfi_response(topic, response, question, analyst, source,
                                  date, status)
        if result['success']:
            obj.save(username=analyst)
            obj.reload()
            return {'success': True, 'obj': obj.to_dict()['rfi']}
        return {'success': False, 'message': result['message']}
    except Exception as e:
        return {'success': False, 'message': "Could not add RFI Answer: %s" % e}
def set_tlp(type_, id_, value, user):
    """
    Set the TLP color of a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param value: The TLP color to set.
    :type value: str ('AMBER', 'RED', 'GREEN', or 'WHITE')
    :param user: The user setting the TLP.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    if value not in ['AMBER', 'RED', 'GREEN', 'WHITE']:
        return {'success': False,
                'message': "Not a valid color"}
    obj = class_from_id(type_, id_)
    if not obj:
        return {'success': False,
                'message': "Could not find object"}
    try:
        # Don't let the taxii service overwrite an already-set TLP.
        # Previously this path returned a bare ``None``, which broke callers
        # that index into the result; return a proper status dict instead.
        if user == 'taxii' and obj.tlp:
            return {'success': False,
                    'message': "TLP already set; not overriding"}
        obj.tlp = value
        obj.save(username=user)
        obj.reload()
        return {'success': True,
                'obj': obj.to_dict()['tlp']}
    except Exception as e:
        return {'success': False,
                'message': "Could not add tlp: %s" % e}
def set_sighting(type_, id_, date, value, user):
    """
    Set sighting of a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param date: The date of the sighting.
    :type date: DateTime
    :param value: The value of the sighting. (T/F)
    :type value: boolean
    :param user: The user adding the sighting.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        obj.set_sighting(date, value)
        obj.save(username=user)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['sightings']}
    except Exception as e:
        return {'success': False, 'message': "Could not add sighting: %s" % e}
def add_sighting(type_, id_, name, date, user):
    """
    Add sighting to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param name: The organization to add the sighting for.
    :type name: str
    :param date: The date of the sighting.
    :type date: DateTime
    :param user: The user adding the sighting.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object"}
    try:
        obj.add_sighting(name, date)
        obj.save(username=user)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['sightings']}
    except Exception as e:
        return {'success': False, 'message': "Could not add sighting: %s" % e}
def set_releasability_flag(type_, id_, user, name=None, reference_id=None):
    """
    Set the releasability flag on a top-level object.
    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId (or, failing that, a value) to search for.
    :type id_: str
    :param user: The user setting the flag.
    :type user: str
    :param name: The source name the flag applies to.
    :type name: str
    :param reference_id: Reference id passed through to the object's
                         set_releasability_flag().
    :type reference_id: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if not obj:
        # Fall back to a lookup by value when id_ is not a known ObjectId.
        obj = class_from_value(type_, id_)
    if not obj:
        return {'success': False,
                'message': "Could not find object."}
    try:
        obj.set_releasability_flag(name=name, reference_id=reference_id)
        obj.save(username=user)
        obj.reload()
        return {'success': True,
                'obj': obj.to_dict()['releasability']}
    except Exception, e:
        return {'success': False,
                'message': "Could not set releasability flag: %s" % e}
def add_releasability(type_, id_, name, user, **kwargs):
    """
    Add releasability to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param name: The source to add releasability for.
    :type name: str
    :param user: The user adding the releasability.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False, 'message': "Could not find object."}
    try:
        # New releasability entries start with no release instances.
        obj.add_releasability(name=name, analyst=user, instances=[])
        obj.save(username=user)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['releasability']}
    except Exception as e:
        return {'success': False, 'message': "Could not add releasability: %s" % e}
def add_releasability_instance(type_, _id, name, analyst):
    """
    Add a releasability instance to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :param name: The source to add the releasability instance for.
    :type name: str
    :param analyst: The user adding the releasability instance.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, _id)
    if obj is None:
        return {'success': False, 'message': "Could not find object."}
    try:
        # Stamp the instance with the current time and the acting analyst.
        instance = Releasability.ReleaseInstance(analyst=analyst,
                                                 date=datetime.datetime.now())
        obj.add_releasability_instance(name=name, instance=instance)
        obj.save(username=analyst)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['releasability']}
    except Exception as e:
        return {'success': False,
                'message': "Could not add releasability instance: %s" % e}
def remove_releasability_instance(type_, _id, name, date, analyst):
    """
    Remove a releasability instance from a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :param name: The source to remove the releasability instance from.
    :type name: str
    :param date: The date of the instance being removed.
    :type date: datetime.datetime
    :param analyst: The user removing the releasability instance.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, _id)
    if obj is None:
        return {'success': False, 'message': "Could not find object."}
    try:
        obj.remove_releasability_instance(name=name, date=date)
        obj.save(username=analyst)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['releasability']}
    except Exception as e:
        return {'success': False,
                'message': "Could not remove releasability instance: %s" % e}
def remove_releasability(type_, _id, name, analyst):
    """
    Remove releasability from a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :param name: The source to remove from releasability.
    :type name: str
    :param analyst: The user removing the releasability.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message"/"obj"
    """
    obj = class_from_id(type_, _id)
    if obj is None:
        return {'success': False, 'message': "Could not find object."}
    try:
        obj.remove_releasability(name=name)
        obj.save(username=analyst)
        obj.reload()
        return {'success': True, 'obj': obj.to_dict()['releasability']}
    except Exception as e:
        return {'success': False,
                'message': "Could not remove releasability: %s" % e}
def sanitize_releasability(releasability, user_sources):
    """
    Remove any releasability that is for sources a user does not have
    access to see.

    :param releasability: The releasability list for a top-level object.
    :type releasability: list
    :param user_sources: The sources a user has access to.
    :type user_sources: list
    :returns: list
    """
    # currently this uses dictionary lookups.
    # when we move to classes, this should use attributes
    visible = []
    for entry in releasability:
        if entry['name'] in user_sources:
            visible.append(entry)
    return visible
def ui_themes():
    """
    Return a list of available UI themes.

    The themes are the entries of the jQuery themes directory under
    MEDIA_ROOT.

    :returns: list
    """
    # Use a distinct local name: the original shadowed the function name
    # itself with the result variable.
    themes_dir = os.path.join(settings.MEDIA_ROOT, 'css/jquery-themes')
    return os.listdir(themes_dir)
def does_source_exist(source, active=False):
    """
    Determine if a source exists.

    :param source: The name of the source to search for.
    :type source: str
    :param active: Whether the source also needs to be marked as active or not.
    :type active: boolean
    :returns: True, False
    """
    query = {'name': source}
    if active:
        query['active'] = 'on'
    # first() only fetches one document; the original len(...) > 0 forced
    # the whole result set to be materialized just to test existence.
    return SourceAccess.objects(__raw__=query).first() is not None
def add_new_source(source, analyst):
    """
    Add a new source to CRITs.

    :param source: The name of the new source.
    :type source: str
    :param analyst: The user adding the new source.
    :type analyst: str
    :returns: True, False
    """
    try:
        name = source.strip()
        # Refuse duplicates.
        if SourceAccess.objects(name=name).first():
            return False
        new_src = SourceAccess()
        new_src.name = name
        new_src.save(username=analyst)
        return True
    except ValidationError:
        return False
def merge_source_lists(left, right):
    """
    Merge two source lists into one.

    If ``left`` is None or empty, ``right`` is returned as-is; otherwise
    instances from ``right`` are folded into ``left`` and ``left`` is
    returned (mutated in place).

    :param left: Source list one.
    :type left: list
    :param right: Source list two.
    :type right: list
    :returns: list
    """
    if left is None or len(left) < 1:
        return right
    # Two instances with the same source name and the same date are assumed
    # to be the same instance.
    dates_by_name = {}
    for entry in left:
        dates_by_name[entry['name']] = [inst['date'] for inst in entry['instances']]
    for src in right:
        matched = False
        for entry in left:
            if entry['name'] == src['name']:
                matched = True
                known_dates = dates_by_name[entry['name']]
                for inst in src['instances']:
                    if inst['date'] not in known_dates:
                        entry['instances'].append(inst)
        if not matched:
            left.append(src)
    return left
def source_add_update(type_, id_, action_type, source, method='',
                      reference='', date=None, user=None, **kwargs):
    """
    Add or update a source for a top-level object.
    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param action_type: Whether or not we are doing an "add" or "update".
    :type action_type: str
    :param source: The name of the source.
    :type source: str
    :param method: The method of data acquisition for the source.
    :type method: str
    :param reference: The reference to the data for the source.
    :type reference: str
    :param date: The date of the instance to add/update.
    :type date: datetime.datetime
    :param user: The user performing the add/update.
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str),
              "object" (if successful)
                :class:`crits.core.crits_mongoengine.EmbeddedSource.SourceInstance`
    """
    obj = class_from_id(type_, id_)
    if not obj:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        date = datetime_parser(date)
        if action_type == "add":
            obj.add_source(source=source,
                           method=method,
                           reference=reference,
                           date=date,
                           analyst=user)
        else:
            obj.edit_source(source=source,
                            method=method,
                            reference=reference,
                            date=date,
                            analyst=user)
        obj.save(username=user)
        obj.reload()
        # Re-sanitize so we only hand back sources this user can see.
        obj.sanitize_sources(username=user)
        if not obj.source:
            return {'success': False,
                    'message': 'Object has no sources.'}
        # Locate the source (and, for edits, the exact instance) we just
        # touched so the caller gets the embedded object back for rendering.
        for s in obj.source:
            if s.name == source:
                if action_type == "add":
                    return {'success': True,
                            'object': s,
                            'message': "Source addition successful!"}
                else:
                    for i in s.instances:
                        if i.date == date:
                            return {'success': True,
                                    'object': s,
                                    'instance': i,
                                    'message': "Source addition successful!"}
                    # Only the first source with this name is inspected; if
                    # the edited instance was not found, fall out and fail.
                    break
        return {'success': False,
                'message': ('Could not make source changes. '
                            'Refresh page and try again.')}
    except (ValidationError, TypeError), e:
        return {'success':False, 'message': e}
def source_remove(type_, id_, name, date, user=None, **kwargs):
    """
    Remove a source instance from a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param name: The name of the source.
    :type name: str
    :param date: The date of the instance to remove.
    :type date: datetime.datetime
    :param user: The user performing the removal.
    :type user: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(type_, id_)
    if obj is None:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        # Instances are keyed by date, so parse before removing.
        result = obj.remove_source(source=name, date=datetime_parser(date))
        obj.save(username=user)
        return result
    except (ValidationError, TypeError) as e:
        return {'success':False, 'message': e}
def source_remove_all(obj_type, obj_id, name, analyst=None):
    """
    Remove a source (all of its instances) from a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param name: The name of the source.
    :type name: str
    :param analyst: The user performing the removal.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(obj_type, obj_id)
    if obj is None:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        result = obj.remove_source(source=name, remove_all=True)
        obj.save(username=analyst)
        return result
    except ValidationError as e:
        return {'success':False, 'message': e}
def get_sources(obj_type, obj_id, analyst):
    """
    Get a list of sources for a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param analyst: The user performing the search.
    :type analyst: str
    :returns: list if successful or dict with keys "success" (boolean) and
              "message" (str)
    """
    obj = class_from_id(obj_type, obj_id)
    if obj is None:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    # Strip out sources the requesting user cannot see before returning.
    obj.sanitize_sources(username=analyst)
    return obj.source
def get_source_names(active=False, limited=False, username=None):
    """
    Get a list of available sources in CRITs sorted alphabetically.

    :param active: Whether or not the sources returned should be active.
    :type active: boolean
    :param limited: If the sources should be limited to only those the user
                    has access to.
    :type limited: boolean
    :param username: The user requesting the source list.
    :type username: str
    :returns: list
    """
    raw_query = {}
    if limited:
        # Restrict to the sources this user is allowed to see.
        raw_query['name'] = {'$in': user_sources(username)}
    if active:
        raw_query['active'] = 'on'
    return SourceAccess.objects(__raw__=raw_query).order_by('+name')
def get_action_types_for_tlo(obj_type):
    """
    Get the names of all active Actions applicable to a TLO type.

    :param obj_type: The CRITs type to look up actions for (None yields []).
    :type obj_type: str
    :returns: list of action names, sorted alphabetically
    """
    if obj_type is None:
        return []
    actions = Action.objects(object_types=obj_type,
                             active='on').order_by("+name")
    return [a.name for a in actions]
def get_item_names(obj, active=None):
    """
    Get a list of item names for a specific item in CRITs.

    :param obj: The class representing the item to get names for.
    :type obj: class
    :param active: Return:
                   None: active and inactive items.
                   True: active items.
                   False: inactive items.
    :type active: boolean
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """
    # Don't use this to get sources. ``obj`` is a class, so the original
    # ``isinstance(obj, SourceAccess)`` test could never be True; compare
    # the class itself (the instance check is kept for safety).
    if obj is SourceAccess or isinstance(obj, SourceAccess):
        return []
    if active is None:
        return obj.objects().order_by('+name')
    if active:
        return obj.objects(active='on').order_by('+name')
    return obj.objects(active='off').order_by('+name')
def promote_bucket_list(bucket, confidence, name, related, description, analyst):
    """
    Promote a bucket to a Campaign. Every top-level object which is tagged with
    this specific bucket will get attributed to the provided campaign.
    :param bucket: The bucket to promote.
    :type bucket: str
    :param confidence: The Campaign confidence.
    :type confidence: str
    :param name: The Campaign name.
    :type name: str
    :param related: If we should extend this attribution to top-level objects
                    related to these top-level objects.
    :type related: boolean
    :param description: A description of this Campaign attribution.
    :type description: str
    :param analyst: The user promoting this bucket.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Local import, presumably to avoid a circular import at module load
    # time -- TODO confirm before moving it to the top of the file.
    from crits.campaigns.handlers import campaign_add
    bucket = Bucket.objects(name=bucket).first()
    if not bucket:
        return {'success': False, 'message': 'Unable to find bucket.'}
    # Walk every TLO type tracked in the Bucket schema, skipping the
    # bookkeeping fields ('name') and 'Campaign' itself.
    for ctype in [k for k in Bucket._meta['schema_doc'].keys() if k != 'name' and k != 'Campaign']:
        # Don't bother if the count for this type is 0
        if getattr(bucket, ctype, 0) == 0:
            continue
        klass = class_from_type(ctype)
        if not klass:
            continue
        # Attribute every object of this type carrying the bucket tag.
        objs = klass.objects(bucket_list=bucket.name)
        for obj in objs:
            campaign_add(name, confidence, description, related, analyst, obj=obj)
    return {'success': True,
            'message': 'Bucket successfully promoted. <a href="%s">View campaign.</a>' % reverse('crits.campaigns.views.campaign_details', args=(name,))}
def alter_bucket_list(obj, buckets, val):
    """
    Given a list of buckets on this object, increment or decrement
    the bucket_list objects accordingly. This is used when adding
    or removing a bucket list to an item, and when deleting an item.
    :param obj: The top-level object instantiated class.
    :type obj: class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
    :param buckets: List of buckets.
    :type buckets: list
    :param val: The amount to change the count by.
    :type val: int
    """
    # This dictionary is used to set values on insert only.
    # I haven't found a way to get mongoengine to use the defaults
    # when doing update_one() on the queryset.
    from crits.core.bucket import Bucket
    # Zero counters for every type except the one being changed by $inc.
    soi = { k: 0 for k in Bucket._meta['schema_doc'].keys() if k != 'name' and k != obj._meta['crits_type'] }
    soi['schema_version'] = Bucket._meta['latest_schema_version']
    # We are using mongo_connector here because mongoengine does not have
    # support for a setOnInsert option. If mongoengine were to gain support
    # for this we should switch to using it instead of pymongo here.
    buckets_col = mongo_connector(settings.COL_BUCKET_LISTS)
    for name in buckets:
        # Upsert so tagging with a brand-new bucket name creates its
        # counter document on the fly.
        buckets_col.update({'name': name},
                           {'$inc': {obj._meta['crits_type']: val},
                            '$setOnInsert': soi},
                           upsert=True)
        # Find and remove this bucket if, and only if, all counts are zero.
        if val == -1:
            Bucket.objects(name=name,
                           Actor=0,
                           Backdoor=0,
                           Campaign=0,
                           Certificate=0,
                           Domain=0,
                           Email=0,
                           Event=0,
                           Exploit=0,
                           Indicator=0,
                           IP=0,
                           PCAP=0,
                           RawData=0,
                           Sample=0,
                           Signature=0,
                           Target=0).delete()
def generate_bucket_csv(request):
    """
    Generate CSV output for the Bucket list.
    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Thin wrapper: csv_export handles field selection and response headers.
    return csv_export(request, Bucket)
def generate_bucket_jtable(request, option):
    """
    Generate the jtable data for rendering in the bucket list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    if option == 'jtlist':
        details_url = 'crits.core.views.bucket_list'
        details_key = 'name'
        # One column per TLO type, plus the bucket name itself.
        response = jtable_ajax_list(Bucket,
                                    details_url,
                                    details_key,
                                    request,
                                    includes=['name',
                                              'Actor',
                                              'Backdoor',
                                              'Campaign',
                                              'Certificate',
                                              'Domain',
                                              'Email',
                                              'Event',
                                              'Exploit',
                                              'Indicator',
                                              'IP',
                                              'PCAP',
                                              'RawData',
                                              'Sample',
                                              'Signature',
                                              'Target'])
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type='application/json')

    fields = ['name', 'Actor', 'Backdoor', 'Campaign', 'Certificate', 'Domain',
              'Email', 'Event', 'Exploit', 'Indicator', 'IP', 'PCAP', 'RawData',
              'Sample', 'Signature', 'Target', 'Promote']
    jtopts = {'title': 'Buckets',
              'fields': fields,
              'listurl': 'jtlist',
              'searchurl': reverse('crits.core.views.global_search_listing'),
              'default_sort': 'name ASC',
              'no_sort': ['Promote'],
              'details_link': ''}
    jtable = build_jtable(jtopts, request)

    # Attach a client-side 'display' renderer to each column: 'name' links to
    # a full-text bucket search, 'Promote' opens the campaign-add dialog, and
    # each type column links to that type's listing filtered by this bucket.
    for ctype in fields:
        if ctype == 'id':
            continue
        elif ctype == 'name':
            url = reverse('crits.core.views.global_search_listing') + '?search_type=bucket_list&search=Search&force_full=1'
        elif ctype == 'Promote':
            url = reverse('crits.core.views.bucket_promote')
        else:
            lower = ctype.lower()
            # RawData's listing view lives under the 'raw_data' module name,
            # so it cannot use the generic pluralized pattern below.
            if lower != "rawdata":
                url = reverse('crits.%ss.views.%ss_listing' % (lower, lower))
            else:
                lower = "raw_data"
                url = reverse('crits.%s.views.%s_listing' % (lower, lower))

        for field in jtable['fields']:
            if field['fieldname'].startswith("'" + ctype):
                if ctype == 'name':
                    field['display'] = """ function (data) {
                return '<a href="%s&q='+encodeURIComponent(data.record.name)+'">' + data.record.name + '</a>';
            }
            """ % url
                elif ctype == 'Promote':
                    # This is really ugly. I don't know of a better way to
                    # use the campaign addition form and also submit name of
                    # the bucket. So the form is POSTed but the URL also
                    # has a bucket parameter that is for the name of the
                    # to operate on.
                    field['display'] = """ function (data) {
                return '<div class="icon-container"><span class="add_button" data-intro="Add a campaign" data-position="right"><a href="#" action="%s?name='+encodeURIComponent(data.record.name)+'" class="ui-icon ui-icon-plusthick dialogClick" dialog="campaign-add" persona="promote" title="Promote to campaign"></a></span></div>'
            }
            """ % url
                else:
                    field['display'] = """ function (data) {
                return '<a href="%s?bucket_list='+encodeURIComponent(data.record.name)+'">'+data.record.%s+'</a>';
            }
            """ % (url, ctype)
    return render_to_response('bucket_lists.html',
                              {'jtable': jtable,
                               'jtid': 'bucket_lists'},
                              RequestContext(request))
def modify_bucket_list(itype, oid, tags, analyst):
    """
    Replace the bucket list on a top-level object.

    :param itype: The CRITs type of the top-level object to modify.
    :type itype: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param tags: The list of buckets.
    :type tags: list
    :param analyst: The user making the modifications.
    """
    target = class_from_id(itype, oid)
    if not target:
        return
    # append=False overwrites the existing bucket list instead of extending it.
    target.add_bucket_list(tags, analyst, append=False)
    try:
        target.save(username=analyst)
    except ValidationError:
        # Best effort: a validation failure leaves the object unsaved.
        pass
def write_INTREP_txt(j, tmp_path):
    """
    Given a json representation of a CRITs object, write a custom
    INTREP report in a text document.

    :param j: json representation of a CRITs object
    :type j: dict
    :param tmp_path: temporary path to save the INTREP document
    :type tmp_path: str
    :returns: The detected object type ("Sample", "Email", "Indicator"),
              or None if the object type could not be determined.
    """

    def add_sources(lines, sources):
        # One line per source, plus one indented line per source instance.
        for source in sources:
            lines.append("SAMPLE WAS SEEN AT {0}".format(source['name']))
            for instance in source['instances']:
                lines.append("\tANALYST {0} DISCOVERED ON {1}".format(instance['analyst'], instance['date']))
                if instance['method']:
                    lines.append("\t\tMETHOD OF DISCOVERY: {0}".format(instance['method']))
                if instance['reference']:
                    lines.append("\t\tREFERENCE OF DISCOVERY: {0}".format(instance['reference']))
        return lines

    def add_comments(lines, obj_id, obj_type):
        # Pull analyst comments for this object and append them.
        from crits.comments.handlers import get_comments
        comments = get_comments(obj_id, obj_type)
        if comments:
            lines.append("ANALYST COMMENTS")
            i = 0
            for comment in comments:
                lines.append("ANALYST {0} MADE THE FOLLOWING COMMENT AT {1}".format(comment.analyst, comment.date))
                # BUGFIX: list.append() takes exactly one argument; the comment
                # text was previously passed as a second argument, which raised
                # TypeError. Join number and text into a single line (matching
                # the "label: value" style used by write_INTREP_docx).
                lines.append("\tCOMMENT #{0}: {1}".format(i, comment.comment))
                i += 1
        return lines

    lines = []
    lines.append("INTELLIGENCE REPORT - INTREP")
    lines.append("############################")
    # BUGFIX: initialize j_type so the return below cannot raise NameError
    # when the object matches none of the known types (write_INTREP_docx
    # already does this).
    j_type = None
    if "filename" in j.keys():
        j_type = "Sample"
        lines.append("FILE SAMPLE")
        lines.append("FILENAME: " + j["filename"])
        try:
            lines.append("OTHER FILENAMES: " + j['filenames'])
        except Exception:
            pass
        lines.append("CREATED: " + j["created"])
        try:
            lines.append("FILETYPE: " + j["filetype"])
        except Exception:
            pass
        lines.append("FILE SIZE IN BYTES: " + str(j["size"]))
        lines.append("MD5: " + j["md5"])
        try:
            lines.append("SHA1: " + j["sha1"])
            lines.append("SHA256: " + j["sha256"])
            lines.append("SSDEEP: " + j["ssdeep"])
        except Exception:
            pass
        try:
            lines.append("MIMETYPE: " + j["mimetype"])
        except Exception:
            pass
        try:
            lines.append("\n")
            lines.append("PART OF CAMPAIGN: " + ",".join(j["campaign"]))
            lines.append("SECTORS TARGETED: " + ",".join(j["sectors"]))
            lines.append("BUCKET LIST: " + ",".join(j["bucket_list"]))
            lines.append("RELEASABILITY: " + ",".join(j["releasability"]))
            lines.append("\n")
            lines.append("THIS SAMPLE WAS SEEN AT FOLLOWING SOURCES")
            lines = add_sources(lines, j['source'])
            lines = add_comments(lines, j["_id"], j_type)
        except Exception:
            PrintException()
    elif "from" in j.keys():
        j_type = "Email"
        lines.append("EMAIL")
        try:
            lines.append("TO: " + ",".join(j["to"]))
            lines.append("FROM: " + j["from"])
            lines.append("SENDER: " + j["sender"])
            lines.append("DATE: " + j["isodate"])
            lines.append("SUBJECT: " + j["subject"])
            lines.append("\n")
            lines.append("X-MAILER: " + j["x_mailer"])
            lines.append("MESSAGE ID: " + j["message_id"])
            lines.append("REPLY TO: " + j["reply_to"])
            if 'originating_ip' in j:
                lines.append("ORIGINATING IP: " + j["originating_ip"])
                lines.append("X ORIGINATING IP: " + j["x_originating_ip"])
            lines.append("HELO: " + j["helo"])
            lines.append("BUCKET LIST: " + ",".join(j["bucket_list"]))
            lines.append("RELEASABILITY:" + ",".join(j["releasability"]))
            lines.append("\n")
        except Exception:
            PrintException()
        add_sources(lines, j['source'])
        add_comments(lines, j["_id"], j_type)
    elif "type" in j.keys():
        j_type = "Indicator"
        lines.append("INDICATOR")
        lines.append("TYPE: " + j["type"])
        lines.append("VALUE: " + j["value"])
        lines.append("DATE: " + j["created"])
        lines.append("THREAT TYPE: " + j["threat_type"])
        lines.append("ATTACK TYPE: " + j["attack_type"])
        lines.append("IMPACT: " + j["impact"])
        lines.append("CONFIDENCE: " + j["confidence"])
        lines.append("BUCKET LIST: " + j["bucket_list"])
        lines.append("RELEASABILITY: " + j["releasability"])
        lines.append("\n")
        lines = add_sources(lines, j['source'])
        lines = add_comments(lines, j["_id"], j_type)

    with open(tmp_path, 'w') as f:
        for line in lines:
            f.write(line + "\n")
    return j_type
def write_INTREP_docx(j, tmp_path):
    """
    Given a json representation of a CRITs object, write a custom
    INTREP report in a docx document.

    :param j: json represenation of a CRITs object
    :type j: dict
    :param tmp_path: temporary path to save the INTREP document
    :type tmp_path: str
    :returns: The detected object type ("Sample", "Email", "Indicator"),
              or None if the object type could not be determined.
    """
    def docx_write(doc,x=None,y=None):
        # Write one "x: y" paragraph. Lists are joined with ", "; if joining
        # fails (non-string elements, e.g. releasability dicts), each element
        # with 'analyst' and 'name' keys gets its own paragraph instead.
        # Called with no x/y, emits a blank paragraph as a separator.
        #print type(y)
        if isinstance(y,list):
            try:
                y = ", ".join(y)
            except TypeError:
                print y
                for i in y:
                    if "analyst" in i.keys() and "name" in i.keys():
                        doc.add_paragraph("RELEASABILITY: analyst {0} added releasability to {1}".format(i['analyst'], i['name']))
                return
        if x and y:
            doc.add_paragraph("{0}: {1}".format(x,y))
        elif not x and not y:
            #Print blank line
            doc.add_paragraph("")
        else:
            pass #Do not print empty objects

    def add_sources(doc,sources):
        # One paragraph per source, plus one per source instance.
        for source in sources:
            doc.add_paragraph("SAMPLE WAS SEEN AT {0}".format(source['name']))
            for instance in source['instances']:
                doc.add_paragraph("\tANALYST {0} DISCOVERED ON {1}".format(instance['analyst'],instance['date']))
                if instance['method']:
                    doc.add_paragraph("\t\tMETHOD OF DISCOVERY: {0}".format(instance['method']))
                if instance['reference']:
                    doc.add_paragraph("\t\tREFERENCE OF DISCOVERY: {0}".format(instance['reference']))

    def add_comments(doc,obj_id,obj_type):
        # Pull analyst comments for this object and append them.
        from crits.comments.handlers import get_comments
        comments = get_comments(obj_id,obj_type)
        if comments:
            doc.add_heading("ANALYST COMMENTS",2)
            i = 0
            #comment_pattern = r"'comment', u'([\w\s]+)'\)"
            for comment in comments:
                doc.add_paragraph("ANALYST {0} MADE THE FOLLOWING COMMENT AT {1}".format(comment.analyst,comment.date))
                docx_write(doc,"\tCOMMENT #"+str(i),comment.comment)
                i += 1

    import docx
    # NOTE(review): assigning to docx.text.run.Font.size mutates the class
    # attribute globally rather than a specific run's font — confirm this
    # actually affects the rendered document.
    docx.text.run.Font.size = docx.shared.Pt(12)
    doc = docx.Document()
    doc.add_heading("INTELLIGENCE REPORT [INTREP]",0)
    docx.text.run.Font.size = docx.shared.Pt(12)
    j_type = None
    if "filename" in j.keys():
        # Sample report: file metadata, hashes, campaign/sector context.
        j_type = "Sample"
        doc.add_heading("FILE SAMPLE",1)
        #doc.add_paragraph("FILENAME: ",j["filename"])
        docx_write(doc,"FILENAME",j["filename"])
        docx_write(doc,"OTHER FILENAMES",j['filenames'])
        docx_write(doc,"CREATED",j["created"])
        #doc.add_paragraph("FILETYPE: ",j["filetype"])
        try:
            docx_write(doc,"FILETYPE",j["filetype"])
        except:
            pass
        docx_write(doc,"FILE SIZE IN BYTES",j["size"])
        #doc.add_paragraph("MD5: ",j["md5"])
        docx_write(doc,"MD5",j["md5"])
        try:
            docx_write(doc,"SHA1",j["sha1"])
            docx_write(doc,"SHA256",j["sha256"])
            docx_write(doc,"SSDEEP",j["ssdeep"])
        except:
            print "caught special case"
        #doc.add_paragraph("CREATED: ",j["created"])
        try:
            docx_write(doc,"MIMETYPE",j["mimetype"])
        except:
            pass
        docx_write(doc)
        docx_write(doc,"PART OF CAMPAIGN",j["campaign"])
        docx_write(doc,"SECTORS TARGETED",j["sectors"])
        docx_write(doc,"BUCKET LIST",j["bucket_list"])
        docx_write(doc,"RELEASABILITY",j["releasability"])
        docx_write(doc)
        doc.add_heading("THIS SAMPLE WAS SEEN AT FOLLOWING SOURCES",2)
        add_sources(doc,j['source'])
        add_comments(doc,j["_id"],j_type)
    elif "from" in j.keys():
        # Email report: headers and routing metadata.
        j_type = "Email"
        doc.add_heading("EMAIL",1)
        docx_write(doc,"TO",j["to"])
        docx_write(doc,"FROM",j["from"])
        docx_write(doc,"SENDER", j["sender"])
        docx_write(doc,"DATE",j["isodate"])
        docx_write(doc,"SUBJECT",j["subject"])
        docx_write(doc)
        docx_write(doc,"X-MAILER",j["x_mailer"])
        docx_write(doc,"MESSAGE ID",j["message_id"])
        docx_write(doc,"REPLY TO",j["reply_to"])
        if 'originating_ip' in j:
            docx_write(doc,"ORIGINATING IP",j["originating_ip"])
            docx_write(doc,"X ORIGINATING IP",j["x_originating_ip"])
        docx_write(doc,"HELO",j["helo"])
        docx_write(doc,"BUCKET LIST",j["bucket_list"])
        docx_write(doc,"RELEASABILITY",j["releasability"])
        docx_write(doc)
        add_sources(doc,j['source'])
        add_comments(doc,j["_id"],j_type)
    elif "type" in j.keys():
        # Indicator report: type/value plus threat scoring.
        j_type = "Indicator"
        doc.add_heading("INDICATOR",1)
        docx_write(doc,"TYPE",j["type"])
        docx_write(doc,"VALUE",j["value"])
        docx_write(doc,"DATE",j["created"])
        docx_write(doc,"THREAT TYPE",j["threat_type"])
        docx_write(doc,"ATTACK TYPE",j["attack_type"])
        docx_write(doc,"IMPACT",j["impact"])
        docx_write(doc,"CONFIDENCE",j["confidence"])
        docx_write(doc,"BUCKET LIST",j["bucket_list"])
        docx_write(doc,"RELEASABILITY",j["releasability"])
        docx_write(doc)
        add_sources(doc,j['source'])
        add_comments(doc,j["_id"],j_type)
    docx_write(doc)
    doc.save(tmp_path)
    #doc.add_paragraph(json.dumps(j,sort_keys=True, indent=4,separators=(',',': ')))
    return j_type
def PrintException():
    """
    Print details about the exception currently being handled (source file,
    line number, the offending source line, and the exception object) to
    stdout. Must be called from inside an except block.
    """
    import linecache
    import sys
    exc_type, exc_obj, tb = sys.exc_info()
    frame = tb.tb_frame
    lineno = tb.tb_lineno
    fname = frame.f_code.co_filename
    # Refresh linecache in case the file changed since it was first read.
    linecache.checkcache(fname)
    src_line = linecache.getline(fname, lineno, frame.f_globals)
    print('EXCEPTION IN ({}, Line {} "{}"): {}'.format(fname, lineno, src_line.strip(), exc_obj))
def download_object_handler(total_limit, depth_limit, rel_limit, rst_fmt,
                            bin_fmt, object_types, objs, sources,
                            make_zip=True):
    """
    Given a list of tuples, collect the objects for each given the total
    number of objects to return for each, the depth to traverse for each
    and the maximum number of relationships to consider before ignoring.

    NOTE: This function can collect more than total_limit number of objects
    because total_limit applies only to each call to collect_objects() and
    not to the total number of things collected.

    :param total_limit: The max number of objects to return.
    :type total_limit: int
    :param depth_limit: The level of relationships to recurse into.
    :type depth_limit: int
    :param rel_limit: The limit on how many relationships a top-level object
                      should have before we ignore its relationships.
    :type rel_limit: int
    :param rst_fmt: The format the results should be in ("zip", "json",
                    "json_no_bin", "intrep_docx", "intrep_txt").
    :type rst_fmt: str
    :param bin_fmt: Encoding for binary data ("raw", "zlib", "base64", None).
    :type bin_fmt: str
    :param object_types: The types of top-level objects to include.
    :type object_types: list
    :param objs: A list of types (<obj_type>, <obj_id>) that we should use as
                 our basis to collect for downloading.
    :type objs: list
    :param sources: A list of sources to limit results against.
    :type sources: list
    :param make_zip: Unused; kept for interface compatibility.
    :type make_zip: boolean
    :returns: A dict with the keys:
        "success" (boolean),
        "filename" (str),
        "data" (str),
        "mimetype" (str)
    """
    result = {'success': False}
    json_docs = []
    to_zip = []

    # Binary file data is only needed for formats that include binaries.
    need_filedata = rst_fmt != 'json_no_bin'
    if rst_fmt == "intrep_docx" or rst_fmt == "intrep_txt":
        need_filedata = False
    if not need_filedata:
        bin_fmt = None

    # If bin_fmt is not zlib or base64, force it to base64.
    if rst_fmt == 'json' and bin_fmt not in ['zlib', 'base64']:
        bin_fmt = 'base64'

    for (obj_type, obj_id) in objs:
        # get related objects
        new_objects = collect_objects(obj_type, obj_id, depth_limit,
                                      total_limit, rel_limit, object_types,
                                      sources, need_filedata=need_filedata)

        # if result format calls for binary data to be zipped, loop over
        # collected objects and convert binary data to bin_fmt specified, then
        # add to the list of data to zip up
        for (oid, (otype, obj)) in new_objects.items():
            if ((otype == PCAP._meta['crits_type'] or
                 otype == Sample._meta['crits_type'] or
                 otype == Certificate._meta['crits_type']) and
                rst_fmt == 'zip'):
                if obj.filedata:  # if data is available
                    if bin_fmt == 'raw':
                        to_zip.append((obj.filename, obj.filedata.read()))
                    else:
                        (data, ext) = format_file(obj.filedata.read(),
                                                  bin_fmt)
                        to_zip.append((obj.filename + ext, data))
                    obj.filedata.seek(0)
            else:
                try:
                    json_docs.append(obj.to_json())
                except Exception:
                    # Best effort: skip objects that fail to serialize.
                    pass

    zip_count = len(to_zip)
    if rst_fmt == 'intrep_docx':
        #MD: TODO figure out how to manage the docx download without saving file
        tmp_path = '/tmp/del_me.docx'
        try:
            j = json.loads(json_docs[0])
            j_type = write_INTREP_docx(j, tmp_path)
        except Exception as e:
            print("Fail to load json for writing intrep")
            print(e)
            j_type = None
        if j_type is not None:
            # BUGFIX: close the temp file handle instead of leaking it.
            with open(tmp_path, 'rb') as f:
                doc_data = f.read()
            os.remove(tmp_path)
            result['success'] = True
            result['data'] = doc_data
            result['filename'] = "INTREP-"+j_type.upper()+"-"+str(j['_id'][-4:])+".docx"
            # BUGFIX: correct OOXML MIME type (was misspelled "wordprocessingm1").
            result['mimetype'] = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    elif rst_fmt == "intrep_txt":
        tmp_path = '/tmp/del_me.txt'
        try:
            j = json.loads(json_docs[0])
            j_type = write_INTREP_txt(j, tmp_path)
        except Exception as e:
            print("Failed to write INTREP txt")
            print(e)
            PrintException()
            j_type = None
        if j_type is not None:
            with open(tmp_path, 'rb') as f:
                doc_data = f.read()
            os.remove(tmp_path)
            result['success'] = True
            result['data'] = doc_data
            result['filename'] = "INTREP-"+j_type.upper()+"-"+str(j['_id'][-4:])+".txt"
            # BUGFIX: a text report is plain text, not a docx document.
            result['mimetype'] = 'text/plain'
    elif zip_count <= 0:
        result['success'] = True
        result['data'] = json_docs
        result['filename'] = "crits.json"
        result['mimetype'] = 'text/json'
    else:
        zip_data = to_zip
        for doc in json_docs:
            # NOTE(review): to_json() returns a JSON *string*, so doc['id']
            # looks like it would raise TypeError here — confirm how this
            # branch is exercised before relying on it.
            inner_filename = "%s.xml" % doc['id']
            zip_data.append((inner_filename, doc))
        result['success'] = True
        result['data'] = create_zip(zip_data, True)
        result['filename'] = "CRITS_%s.zip" % datetime.datetime.today().strftime("%Y-%m-%d")
        result['mimetype'] = 'application/zip'
    return result
def collect_objects(obj_type, obj_id, depth_limit, total_limit, rel_limit,
                    object_types, sources, need_filedata=True, depth=0):
    """
    Collects an object from the database, along with its related objects, to
    the specified depth, or until the total limit is reached. This is a
    breadth first traversal because I think it's better to get objects as
    close to the initial one as possible, rather than traversing to the
    bottom of a tree first.

    If depth_limit is 0, relationships are not examined.

    If an object has too many relationships (configurable system wide)
    then it is ignored and that branch of the relationship tree is not
    taken.

    The returned object types will be only those in object_types. If
    a sample is found without a valid filedata attribute it will be
    collected only if need_fildata is False.

    Objects are returned as a dictionary with the following key/value
    mapping:

    _id: (obj_type, crits_obj)

    Sources should be a list of the names of the sources the user has
    permission to access.

    :param obj_type: The CRITs top-level object type to work with.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param depth_limit: The level of relationships to recurse into.
    :type depth_limit: int
    :param total_limit: The max number of objects to return.
    :type total_limit: int
    :param rel_limit: The limit on how many relationhips a top-level object
                      should have before we ignore its relationships.
    :type rel_limit: int
    :param object_types: The types of top-level objects to include.
    :type object_types: list
    :param sources: A list of sources to limit results against.
    :type sources: list
    :param need_filedata: Include data from GridFS if applicable.
    :type need_filedata: boolean
    :param depth: Depth tracker. Default is 0 to start at no relationships and
                  work our way down.
    :type depth: int
    :returns: A dict with ObjectIds as keys, and values of tuples
              (<object_type>, <object>).
    """
    # Accumulator shared (via closure) by all recursive inner_collect calls.
    objects = {}

    # This dictionary is used to keep track of nodes that have been
    # seen already. This ensures that we do not circle back on the graph.
    seen_objects = {}

    def inner_collect(obj_type, obj, sources, depth, depth_limit, total_limit,
                      object_types, need_filedata):
        # Don't keep going if the total number of objects is reached.
        if len(objects) >= total_limit:
            return objects

        # Be cognizant of the need to collect samples with no backing binary
        # if the user asked for no binaries (need_filedata is False).
        #
        # If the object has a filedata attribute we need to collect it
        # if need_filedata is true and the filedata attribute is valid.
        # If the object does not have a valid filedata attribute and
        # need_filedata is False, then collect it (metadata only).
        #
        # If the object is not one we want to collect we will still traverse
        # down that path of the graph, but will not collect the object.
        if obj_type in object_types:
            if hasattr(obj, 'filedata'):
                if obj.filedata and need_filedata:
                    objects[obj.id] = (obj_type, obj)
                elif not need_filedata:
                    objects[obj.id] = (obj_type, obj)
            else:
                objects[obj.id] = (obj_type, obj)
        seen_objects[obj.id] = True

        # If not recursing (depth_limit == 0), return.
        # If at depth limit, return.
        if depth_limit == 0 or depth >= depth_limit:
            return objects

        new_objs = []
        for r in obj.relationships:
            # Don't touch objects we have already seen.
            if r.object_id in seen_objects:
                continue
            seen_objects[r.object_id] = True

            new_class = class_from_type(r.rel_type)
            if not new_class:
                continue
            # Only fetch related objects the user's sources permit.
            new_obj = new_class.objects(id=str(r.object_id),
                                        source__name__in=sources).first()
            if not new_obj:
                continue

            # Don't go down this branch if there are too many relationships.
            # This most often happens when a common resource is extracted
            # from many samples.
            if len(new_obj.relationships) > rel_limit:
                continue

            # Save the objects so we can recurse into them later.
            new_objs.append((r.rel_type, new_obj))

            # Try to collect the new object, but don't handle relationships.
            # Do this by setting depth_limit to 0.
            inner_collect(r.rel_type, new_obj, sources, depth, 0, total_limit,
                          object_types, need_filedata)

        # Each of the new objects become a new starting point for traverse.
        depth += 1
        for (new_type, new_obj) in new_objs:
            inner_collect(new_type, new_obj, sources, depth, depth_limit,
                          total_limit, object_types, need_filedata)
    # END OF INNER COLLECT

    klass = class_from_type(obj_type)
    if not klass:
        return objects

    obj = klass.objects(id=str(obj_id), source__name__in=sources).first()
    if not obj:
        return objects

    inner_collect(obj_type, obj, sources, 0, depth_limit, total_limit,
                  object_types, need_filedata)

    return objects
def modify_source_access(analyst, data):
    """
    Update a user profile.

    :param analyst: The user to update.
    :type analyst: str
    :param data: The user profile fields to change and their values.
    :type data: dict
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    user = CRITsUser.objects(username=data['username']).first()
    if not user:
        # No such user: try to create one from the supplied credentials.
        user = CRITsUser.create_user(
            data.get('username', ''),
            data.get('password', ''),
            data.get('email') )
    if not user:
        return {'success': False,
                'message': 'Missing user information username/password/email'}

    # Copy the simple profile fields straight across.
    for attr in ('first_name', 'last_name', 'email', 'role', 'sources',
                 'organization', 'totp', 'secret'):
        setattr(user, attr, data[attr])

    new_password = data.get('password', '')
    if len(new_password) > 1:
        if user.set_password(new_password) == False:
            config = CRITsConfig.objects().first()
            pc = config.password_complexity_desc
            return {'success': False,
                    'message': 'Password does not meet complexity policy: %s' % pc}

    # An empty subscriptions value resets the user's subscriptions.
    if data['subscriptions'] == '':
        user.subscriptions = EmbeddedSubscriptions()

    try:
        user.save(username=analyst)
    except ValidationError as e:
        return {'success': False,
                'message': format_error(e)}
    return {'success': True}
def datetime_parser(value):
    """
    Coerce date-bearing values into datetime.datetime objects.

    A datetime passes through unchanged. A non-empty string is parsed with
    settings.PY_DATETIME_FORMAT. For a dict, every value whose key contains
    "date" and whose value is a non-empty string is parsed in place.

    :param value: str, dict, or datetime.datetime to process.
    :type value: str or dict or datetime.datetime
    :returns: datetime.datetime or dict
    :raises TypeError: if value is of an unsupported type.
    """
    if isinstance(value, datetime.datetime):
        return value
    if isinstance(value, basestring) and value:
        return datetime.datetime.strptime(value, settings.PY_DATETIME_FORMAT)
    if isinstance(value, dict):
        for key, item in value.items():
            # Only rewrite non-empty string values stored under date-ish keys.
            if "date" in key and isinstance(item, basestring) and item:
                value[key] = datetime.datetime.strptime(item,
                                                        settings.PY_DATETIME_FORMAT)
        return value
    raise TypeError("Invalid type passed.")
def format_error(e):
    """
    Takes an Exception and returns a nice string representation.

    :param e: An exception.
    :type e: Exception
    :returns: unicode string "<ExceptionClass>: <message>"
    """
    return "{0}: {1}".format(e.__class__.__name__, unicode(e))
def toggle_item_state(type_, oid, analyst):
    """
    Toggle an item active/inactive.

    :param type_: The CRITs type for this item.
    :type type_: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param analyst: The user toggling this item.
    :type analyst: str
    :returns: dict with key "success" (boolean)
    """
    item = class_from_id(type_, oid)
    if not item:
        return {'success': False}

    # Flip the stored on/off flag.
    item.active = 'off' if item.active == 'on' else 'on'
    try:
        item.save(username=analyst)
    except ValidationError:
        return {'success': False}
    return {'success': True}
def do_add_preferred_actions(obj_type, obj_id, username):
    """
    Add all preferred actions to an object.

    :param obj_type: The type of object to update.
    :type obj_type: str
    :param obj_id: The ObjectId of the object to update.
    :type obj_id: str
    :param username: The user adding the preferred actions.
    :type username: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str) if failed,
              "object" (list of dicts) if successful.
    """
    klass = class_from_type(obj_type)
    if not klass:
        return {'success': False, 'message': 'Invalid type'}

    preferred = Action.objects(preferred__object_type=obj_type,
                               active='on')
    if not preferred:
        return {'success': False, 'message': 'No preferred actions'}

    sources = user_sources(username)
    obj = klass.objects(id=obj_id, source__name__in=sources).first()
    if not obj:
        return {'success': False, 'message': 'Could not find object'}

    now = datetime.datetime.now()
    added = []
    # Apply each action whose preference matches a field/value on this object.
    for act in preferred:
        for pref in act.preferred:
            if pref.object_type != obj_type:
                continue
            if obj.__getattribute__(pref.object_field) != pref.object_value:
                continue
            entry = {'action_type': act.name,
                     'active': 'on',
                     'analyst': username,
                     'begin_date': now,
                     'end_date': None,
                     'performed_date': now,
                     'reason': 'Preferred action toggle',
                     'date': now}
            obj.add_action(entry['action_type'],
                           entry['active'],
                           entry['analyst'],
                           entry['begin_date'],
                           entry['end_date'],
                           entry['performed_date'],
                           entry['reason'],
                           entry['date'])
            added.append(entry)

    if not added:
        return {'success': False, 'message': 'No preferred actions'}

    # Change status to In Progress if it is currently 'New'
    if obj.status == 'New':
        obj.set_status('In Progress')
    try:
        obj.save(username=username)
    except ValidationError as e:
        return {'success': False, 'message': e}
    return {'success': True, 'object': added}
def get_item_state(type_, name):
    """
    Get the state of an item.

    :param type_: The CRITs type for this item.
    :type type_: str
    :param name: The name of the item.
    :type name: str
    :returns: True if active, False if inactive.
    """
    item = class_from_type(type_).objects(__raw__={'name': name}).first()
    if not item:
        return False
    return item.active == 'on'
def remove_quotes(val):
    """
    Strip one leading and one trailing quote character (single or double)
    from a string when both ends are quoted; otherwise return it unchanged.

    :param val: The string to remove quotes from.
    :type val: str
    :returns: str
    """
    quotes = ('"', "'")
    if val.startswith(quotes) and val.endswith(quotes):
        return val[1:-1]
    return val
def generate_regex(val):
    """
    Takes the value, removes surrounding quotes, and generates a PyMongo
    case-insensitive $regex query for use on a field.

    :param val: The string to use for a regex.
    :type val: str
    :returns: dict with key '$regex' if successful, 'error' if failed.
    """
    try:
        pattern = re.compile('%s' % remove_quotes(val), re.I)
    except Exception as e:
        return {'error': 'Invalid Regular Expression: %s\n\n\t%s' % (val,
                                                                     str(e))}
    return {'$regex': pattern}
def parse_search_term(term, force_full=False):
    """
    Parse a search term to break it into search operators that we can use to
    enhance the search results.

    :param term: Search term
    :type term: str
    :param force_full: Treat the term as a full (non-regex) search.
    :type force_full: bool
    :returns: dict describing the search ('query', optional 'type'/'field')
    """
    # decode the term so we aren't dealing with weird encoded characters
    if force_full == False:
        term = urllib.unquote(term)

    search = {}
    # Tokenize the term; punctuation is allowed inside words so values such
    # as URLs and hashes survive as single tokens.
    try:
        lexer = shlex.shlex(term.strip())
        lexer.wordchars += '!@#$%^&*()-_=+[]{}|\:;<,>.?/~`'
        lexer.commenters = ''
        tokens = list(iter(lexer.get_token, ''))
    except Exception as e:
        search['query'] = {'error': str(e)}
        return search

    operators = ('regex', 'full', 'type', 'field')
    regex_term = ""
    # Split each token on ':' to look for an "operator:value" pair.
    for token in tokens:
        pieces = token.split(':')
        if len(pieces) < 2:
            regex_term += "%s " % token
            continue
        op = pieces[0]
        rest = ':'.join(pieces[1:])
        if op not in operators:
            # Not an operator: keep the whole token as part of the term.
            regex_term += "%s:%s " % (op, rest)
        elif op == 'regex':
            # can make this more flexible for regex?
            search['query'] = generate_regex(rest)
        elif op == 'full':
            regex_term += "%s " % (rest,)
            force_full = True
        elif op == 'type':
            search['type'] = rest.title()
        elif op == 'field':
            search['field'] = remove_quotes(rest.lower())

    if regex_term:
        stripped = regex_term.strip()
        if force_full:
            search['query'] = remove_quotes(stripped)
        else:
            search['query'] = generate_regex(stripped)
    return search
def gen_global_query(obj,user,term,search_type="global",force_full=False):
    """
    Generate a search query. Also calls :func:`check_query` for validation.

    :param obj: CRITs Document Object
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param user: CRITs user
    :type user: str
    :param term: Search term
    :type term: str
    :param search_type: Search type
    :type search_type: str
    :param force_full: Treat the term as a full (non-regex) search.
    :type force_full: bool
    :returns: dict -- The validated query dictionary, or a dict with
              "success"/"ignore"/"error" keys when the search cannot be run.
    """
    type_ = obj._meta['crits_type']
    search_list = []
    query = {}

    # Some terms, regardless of the query, will want to be full search terms and
    # not regex terms.
    force_full_terms = ['analysis_result', 'ssdeephash']
    force = False

    # Exclude searches for 'source' or 'releasability'
    # This is required because the check_query function doesn't handle
    # regex searches for these two fields
    if 'source' in search_type or 'releasability' in search_type:
        return query

    if search_type in force_full_terms or force_full != False:
        force = True

    parsed_search = parse_search_term(term, force_full=force)
    if 'query' not in parsed_search:
        return {'success': False,
                'ignore': False,
                'error': 'No query to search'}
    if 'error' in parsed_search['query']:
        return {'success': False,
                'ignore': False,
                'error': parsed_search['query']['error']}

    search_query = parsed_search['query']
    # A "type:" operator in the term restricts the search to one TLO type;
    # every other type is told to ignore this search.
    if 'type' in parsed_search:
        t = class_from_type(parsed_search['type'])
        if t:
            type_ = parsed_search['type']
            if obj._meta['crits_type'] != type_:
                return {'success': False,
                        'ignore': True,
                        'error': 'This type is being ignored.'}
    if 'field' in parsed_search:
        query = {parsed_search['field']: parsed_search['query']}
    defaultquery = check_query({search_type: search_query},user,obj)

    # Canned per-field queries for the Sample collection.
    sample_queries = {
        'size' : {'size': search_query},
        'md5hash': {'md5': search_query},
        'sha1hash': {'sha1': search_query},
        'ssdeephash': {'ssdeep': search_query},
        'sha256hash': {'sha256': search_query},
        # slow in larger collections
        'filename': {'$or': [
            {'filename': search_query},
            {'filenames': search_query},
        ]},
        'campaign': {'campaign.name': search_query},
        # slightly slow in larger collections
        'object_value': {'objects.value': search_query},
        'bucket_list': {'bucket_list': search_query},
        'sectors': {'sectors': search_query},
        'source': {'source.name': search_query},
    }

    # if a specific field is being defined to search against, return early
    if 'field' in parsed_search:
        # Never regex-search raw binary content.
        if 'filedata' in query:
            query = {'filedata': None}
        return query
    elif search_type == "bucket_list":
        query = {'bucket_list': search_query}
    elif search_type == "sectors":
        query = {'sectors': search_query}
    elif search_type == "actor_identifier":
        query = {'identifiers.identifier_id': search_query}
    # object_ comes from the core/views.py search function.
    # It joins search_type with otype
    elif search_type.startswith("object_"):
        if search_type == "object_value":
            query = {"objects.value": search_query}
        else:
            otypes = search_type.split("_")[1].split(" - ")
            if len(otypes) == 1:
                query = {"objects": {"$elemMatch": {"name": otypes[0],
                                                    "value": search_query}}}
            else:
                query = {"objects": {"$elemMatch": {"name": otypes[1],
                                                    "type": otypes[0],
                                                    "value": search_query}}}
    elif search_type == "byobject":
        query = {'comment': search_query}
    # Global search: build a per-type $or over the fields most likely to
    # contain the term.
    elif search_type == "global":
        if type_ == "Sample":
            search_list.append(sample_queries["object_value"])
            search_list.append(sample_queries["filename"])
            # A 32-character term may be an MD5 hash.
            if len(term) == 32:
                search_list.append(sample_queries["md5hash"])
        elif type_ == "AnalysisResult":
            search_list = [
                {'results.result': search_query},
            ]
        elif type_ == "Actor":
            search_list = [
                {'name': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "Certificate":
            search_list = [
                {'md5': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "PCAP":
            search_list = [
                {'md5': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "RawData":
            search_list = [
                {'md5': search_query},
                {'data': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "Signature":
            search_list = [
                {'md5': search_query},
                {'data': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "Indicator":
            search_list = [
                {'value': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Domain":
            search_list = [
                {'domain': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Email":
            search_list = [
                {'from': search_query},
                {'subject': search_query},
                {'raw_body': search_query},
                {'raw_headers': search_query},
                {'objects.value': search_query},
                {'x_originating_ip': search_query},
                {'originating_ip': search_query}
            ]
        elif type_ == "Event":
            search_list = [
                {'description': search_query},
                {'title': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "IP":
            search_list = [
                {'ip': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Comment":
            search_list = [
                {'comment': search_query},
            ]
        elif type_ == "Campaign":
            search_list = [
                {'name': search_query},
                {'aliases': search_query},
            ]
        elif type_ == "Screenshot":
            search_list = [
                {'description': search_query},
                {'tags': search_query},
            ]
        elif type_ == "Target":
            search_list = [
                {'email_address': search_query},
                {'firstname': search_query},
                {'lastname': search_query},
            ]
        else:
            search_list = [{'name': search_query}]
        # Fields common to every TLO type.
        search_list.append({'source.instances.reference':search_query})
        search_list.append({'bucket_list': search_query})
        search_list.append({'sectors': search_query})
        query = {'$or': search_list}
    # Non-global searches: map the search_type to a type-specific field,
    # falling back to the validated default query.
    else:
        if type_ == "Domain":
            query = {'domain': search_query}
        elif type_ == "Email":
            if search_type == "ip":
                query = {'$or': [{'originating_ip': search_query},
                                 {'x_originating_ip': search_query}]}
            elif search_type == "reference":
                query = {'source.instances.reference': search_query}
            else:
                query = defaultquery
        elif type_ == "RawData":
            if search_type == "data":
                query = {'data': search_query}
            elif search_type == "data_type":
                query = {'data_type': search_query}
            elif search_type == "title":
                query = {'title': search_query}
            elif search_type == "tool":
                query = {'tool.name': search_query}
            else:
                query = defaultquery
        elif type_ == "Signature":
            if search_type == "data":
                query = {'data': search_query}
            elif search_type == "data_type":
                query = {'data_type': search_query}
            elif search_type == "title":
                query = {'title': search_query}
            elif search_type == "tool":
                query = {'tool.name': search_query}
            else:
                query = defaultquery
        elif type_ == "Event":
            if search_type == "campaign":
                query = {'campaign.name': search_query}
            elif search_type == "source":
                query = {'source.name': search_query}
            else:
                query = defaultquery
        elif type_ == "Indicator":
            if search_type == "campaign":
                query = {'campaign.name': search_query}
            elif search_type == "ticket_number":
                query = {'tickets.ticket_number': search_query}
            elif search_type == "source":
                query = {'source.name': search_query}
            elif search_type == "confidence":
                query = {'confidence.rating': search_query}
            elif search_type == "impact":
                query = {'impact.rating': search_query}
            else:
                query = defaultquery
        elif type_ == "IP":
            query = {'ip': search_query}
        elif type_ == "Sample":
            if search_type not in sample_queries:
                return {'success': None,
                        'ignore': False,
                        'error': 'Search type not in sample queries.'}
            query = sample_queries[search_type]
            # Size is stored as an integer; a regex/string will not match.
            if 'size' in query:
                try:
                    query = {'size': int(query['size'])}
                except ValueError:
                    return {'success': None,
                            'ignore': False,
                            'error': 'Size must be an integer.'}
        else:
            query = defaultquery
    return query
def check_query(qparams,user,obj):
    """
    Remove and/or filter queries which may cause issues.

    :param qparams: MongoDB query
    :type qparams: dict
    :param user: CRITs user
    :type user: str
    :param obj: CRITs Document Object
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :returns: dict -- The validated query dictionary
    """
    # Map MongoDB field names back to document attribute names.
    # Loop-invariant, so build it once instead of once per key.
    invmap = dict((v,k) for k, v in obj._db_field_map.iteritems())
    # Iterate over the supplied query keys and make sure they start
    # with a valid field from the document
    goodkeys = {}
    for key,val in qparams.items():
        # Skip anything with Mongo's special $
        if '$' in key:
            continue
        # Grab the base field (text before the first '.') for the key checks.
        try:
            indx = key.index('.')
            field = key[:indx]
        except ValueError:
            # No '.' in the key; the whole key is the field name.
            field = key
        # Check for mapping, reverse because we're going the other way
        if field in invmap:
            field = invmap[field]
        # Only allow query keys that exist in the object
        if hasattr(obj,field):
            goodkeys[key] = val
    # Filter out invalid queries regarding source/releasability
    sourcefilt = user_sources(user)
    newquery = goodkeys.copy()
    for key in goodkeys:
        # Sources: only 'source'/'source.name' keys whose value is one of
        # the user's own sources may pass through.
        if "source" in key:
            if key != "source.name" and key != "source":
                del newquery[key]
            else:
                if goodkeys[key] not in sourcefilt:
                    del newquery[key]
        # Releasability: same restriction as sources.
        if "releasability" in key:
            if key != "releasability.name" and key != "releasability":
                del newquery[key]
            else:
                if goodkeys[key] not in sourcefilt:
                    del newquery[key]
    return newquery
def data_query(col_obj, user, limit=25, skip=0, sort=[], query={},
projection=[], count=False):
"""
Basic query function
:param col_obj: MongoEngine collection object (Required)
:type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
:param user: CRITs user (Required)
:type user: str
:param limit: Limit on returned rows
:type limit: int `(25)`
:param skip: Number of rows to skip
:type skip: int `(0)`
:param sort: Fields to sort by (Prepend field name with '-' to reverse sort)
:type sort: list
:param query: MongoDB query
:type query: dict
:param projection: Projection filter to apply to query
:type projection: list
:returns: dict -- Keys are result, data, count, msg, crits_type. 'data'
contains a :class:`crits.core.crits_mongoengine.CritsQuerySet` object.
"""
results = {'result':'ERROR'}
results['data'] = []
results['count'] = 0
results['msg'] = ""
results['crits_type'] = col_obj._meta['crits_type']
sourcefilt = user_sources(user)
if isinstance(sort,basestring):
sort = sort.split(',')
if isinstance(projection,basestring):
projection = projection.split(',')
docs = None
try:
if not issubclass(col_obj,CritsSourceDocument):
results['count'] = col_obj.objects(__raw__=query).count()
if count:
results['result'] = "OK"
return results
if col_obj._meta['crits_type'] == 'User':
docs = col_obj.objects(__raw__=query).exclude('password',
'password_reset',
'api_keys').\
order_by(*sort).skip(skip).\
limit(limit).only(*projection)
else:
docs = col_obj.objects(__raw__=query).order_by(*sort).\
skip(skip).limit(limit).only(*projection)
# Else, all other objects that have sources associated with them
# need to be filtered appropriately
else:
results['count'] = col_obj.objects(source__name__in=sourcefilt,
__raw__=query).count()
if count:
results['result'] = "OK"
return results
docs = col_obj.objects(source__name__in=sourcefilt,__raw__=query).\
order_by(*sort).skip(skip).limit(limit).\
only(*projection)
for doc in docs:
if hasattr(doc, "sanitize_sources"):
doc.sanitize_sources(username="%s" % user, sources=sourcefilt)
except Exception, e:
results['msg'] = "ERROR: %s. Sort performed on: %s" % (e,
', '.join(sort))
return results
results['data'] = docs
results['result'] = "OK"
return results
def csv_query(col_obj,user,fields=[],limit=10000,skip=0,sort=[],query={}):
    """
    Runs query and returns items in CSV format with fields as row headers

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param user: CRITs user (Required)
    :type user: str
    :param fields: Fields to return in the CSV
    :type fields: list
    :param limit: Limit on returned rows
    :type limit: int
    :param skip: Number of rows to skip
    :type skip: int
    :param sort: Fields to sort by (Prepend field name with '-' to reverse sort)
    :type sort: list
    :param query: MongoDB query
    :type query: dict
    :returns: str -- CSV text on success, otherwise the error message.
    """
    # Delegate the actual lookup to data_query and render its queryset.
    query_results = data_query(col_obj, user=user, limit=limit,
                               skip=skip, sort=sort, query=query,
                               projection=fields)
    if query_results['result'] != "OK":
        return query_results['msg']
    return query_results['data'].to_csv(fields)
def parse_query_request(request,col_obj):
    """
    Get query modifiers from a request.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param col_obj: MongoEngine collection object used to validate fields
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :returns: dict -- Keys are fields, sort, limit, skip
    """
    resp = {}
    resp['fields'] = request.GET.get('fields',[])
    if resp['fields']:
        try:
            resp['fields'] = resp['fields'].split(',')
        except AttributeError:
            # 'fields' was not a string; treat as a malformed request.
            return render_to_response("error.html",
                                      {"error": "Invalid fields specified"},
                                      RequestContext(request))
        # Map MongoDB field names back to document attribute names.
        # Loop-invariant, so build it once instead of once per field.
        invmap = dict((v,k) for k, v in col_obj._db_field_map.iteritems())
        goodfields = []
        for field in resp['fields']:
            # Skip anything with Mongo's special $
            if '$' in field:
                continue
            # Split into base field and any dotted remainder.
            try:
                indx = field.index('.')
                base = field[:indx]
                extra = field[indx:]
            except ValueError:
                # No '.' present; the whole field is the base.
                base = field
                extra = ""
            # Check for mapping, reverse because we're going the other way
            if base in invmap:
                base = invmap[base]
            # Only allow fields that exist in the object
            if hasattr(col_obj,base):
                goodfields.append(base+extra)
        resp['fields'] = goodfields
    resp['sort'] = request.GET.get('sort',[])
    resp['limit'] = int(request.GET.get('limit',10000))
    resp['skip'] = int(request.GET.get('skip',0))
    return resp
def csv_export(request, col_obj, query={}):
    """
    Returns a :class:`django.http.HttpResponse` object which prompts the user
    to download a CSV file containing the results from :func:`csv_query`.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param query: MongoDB query
    :type query: dict
    :returns: :class:`django.http.HttpResponse` -- CSV download response
    """
    opts = parse_query_request(request,col_obj)
    # No explicit query supplied; derive one from the request itself.
    if not query:
        qresp = get_query(col_obj, request)
        if qresp['Result'] == "ERROR":
            return render_to_response("error.html",
                                      {"error": qresp['Message'] },
                                      RequestContext(request))
        query = qresp['query']
    csv_data = csv_query(col_obj, request.user.username,
                         fields=opts['fields'], sort=opts['sort'],
                         query=query, limit=opts['limit'],
                         skip=opts['skip'])
    # csv_query returns a string on success; anything else is an error
    # payload that gets rendered on the error page instead.
    if not isinstance(csv_data, basestring):
        return render_to_response("error.html",
                                  {"error" : csv_data },
                                  RequestContext(request))
    response = HttpResponse(csv_data, content_type="text/csv")
    response['Content-Disposition'] = "attachment;filename=crits-%s-export.csv" % col_obj._meta['crits_type']
    return response
def get_query(col_obj,request):
    """
    Pull out a query from a request object.

    Combines a free-text search (the ``q`` GET parameter, turned into a
    query by :func:`gen_global_query`) with any extra field filters in the
    request (validated by :func:`check_query`).

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: dict -- The MongoDB query ('query'), plus 'Result', 'term',
        and 'urlparams' keys; 'Message' is set on error.
    """
    # Friendly search keys mapped to their dotted MongoDB field paths.
    keymaps = {
        "actor_identifier": "identifiers.identifier_id",
        "campaign": "campaign.name",
        "source": "source.name",
        "confidence": "confidence.rating",
        "impact": "impact.rating",
        "object_value":"objects.value",
        "analysis_result":"results.result",
    }
    term = ""
    query = {}
    response = {}
    # HTML-escape every GET parameter before echoing them back as a URL.
    params_escaped = {}
    for k,v in request.GET.items():
        params_escaped[k] = html_escape(v)
    urlparams = "?%s" % urlencode(params_escaped)
    if "q" in request.GET:
        # Free-text search path.
        force_full = request.GET.get('force_full', False)
        term = request.GET.get('q')
        search_type = request.GET.get('search_type',None)
        if not search_type:
            response['Result'] = "ERROR"
            response['Message'] = "No search_type defined"
            return response
        otype = request.GET.get('otype', None)
        if otype:
            search_type = search_type + "_" + otype
        # Undo HTML entity encoding so the raw term is searched.
        term = HTMLParser.HTMLParser().unescape(term)
        qdict = gen_global_query(col_obj,
                                 request.user.username,
                                 term,
                                 search_type,
                                 force_full=force_full
                                 )
        if not qdict.get('success', True):
            if qdict.get('ignore', False):
                response['Result'] = "IGNORE"
            else:
                response['Result'] = "ERROR"
            response['Message'] = qdict.get('error', 'Unable to process query')
            return response
        query.update(qdict)
        term = request.GET['q']
    # Merge in additional validated field filters from the request.
    qparams = request.REQUEST.copy()
    qparams = check_query(qparams,request.user.username,col_obj)
    for key,value in qparams.items():
        if key in keymaps:
            key = keymaps[key]
        # This one is not a straight rename like the others. If
        # searching for x_originating_ip also search for originating_ip,
        # and vice versa. This means we have to logically or the query
        # where the others do not.
        if key in ['x_originating_ip', 'originating_ip']:
            query["$or"] = [
                {"x_originating_ip": value},
                {"originating_ip": value}
            ]
        elif key in ['size', 'length']:
            # Numeric fields must be queried as integers.
            try:
                query[key] = int(value)
            except ValueError:
                results = {}
                results['Result'] = "ERROR"
                results['Message'] = "'size' requires integer, not %s" % value
                return results
        else:
            query[key] = value
        term = term + " " + value
    results = {}
    results['Result'] = "OK"
    results['query'] = query
    results['term'] = term
    results['urlparams'] = urlparams
    return results
def jtable_ajax_list(col_obj,url,urlfieldparam,request,excludes=[],includes=[],query={}):
    """
    Handles jTable listing POST requests

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param url: Base URL for objects. Ex ``crits.domains.views.domain_detail``
    :type url: str
    :param urlfieldparam: Field to use for the item detail's URL key. Passed
        as arg with ``url`` to :func:`django.core.urlresolvers.reverse`
    :type urlfieldparam: str
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param excludes: Fields to exclude
    :type excludes: list
    :param includes: Fields to include
    :type includes: list
    :param query: MongoDB query
    :type query: dict
    :returns: dict -- jTable-formatted response ('Result', 'Records',
        'TotalRecordCount', ...) or an error dictionary.
    """
    response = {"Result": "ERROR"}
    users_sources = user_sources(request.user.username)
    if request.is_ajax():
        pageSize = request.user.get_preference('ui','table_page_size',25)
        # Thought these were POSTs...GET works though
        skip = int(request.GET.get("jtStartIndex", "0"))
        if "jtLimit" in request.GET:
            pageSize = int(request.GET['jtLimit'])
        else:
            pageSize = int(request.GET.get("jtPageSize", pageSize))
        # Set the sort order
        sort = request.GET.get("jtSorting", urlfieldparam+" ASC")
        keys = sort.split(',')
        multisort = []
        # Friendly column names mapped to their dotted Mongo field paths.
        keymaps = {
            "actor_identifier": "identifiers.identifier_id",
            "campaign": "campaign.name",
            "source": "source.name",
            "confidence": "confidence.rating",
            "impact": "impact.rating",
            "object_value": "objects.value",
            "analysis_result": "results.result",
        }
        for key in keys:
            # Each sort key looks like "<field> ASC|DESC".
            (keyname, keyorder) = key.split()
            if keyname in keymaps:
                keyname = keymaps[keyname]
            if keyorder == "DESC":
                # MongoEngine uses a '-' prefix for descending sorts.
                keyname = "-%s" % keyname
            multisort.append(keyname)
        # Build the query
        term = ""
        if not query:
            resp = get_query(col_obj, request)
            if resp['Result'] in ["ERROR", "IGNORE"]:
                return resp
            query = resp['query']
            term = resp['term']
        response = data_query(col_obj, user=request.user.username, limit=pageSize,
                              skip=skip, sort=multisort, query=query,
                              projection=includes)
        if response['result'] == "ERROR":
            return {'Result': "ERROR", 'Message': response['msg']}
        response['crits_type'] = col_obj._meta['crits_type']
        # Escape term for rendering in the UI.
        response['term'] = cgi.escape(term)
        response['data'] = response['data'].to_dict(excludes, includes)
        # Convert data_query to jtable stuff
        response['Records'] = response.pop('data')
        response['TotalRecordCount'] = response.pop('count')
        response['Result'] = response.pop('result')
        # Flatten each document's values into display-ready strings.
        for doc in response['Records']:
            for key, value in doc.items():
                # all dates should look the same
                if isinstance(value, datetime.datetime):
                    doc[key] = datetime.datetime.strftime(value,
                                                          "%Y-%m-%d %H:%M:%S")
                if key == "password_reset":
                    # Never ship reset tokens to the browser.
                    doc['password_reset'] = None
                if key == "campaign":
                    camps = []
                    for campdict in value:
                        camps.append(campdict['name'])
                    doc[key] = "|||".join(camps)
                elif key == "source":
                    # Only show sources the requesting user may see.
                    srcs = []
                    for srcdict in doc[key]:
                        if srcdict['name'] in users_sources:
                            srcs.append(srcdict['name'])
                    doc[key] = "|||".join(srcs)
                elif key == "tags":
                    tags = []
                    for tag in doc[key]:
                        tags.append(tag)
                    doc[key] = "|||".join(tags)
                elif key == "is_active":
                    if value:
                        doc[key] = "True"
                    else:
                        doc[key] = "False"
                elif key == "datatype":
                    doc[key] = value.keys()[0]
                elif key == "results":
                    # Show count of analysis results, not the results.
                    doc[key] = len(doc[key])
                elif key == "preferred":
                    # Encode preferred actions as type|field|value pairs
                    # delimited by '||' for the UI to split apart.
                    final = ""
                    for p in doc[key]:
                        final += p['object_type']
                        final += "|"
                        final += p['object_field']
                        final += "|"
                        final += p['object_value']
                        final += "||"
                    doc[key] = final
                elif isinstance(value, list):
                    # Join lists of plain strings; the for/else only joins
                    # when no non-string element was found.
                    if value:
                        for item in value:
                            if not isinstance(item, basestring):
                                break
                        else:
                            doc[key] = ",".join(value)
                    else:
                        doc[key] = ""
                doc[key] = html_escape(doc[key])
            # Build the per-document details URL.
            if col_obj._meta['crits_type'] == "Comment":
                # Comments link to the details page of the commented object.
                mapper = {
                    "Actor": 'crits.actors.views.actor_detail',
                    "Campaign": 'crits.campaigns.views.campaign_details',
                    "Certificate": 'crits.certificates.views.certificate_details',
                    "Domain": 'crits.domains.views.domain_detail',
                    "Email": 'crits.emails.views.email_detail',
                    "Event": 'crits.events.views.view_event',
                    "Indicator": 'crits.indicators.views.indicator',
                    "IP": 'crits.ips.views.ip_detail',
                    "PCAP": 'crits.pcaps.views.pcap_details',
                    "RawData": 'crits.raw_data.views.raw_data_details',
                    "Sample": 'crits.samples.views.detail',
                    "Signature": 'crits.signatures.views.detail',
                }
                doc['url'] = reverse(mapper[doc['obj_type']],
                                     args=(doc['url_key'],))
            elif col_obj._meta['crits_type'] == "AuditLog":
                # Deleted objects have no details page to link to.
                if doc.get('method', 'delete()') != 'delete()':
                    doc['url'] = details_from_id(doc['type'],
                                                 doc.get('target_id', None))
            elif not url:
                doc['url'] = None
            else:
                doc['url'] = reverse(url, args=(unicode(doc[urlfieldparam]),))
    return response
def jtable_ajax_delete(obj,request):
    """
    Delete a document specified in the jTable POST.

    :param obj: MongoEngine collection object (Required)
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: bool -- True if item was deleted
    """
    # Only admins can delete
    if not is_admin(request.user.username):
        return False
    # Make sure we are supplied _id
    if "id" not in request.POST:
        return False
    docid = request.POST['id']
    if not docid:
        return False
    # Finally, make sure there is a related document
    doc = obj.objects(id=docid).first()
    if not doc:
        return False
    # hasattr() is the idiomatic (and cheaper) form of `x in dir(doc)`.
    if hasattr(doc, "delete_all_relationships"):
        doc.delete_all_relationships()
    # For samples/pcaps: also remove the stored file contents.
    if hasattr(doc, "filedata"):
        doc.filedata.delete()
    doc.delete(username=request.user.username)
    return True
def build_jtable(jtopts, request):
    """
    Build a dictionary containing proper jTable options.

    :param jtopts: Python dictionary containing jTable options.
    :type jtopts: dict.
    :param request: Current Django request
    :type request: :class:`django.http.HttpRequest`
    :raises KeyError: If a required jtopts key is missing.
    :returns: dict -- Contains the jTable configuration used by the template.

    **jtopts supports the following keys**

    **Required**

    *title*
        Contains the jTable title.
    *listurl*
        URL for the Django view that returns the data in JSON.
    *searchurl*
        URL to use when filtering data, usually the base URL for the view,
        without any options.
    *fields*
        Python list containing the fields to show for a document. The
        first item will be linked to the details view.

    **Optional**

    *default_sort*
        Defines the field and order to sort by.
        Ex. "field <ASC|DESC>"
        Default: FirstField ASC
    *deleteurl*
        URL for Django view to delete an item
    *no_sort*
        Python list containing which fields to disable sorting
    *hidden_fields*
        Python list containing which fields to hide. This list is a
        subset of 'fields'
    *linked_fields*
        Python list containing which fields should allow filtering.
    *paging*
        Allow paging on this jTable.
        Default: true
    *pageSize*
        Number of rows per page
        Default: User Preference (defaults to 25)
    *sorting*
        Allow sorting by column on this jTable
        Default: true
    *multiSorting*
        Allow sorting by multiple columns on this jTable
        Default: true
    *details_link*
        Define the field that should link to the details
        Default: First field
        If specified as '__disable__', then no linking will occur
        If specified as 'details', an icon is used for the link
    """
    # Check for required values.
    # NOTE: a dead `return None` used to follow this raise; it was
    # unreachable and has been removed.
    if not all(required in jtopts for required in ['listurl','searchurl','fields','title']):
        raise KeyError("Missing required key for jtopts in build_jtable")
    # jTable requires a key for the field
    # Mongo provides _id as a unique identifier, so we will require that
    if "id" not in jtopts['fields']:
        jtopts['fields'].append("id")
        # If we push the _id field on, we will also hide it by default
        if 'hidden_fields' in jtopts:
            jtopts['hidden_fields'].append("id")
        else:
            jtopts['hidden_fields'] = ["id",]
    pageSize = request.user.get_preference('ui','table_page_size',25)
    # Default jTable options
    default_options = {
        "paging" : "true",
        "pageSize": pageSize,
        "sorting": "true",
        "multiSorting": "true",
    }
    # Default widths for certain columns in the jTable
    colwidths = {
        "details": "'2%'",
        'recip': "'2%'",
        "comment":"'15%'",
        "date":"'8%'",
        "isodate":"'8%'",
        "id":"'4%'",
        "favorite":"'4%'",
        "actions":"'4%'",
        "size":"'4%'",
        "added":"'8%'",
        "created":"'8%'",
        "modified":"'8%'",
        "subject":"'17%'",
        "value":"'18%'",
        "type":"'10%'",
        "filetype":"'15%'",
        "status":"'5%'",
        "source":"'7%'",
        "campaign":"'7%'",
    }
    # Mappings for the column titles
    titlemaps = {
        "Isodate": "Date",
        "Created": "Added",
        "Ip": "IP",
        "Id": "Store ID",
    }
    jtable = {}
    # This allows overriding of default options if they are specified in jtopts
    for defopt,defval in default_options.items():
        if defopt in jtopts:
            jtable[defopt] = jtopts[defopt]
        else:
            jtable[defopt] = defval
    # Custom options
    if 'title' in jtopts:
        jtable['title'] = jtopts['title']
    else:
        jtable['title'] = ""
    jtable['defaultSorting'] = jtopts['default_sort']
    # Define jTable actions
    jtable['actions'] = {}
    # List action
    # If we have get parameters, append them
    if request.GET:
        jtable['actions']['listAction'] = jtopts['listurl'] + "?"+request.GET.urlencode(safe='@')
    else:
        jtable['actions']['listAction'] = jtopts['listurl']
    # Delete action
    # If user is admin and deleteurl is set, provide a delete action in jTable
    if ( is_admin(request.user.username) and
         'deleteurl' in jtopts and jtopts['deleteurl'] ):
        jtable['actions']['deleteAction'] = jtopts['deleteurl']
    # We don't have any views available for these actions
    #jtable['actions']['createAction'] = reverse()
    #jtable['actions']['updateAction'] = reverse()
    # Generate the fields
    jtable['fields'] = []
    for field in jtopts['fields']:
        fdict = {}
        # Create the column title here
        title = field.replace("_"," ").title().strip()
        if title in titlemaps:
            title = titlemaps[title]
        # Some options require quotes, so we use "'%s'" to quote them
        fdict['title'] = "'%s'" % title
        fdict['fieldname'] = "'%s'" % field
        if field in colwidths:
            fdict['width'] = colwidths[field]
        # Every jTable needs a key. All our items in Mongo have a unique _id
        # identifier, so by default we always include that here as the key
        if field == "id":
            fdict['key'] = "true"
            fdict['display'] = """function (data) { return '<div class="icon-container"><span id="'+data.record.id+'" class="id_copy ui-icon ui-icon-copy"></span></div>';}"""
        if field == "favorite":
            fdict['display'] = """function (data) { return '<div class="icon-container"><span id="'+data.record.id+'" class="favorites_icon_jtable ui-icon ui-icon-star"></span></div>';}"""
        if field == "actions":
            fdict['display'] = """function (data) { return '<div class="icon-container"><span data-id="'+data.record.id+'" id="'+data.record.id+'" class="preferred_actions_jtable ui-icon ui-icon-heart"></span></div>';}"""
        if field == "thumb":
            fdict['display'] = """function (data) { return '<img src="%s'+data.record.id+'/thumb/" />';}""" % reverse('crits.screenshots.views.render_screenshot')
        if field == "description" and jtable['title'] == "Screenshots":
            fdict['display'] = """function (data) { return '<span class="edit_underline edit_ss_description" data-id="'+data.record.id+'">'+data.record.description+'</span>';}"""
        if 'no_sort' in jtopts and field in jtopts['no_sort']:
            fdict['sorting'] = "false"
        if 'hidden_fields' in jtopts and field in jtopts['hidden_fields']:
            # hide the row but allow the user to show it
            fdict['visibility'] = '"hidden"'
        # This creates links for certain jTable columns
        # It will link anything listed in 'linked_fields'
        campbase = reverse('crits.campaigns.views.campaign_details',args=('__CAMPAIGN__',))
        # If linked_fields is not specified lets link source and campaign
        # if they exist as fields in the jTable
        if 'linked_fields' not in jtopts:
            jtopts['linked_fields'] = []
            if 'source' in jtopts['fields']:
                jtopts['linked_fields'].append("source")
            if 'campaign' in jtopts['fields']:
                jtopts['linked_fields'].append("campaign")
        if field in jtopts['linked_fields']:
            fdict['display'] = """function (data) {
                return link_jtable_column(data, '%s', '%s', '%s');
            } """ % (field, jtopts['searchurl'], campbase)
        jtable['fields'].append(fdict)
    if 'details_link' in jtopts:
        if jtopts['details_link'] == "__disabled__":
            return jtable
        else:
            if jtopts['details_link'] not in jtopts['fields']:
                return jtable
            # Link the field in details_link
            linkfield = "'%s'" % jtopts["details_link"]
            for i,field in enumerate(jtable['fields']):
                if field['fieldname'] != linkfield:
                    continue
                if field['fieldname'] == "'details'":
                    # BUG FIX: the inner JS `return '';` previously lost its
                    # quotes to Python's adjacent-string-literal
                    # concatenation and emitted `return ;`. The quotes are
                    # now escaped so valid JS is generated.
                    jtable['fields'][i]['display'] = 'function (data) {if (!data.record.url) { return \'\';}; return \'<a href="\'+data.record.url+\'" target="_parent"><div class="icon-container"><span class="ui-icon ui-icon-document" title="View Details"></span></div></a>\';}'
                else:
                    jtable['fields'][i]['display'] = "function (data) {return '<a href=\"'+data.record.url+'\">'+data.record."+jtopts['fields'][i]+"+'</a>';}"
    else:
        # Provide default behavior
        if jtable['fields'][0]['fieldname'] == "'details'":
            jtable['fields'][0]['display'] = 'function (data) {return \'<a href="\'+data.record.url+\'"><div class="icon-container"><span class="ui-icon ui-icon-document" title="View Details"></span></div></a>\';}'
        else:
            jtable['fields'][0]['display'] = "function (data) {return '<a href=\"'+data.record.url+'\">'+data.record."+jtopts['fields'][0]+"+'</a>';}"
    return jtable
def generate_items_jtable(request, itype, option):
    """
    Generate a jtable list for the Item provided.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param itype: The CRITs item we want to list.
    :type itype: str
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = class_from_type(itype)
    # Pick the listing fields and the "add" toolbar click handler per type.
    # NOTE(review): if itype matches none of these cases, 'fields' and
    # 'click' are never bound and the code below raises NameError -- confirm
    # callers only pass the types enumerated here.
    if itype == 'ActorThreatIdentifier':
        fields = ['name', 'active', 'id']
        click = "function () {window.parent.$('#actor_identifier_type_add').click();}"
    elif itype == 'Campaign':
        fields = ['name', 'description', 'active', 'id']
        click = "function () {window.parent.$('#new-campaign').click();}"
    elif itype == 'Action':
        fields = ['name', 'active', 'object_types', 'preferred', 'id']
        click = "function () {window.parent.$('#action_add').click();}"
    elif itype == 'RawDataType':
        fields = ['name', 'active', 'id']
        click = "function () {window.parent.$('#raw_data_type_add').click();}"
    elif itype == 'SignatureType':
        fields = ['name', 'active', 'id']
        click = "function () {window.parent.$('#signature_type_add').click();}"
    elif itype == 'SignatureDependency':
        fields = ['name', 'id']
        click = "function () {window.parent.$('#signature_dependency_add').click();}"
    elif itype == 'SourceAccess':
        fields = ['name', 'active', 'id']
        click = "function () {window.parent.$('#source_create').click();}"
    elif itype == 'UserRole':
        fields = ['name', 'active', 'id']
        click = "function () {window.parent.$('#user_role').click();}"
    # AJAX data request: return the item listing as JSON.
    if option == 'jtlist':
        details_url = None
        details_url_key = 'name'
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    '''Special case for dependency, to allow for deletions, no more toggle on dependencies '''
    ''' This is modified here to fit with rest of code, there is no delete field in mongo, but the user can delete '''
    if itype == 'SignatureDependency':
        fields = ['name', 'delete', 'id']
    jtopts = {
        'title': "%ss" % itype,
        'default_sort': 'name ASC',
        'listurl': reverse('crits.core.views.items_listing',
                           args=(itype, 'jtlist',)),
        'deleteurl': None,
        'searchurl': None,
        'fields': fields,
        'hidden_fields': ['id'],
        'linked_fields': [],
        'details_link': '',
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = [
        {
            'tooltip': "'Add %s'" % itype,
            'text': "'Add %s'" % itype,
            'click': click,
        },
    ]
    # build_jtable single-quotes fieldnames, hence the "'active" prefixes.
    for field in jtable['fields']:
        if field['fieldname'].startswith("'active"):
            field['display'] = """ function (data) {
            return '<a id="is_active_' + data.record.id + '" href="#" onclick=\\'javascript:toggleItemActive("%s","'+data.record.id+'");\\'>' + data.record.active + '</a>';
            }
            """ % itype
        if field['fieldname'].startswith("'name"):
            field['display'] = """ function (data) { return '<a href="#" onclick=\\'javascript:editAction("'+data.record.name+'", "'+data.record.object_types+'", "'+data.record.preferred+'");\\'>' + data.record.name + '</a>';
            }
            """
    '''special case for signature dependency, add a delete button to allow for removal'''
    if itype == 'SignatureDependency':
        for field in jtable['fields']:
            if field['fieldname'].startswith("'delete"):
                field['display'] = """ function (data) {
                return '<button title="Delete" class="jtable-command-button jtable-delete-command-button" id="to_delete_' + data.record.id + '" href="#" onclick=\\'javascript:deleteSignatureDependency("%s","'+data.record.id+'");\\'><span>Delete</span></button>';
                }
                """ % itype
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % itype.lower(),
                                   'button': '%ss_tab' % itype.lower()},
                                  RequestContext(request))
    else:
        return render_to_response("item_editor.html",
                                  {'jtable': jtable,
                                   'jtid': 'items_listing'},
                                  RequestContext(request))
def generate_users_jtable(request, option):
    """
    Generate a jtable list for Users.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = CRITsUser
    # AJAX data request: return the user listing as JSON.
    if option == 'jtlist':
        details_url = None
        details_url_key = 'username'
        fields = ['username', 'first_name', 'last_name', 'email',
                  'last_login', 'organization', 'role', 'is_active',
                  'id']
        # Never ship login attempt history to the browser.
        excludes = ['login_attempts']
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields,
                                    excludes=excludes)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    jtopts = {
        'title': "Users",
        'default_sort': 'username ASC',
        'listurl': reverse('crits.core.views.users_listing', args=('jtlist',)),
        'deleteurl': None,
        'searchurl': None,
        'fields': ['username', 'first_name', 'last_name', 'email',
                   'last_login', 'organization', 'role', 'is_active',
                   'id'],
        'hidden_fields': ['id'],
        'linked_fields': []
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = [
        {
            'tooltip': "'Add User'",
            'text': "'Add User'",
            'click': "function () {editUser('');}",
        },
    ]
    # Wire inline edit/toggle handlers onto the username and is_active
    # columns (build_jtable single-quotes fieldnames, hence "'username").
    for field in jtable['fields']:
        if field['fieldname'].startswith("'username"):
            field['display'] = """ function (data) {
            return '<a class="user_edit" href="#" onclick=\\'javascript:editUser("'+data.record.username+'");\\'>' + data.record.username + '</a>';
            }
            """
        if field['fieldname'].startswith("'is_active"):
            field['display'] = """ function (data) {
            return '<a id="is_active_' + data.record.username + '" href="#" onclick=\\'javascript:toggleUserActive("'+data.record.username+'");\\'>' + data.record.is_active + '</a>';
            }
            """
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': 'users_listing'},
                                  RequestContext(request))
    else:
        return render_to_response("user_editor.html",
                                  {'jtable': jtable,
                                   'jtid': 'users_listing'},
                                  RequestContext(request))
def generate_dashboard(request):
    """
    Generate the Dashboard.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Imported here (as in the original) to avoid a circular import at
    # module load time.
    from crits.dashboards.handlers import get_dashboard
    dashboard_args = get_dashboard(request.user)
    return render_to_response('dashboard.html', dashboard_args,
                              RequestContext(request))
def dns_timeline(query, analyst, sources):
    """
    Query for domains, format that data for timeline view, and return them.

    :param query: The query to use to find the Domains.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """
    domains = Domain.objects(__raw__=query)
    # Resolutions to these IPs are treated as the domain being offline.
    offline = ['255.255.255.254', '127.0.0.1', '127.0.0.2', '0.0.0.0']
    event_id = 0
    events = []
    for d in domains:
        d.sanitize_sources(username=analyst,
                           sources=sources)
        domain = d.domain
        # State machine per domain: "off" until the domain resolves to a
        # routable IP, "on" while it does. Each on->off transition closes
        # out one timeline event.
        state = "off"
        # Walk the domain's IP relationships in chronological order.
        ip_list = [r for r in d.relationships if r.rel_type == 'IP']
        ip_list = sorted(ip_list, key=itemgetter('relationship_date'), reverse=False)
        description = ""
        e = {}
        for ipl in ip_list:
            ip = IP.objects(ip=ipl.object_id,
                            source__name__in=sources).first()
            # Undated relationships cannot be placed on a timeline.
            if ipl['relationship_date'] is None:
                continue
            e['id'] = event_id
            e['date_display'] = "hour"
            e['importance'] = 20
            e['icon'] = "halfcircle_blue.png"
            event_id += 1
            if ip and ip.ip in offline:
                if state == "on":
                    # Domain went offline: close and record the open event.
                    e['enddate'] = datetime.datetime.strftime(ipl['relationship_date'],
                                                              settings.PY_DATETIME_FORMAT)
                    e['description'] = description
                    state = "off"
                    events.append(e)
                    description = ""
                    e = {}
                elif state == "off":
                    # Already offline; nothing to record.
                    pass
            elif ip:
                if state == "on":
                    # Still online: accumulate this resolution in the
                    # event's description.
                    description += "<br /><b><a style=\"display: inline;\" href=\"%s\">%s</a>:</b> %s" % (reverse('crits.ips.views.ip_detail', args=[ip.ip]), ip.ip, ipl['relationship_date'])
                elif state == "off":
                    # Domain came online: open a new event.
                    e['startdate'] = datetime.datetime.strftime(ipl['relationship_date'],
                                                                settings.PY_DATETIME_FORMAT)
                    e['title'] = domain
                    description += "<br /><b><a style=\"display: inline;\" href=\"%s\">%s</a>:</b> %s" % (reverse('crits.ips.views.ip_detail', args=[ip.ip]), ip.ip, ipl['relationship_date'])
                    state = "on"
        # NOTE(review): an event still "on" when the resolutions run out is
        # never appended -- confirm whether open-ended events should appear.
    return events
def email_timeline(query, analyst, sources):
    """
    Query for emails, format that data for timeline view, and return them.

    :param query: The query to use to find the Emails.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """
    emails = Email.objects(__raw__=query)
    events = []
    event_id = 0
    for email in emails:
        email.sanitize_sources(username=analyst,
                               sources=sources)
        email = email.to_dict()
        if "source" in email and email["source"][0] is not None:
            e = {}
            e['title'] = ""
            e['id'] = event_id
            e['date_display'] = "hour"
            e['importance'] = 20
            e['icon'] = "halfcircle_blue.png"
            event_id += 1
            # Title: sender, plus campaign and source names when available.
            if "from" in email:
                if email["from"]:
                    e['title'] += email["from"]
            if "campaign" in email:
                try:
                    if "name" in email["campaign"][0]:
                        e['title'] += " (%s)" % email["campaign"][0]["name"]
                except (IndexError, KeyError, TypeError):
                    # Campaign list may be empty or malformed; leave the
                    # title without a campaign suffix.
                    pass
            if "source" in email:
                if "name" in email["source"][0]:
                    e['title'] += " (%s)" % email["source"][0]["name"]
            description = ""
            # BUG FIX: the original rebound the 'sources' parameter to an
            # empty list here, so sanitize_sources() on every subsequent
            # email filtered against no sources at all. The dead
            # assignment has been removed.
            if "from" in email:
                description += "<br /><b>%s</b>: <a style=\"display: inline;\" href=\"%s\">%s</a>" % \
                        (email["from"],
                         reverse('crits.emails.views.email_detail', args=[email['_id']]),
                         email["from"])
            # Prefer the email's own date; fall back to the first source
            # instance date.
            if "isodate" in email:
                e['startdate'] = "%s" % email["isodate"]
            else:
                if "source" in email:
                    e['startdate'] = "%s" % email["source"][0]['instances'][0]["date"]
            if "source" in email:
                description += "<br /><hr><b>Source:</b>"
                for source in email["source"]:
                    if "name" in source and "instances" in source:
                        description += "<br /><b>%s</b>: %s" % (source["name"],
                                                                source['instances'][0]["date"])
            e['description'] = description
            events.append(e)
    return events
def indicator_timeline(query, analyst, sources):
    """
    Query for indicators, format that data for timeline view, and return them.

    :param query: The query to use to find the Indicators.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """

    events = []
    for event_id, indicator in enumerate(Indicator.objects(__raw__=query)):
        indicator.sanitize_sources(username=analyst,
                                   sources=sources)
        ind = indicator.to_dict()
        detail_link = reverse('crits.indicators.views.indicator',
                              args=[ind['_id']])
        # NOTE(review): "%Z" on a naive datetime renders as an empty string,
        # which leaves a trailing "." in startdate -- confirm intended.
        entry = {
            'title': ind['value'],
            'id': event_id,
            'date_display': "hour",
            'importance': 20,
            'icon': "halfcircle_blue.png",
            'startdate': ind['created'].strftime("%Y-%m-%d %H:%M:%S.%Z"),
        }
        description = "<br /><b>Value</b>: <a style=\"display: inline;\" href=\"%s\">%s</a>" % (detail_link, ind['value'])
        description += "<br /><b>Type</b>: %s" % ind['type']
        description += "<br /><b>Created</b>: %s" % ind['created']
        entry['description'] = description
        events.append(entry)
    return events
def _subscription_details(subscriptions, type_, klass, only_fields,
                          copy_fields=()):
    """
    Resolve the subscription stubs of one TLO type into display dicts.

    :param subscriptions: The user's subscriptions, keyed by TLO type.
    :type subscriptions: dict
    :param type_: The TLO type to resolve (e.g. "Sample").
    :type type_: str
    :param klass: The document class to query for the subscribed objects.
    :param only_fields: Field names to restrict the query to.
    :type only_fields: tuple
    :param copy_fields: Extra attributes to copy verbatim onto each dict.
    :type copy_fields: tuple
    :returns: list of dictionaries.
    """

    stubs = subscriptions[type_]
    ids = [ObjectId(s['_id']) for s in stubs]
    # Parallel list of raw _id values used to map each fetched object back
    # to its subscription stub (and thus its subscription date).
    stub_ids = [s['_id'] for s in stubs]
    final = []
    for obj in klass.objects(id__in=ids).only(*only_fields):
        d = obj.to_dict()
        for field in copy_fields:
            d[field] = obj[field]
        d['id'] = obj.id
        d['date'] = stubs[stub_ids.index(obj.id)]['date']
        final.append(d)
    return final

def generate_user_profile(username, request):
    """
    Generate the user profile page.

    :param username: The user profile to generate.
    :type username: str
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :returns: dict of profile data, or a {"status": "ERROR", ...} dict if
              the user cannot be found.
    """

    user_source_access = user_sources(username)
    user_source_access.sort()
    # Cap each "recent activity" listing at this many entries.
    limit = 5
    user_info = CRITsUser.objects(username=username).first()
    if not user_info:
        return {"status": "ERROR", "message": "User not found"}
    # recent indicators worked on
    query = {'$or': [{'actions.analyst': "%s" % username},
                     {'activity.analyst': "%s" % username},
                     {'objects.analyst': "%s" % username}]}
    indicator_list = (Indicator.objects(__raw__=query)
                      .only('value',
                            'ind_type',
                            'created',
                            'campaign',
                            'source',
                            'status')
                      .order_by('-created')
                      .limit(limit)
                      .sanitize_sources(username))
    # recent emails worked on
    query = {'campaign.analyst': "%s" % username}
    email_list = (Email.objects(__raw__=query)
                  .order_by('-date')
                  .limit(limit)
                  .sanitize_sources(username))
    # recent samples worked on, discovered via the audit log
    sample_md5s = (AuditLog.objects(user=username,
                                    target_type="Sample")
                   .order_by('-date')
                   .limit(limit))
    # Audit log values begin with the sample's md5; take the first token.
    md5s = [entry.value.split(" ")[0] for entry in sample_md5s]
    filter_data = ('md5', 'source', 'filename', 'mimetype',
                   'size', 'campaign')
    sample_list = (Sample.objects(md5__in=md5s)
                   .only(*filter_data)
                   .sanitize_sources(username))
    # collect subscription information
    subscriptions = user_info.subscriptions
    subscription_count = 0
    # (type, document class, fields to fetch, extra attributes to copy).
    # Each entry used to be its own near-identical ~12-line stanza.
    subscription_specs = (
        ('Sample', Sample, ('md5', 'filename'), ('md5',)),
        ('PCAP', PCAP, ('md5', 'filename'), ()),
        ('Email', Email, ('from_address', 'sender', 'subject'), ()),
        ('Indicator', Indicator, ('value', 'ind_type'), ()),
        ('Event', Event, ('title', 'description'), ()),
        ('Domain', Domain, ('domain',), ()),
        ('IP', IP, ('ip',), ()),
        ('Campaign', Campaign, ('name',), ()),
    )
    for type_, klass, only_fields, copy_fields in subscription_specs:
        if type_ in subscriptions:
            subscription_count += len(subscriptions[type_])
            subscriptions[type_] = _subscription_details(subscriptions,
                                                         type_,
                                                         klass,
                                                         only_fields,
                                                         copy_fields)
    # Collect favorite information
    favorites = user_info.favorites.to_dict()
    collected_favorites = {}
    total_favorites = 0
    for type_ in favorites.keys():
        ids = [ObjectId(f) for f in favorites[type_]]
        if ids:
            count = class_from_type(type_).objects(id__in=ids).count()
        else:
            count = 0
        total_favorites += count
        url = reverse('crits.core.views.favorites_list', args=(type_, 'inline'))
        collected_favorites[type_] = {
            'count': count,
            'url': url
        }
    #XXX: this can be removed after jtable
    notifications = get_user_notifications(username)
    result = {'username': username,
              'user_info': user_info,
              'user_sources': user_source_access,
              'indicators': indicator_list,
              'emails': email_list,
              'favorites': collected_favorites,
              'total_favorites': total_favorites,
              'notifications': notifications,
              'samples': sample_list,
              'subscriptions': subscriptions,
              'subscription_count': subscription_count,
              'ui_themes': ui_themes(),
              'rt_url': settings.RT_URL}
    result['preferences'] = generate_user_preference(request)
    return result
def generate_favorites_jtable(request, type_, option):
    """
    Generate favorites jtable.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param type_: The type of CRITs object.
    :type type_: str
    :param option: Action to take ('jtlist' for the AJAX listing, 'inline'
                   to render the table inline, anything else renders the
                   full listing page).
    :type option: str
    :returns: :class:`django.http.HttpResponse`
    """

    klass = class_from_type(type_)
    # Per-type jtable configuration is declared on the document class.
    mapper = klass._meta['jtable_opts']
    if option == "jtlist":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        user = CRITsUser.objects(username=request.user.username).only('favorites').first()
        favorites = user.favorites.to_dict()
        # Restrict the listing to just the objects this user favorited.
        ids = [ObjectId(s) for s in favorites[type_]]
        query = {'_id': {'$in': ids}}
        response = jtable_ajax_list(klass,
                                    details_url,
                                    details_url_key,
                                    request,
                                    includes=fields,
                                    query=query)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    jtopts = {
        'title': type_ + 's',
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.core.views.favorites_list', args=(type_, 'jtlist')),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link'],
        'no_sort': mapper['no_sort']
    }
    jtable = build_jtable(jtopts, request)
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button' : '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def generate_user_preference(request,section=None,key=None,name=None):
    """
    Generate user preferences.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param section: The section of the preferences to return.
    :type section: str
    :param key: The specific preference field within the section
                to be retrieved.
    :type key: str
    :param name: Used to differentiate between different preferences under
                 the same "section" and "key". Otherwise the first "section"
                 that matches will be returned. For example there may be two
                 different "notify" sections and also two different "toggle"
                 keys, but the "key" matching the "name" value will be
                 returned.
    :returns: list of preference dicts (or a single dict when a matching
              section is requested).
    """

    # Returned as an array to maintain the order
    # could also have a key/value and a ordered array
    from crits.core.forms import PrefUIForm, NavMenuForm, ToastNotificationConfigForm

    toast_title = "Toast Notifications"
    config = CRITsConfig.objects().first()
    if not config.enable_toasts:
        toast_title += " (currently globally disabled by an admin)"

    preferences = [
        {'section': 'notify',
         'title': 'Notifications',
         'toggle': 'email',
         'enabled': get_user_email_notification(request.user.username),
         'name': 'Email Notifications'
        },
        {'section': 'toast_notifications',
         'title': toast_title,
         'form': ToastNotificationConfigForm(request),
         'formclass': ToastNotificationConfigForm,
        },
        {'section': 'ui',
         'title': 'UI Settings',
         'form': PrefUIForm(request),
         'formclass': PrefUIForm,
         'reload': True },
        {'section': 'nav',
         'form': NavMenuForm(request),
         'formclass': NavMenuForm,
         'name': 'Navigation Menu',
         'title': 'Navigation Menu',
         'reload': True },
    ]

    if section is None:
        return preferences
    # Only return the requested section as hash
    for entry in preferences:
        if entry['section'] != section:
            continue
        if not key:
            return entry
        if entry[key] == name:
            return entry
    # No match found; fall back to the full list.
    return preferences
def reset_user_password(username=None, action=None, email=None,
                        submitted_rcode=None, new_p=None, new_p_c=None,
                        analyst=None):
    """
    Handle the process of resetting a user's password.

    :param username: The user resetting their password.
    :type username: str
    :param action: What action we need to take:
                   - send_email: sends email to user with reset code
                   - submit_reset_code: validate the reset code
                   - submit_passwords: reset the password
    :type action: str
    :param email: The user's email address.
    :type email: str
    :param submitted_rcode: The reset code submitted by the user.
    :type submitted_rcode: str
    :param new_p: The new password provided by the user.
    :type new_p: str
    :param new_p_c: The new password confirmation provided by the user.
    :type new_p_c: str
    :param analyst: The user submitting these changes.
    :type analyst: str
    :returns: :class:`django.http.HttpResponse`
    """

    # Reject anything other than the three supported actions up front.
    if action not in ('send_email', 'submit_reset_code', 'submit_passwords'):
        response = {'success': False, 'message': 'Invalid action'}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    user = CRITsUser.objects(username=username, email=email).first()
    if not user:
        # make it seem like this worked even if it didn't to prevent people
        # from brute forcing usernames and email addresses.
        response = {'success': True, 'message': 'Instructions sent to %s' % email}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if action == 'send_email':
        rcode = user.set_reset_code(analyst)
        crits_config = CRITsConfig.objects().first()
        # crits_email_end_tag decides whether the configured subject tag is
        # appended (truthy) or prepended (falsy) to the subject line.
        if crits_config.crits_email_end_tag:
            subject = "CRITs Password Reset" + crits_config.crits_email_subject_tag
        else:
            subject = crits_config.crits_email_subject_tag + "CRITs Password Reset"
        body = """You are receiving this email because someone has requested a
        password reset for your account. If it was not you, please log
        into CRITs immediately which will remove the reset code from your
        account. If it was you, here is your reset code:\n\n
        """
        body += "%s\n\n" % rcode
        body += """You have five minutes to reset your password before this
        reset code expires.\n\nThank you!
        """
        user.email_user(subject, body)
        response = {'success': True, 'message': 'Instructions sent to %s' % email}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if action == 'submit_reset_code':
        # The validation result dict is serialized and returned directly.
        return HttpResponse(json.dumps(user.validate_reset_code(submitted_rcode,
                                                                analyst),
                                       default=json_handler),
                            content_type="application/json")
    if action == 'submit_passwords':
        # The reset result dict is serialized and returned directly.
        return HttpResponse(json.dumps(user.reset_password(submitted_rcode,
                                                           new_p, new_p_c,
                                                           analyst),
                                       default=json_handler),
                            content_type="application/json")
def login_user(username, password, next_url=None, user_agent=None,
               remote_addr=None, accept_language=None, request=None,
               totp_pass=None):
    """
    Handle the process of authenticating a user.

    :param username: The user authenticating to the system.
    :type username: str
    :param password: The password provided by the user.
    :type password: str
    :param next_url: The URL to redirect to after successful login.
    :type next_url: str
    :param user_agent: The user-agent of the request.
    :type user_agent: str
    :param remote_addr: The remote-address of the request.
    :type remote_addr: str
    :param accept_language: The accept-language of the request.
    :type accept_language: str
    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :param totp_pass: The TOTP password provided by the user.
    :type totp_pass: str
    :returns: dict with keys:
              "success" (boolean),
              "type" (str) - Type of failure,
              "message" (str)
    """

    # Deliberately vague so callers cannot distinguish an unknown username
    # from a bad password.
    error = 'Unknown user or bad password.'
    response = {}
    crits_config = CRITsConfig.objects().first()
    if not crits_config:
        response['success'] = False
        response['type'] = "login_failed"
        response['message'] = error
        return response
    # Web (request present) and CLI logins can carry different TOTP policies.
    if request:
        totp = crits_config.totp_web
    else:
        totp = crits_config.totp_cli
    # Do the username and password authentication
    # TOTP is passed here so that authenticate() can check if
    # the threshold has been exceeded.
    user = authenticate(username=username,
                        password=password,
                        user_agent=user_agent,
                        remote_addr=remote_addr,
                        accept_language=accept_language,
                        totp_enabled=totp)
    if user:
        if totp == 'Required' or (totp == 'Optional' and user.totp):
            # Remote user auth'd but has not seen TOTP screen yet
            if crits_config.remote_user and not totp_pass:
                response['success'] = False
                response['type'] = "totp_required"
                response['message'] = "TOTP required"
                return response
            e = EmbeddedLoginAttempt(user_agent=user_agent,
                                     remote_addr=remote_addr,
                                     accept_language=accept_language)
            secret = user.secret
            if not secret and not totp_pass:
                # No secret on file and no PIN supplied: prompt for a PIN so
                # a secret can be generated on the next attempt.
                response['success'] = False
                response['type'] = "no_secret"
                response['message'] = ("You have no TOTP secret. Please enter "
                                       "a new PIN in the TOTP field.")
                return response
            elif not secret and totp_pass:
                # First TOTP setup: generate and store a secret from the PIN
                # and hand it back (with a QR image for web clients) so the
                # user can configure their authenticator.
                response['success'] = False
                response['type'] = "secret_generated"
                res = save_user_secret(username, totp_pass, "crits", (200,200))
                if res['success']:
                    user.reload()
                    secret = res['secret']
                    if not request:
                        # CLI client: return the raw secret only.
                        response['secret'] = secret
                        return response
                    message = "Setup your authenticator using: '%s'" % secret
                    message += "<br />Then authenticate again with your PIN + token."
                    if res['qr_img']:
                        message += '<br /><img src="data:image/png;base64,'
                        message += '%s" />' % res['qr_img']
                    response['message'] = message
                else:
                    response['message'] = "Secret Generation Failed"
                return response
            elif not valid_totp(username, totp_pass, secret):
                # Bad token: record the failed attempt before rejecting.
                e.success = False
                user.login_attempts.append(e)
                user.invalid_login_attempts += 1
                user.save()
                response['success'] = False
                response['type'] = "login_failed"
                response['message'] = error
                return response
            # TOTP check passed; record the successful attempt.
            e.success = True
            user.login_attempts.append(e)
            user.save()
        if user.is_active:
            user.invalid_login_attempts = 0
            # A successful login invalidates any outstanding reset code.
            user.password_reset.reset_code = ""
            user.save()
            if crits_config and request:
                request.session.set_expiry(crits_config.session_timeout * 60 * 60)
            elif request:
                request.session.set_expiry(settings.SESSION_TIMEOUT)
            if request:
                user_login(request, user)
            response['type'] = "login_successful"
            # Redirect to next or default dashboard
            if next_url is not None and next_url != '' and next_url != 'None':
                try:
                    # test that we can go from URL to view to URL
                    # to validate the URL is something we know about.
                    # We use get_script_prefix() here to tell us what
                    # the script prefix is configured in Apache.
                    # We strip it out so resolve can work properly, and then
                    # redirect to the full url.
                    prefix = get_script_prefix()
                    tmp_url = next_url
                    if next_url.startswith(prefix):
                        tmp_url = tmp_url.replace(prefix, '/', 1)
                    res = resolve(tmp_url)
                    url_name = res.url_name
                    args = res.args
                    kwargs = res.kwargs
                    redir = reverse(url_name, args=args, kwargs=kwargs)
                    del redir
                    response['success'] = True
                    response['message'] = next_url
                except:
                    # resolve()/reverse() failed: refuse to follow what may
                    # be an open-redirect attempt.
                    response['success'] = False
                    response['message'] = 'ALERT - attempted open URL redirect attack to %s. Please report this to your system administrator.' % next_url
                return response
            response['success'] = True
            if 'message' not in response:
                response['message'] = reverse('crits.dashboards.views.dashboard')
            return response
        else:
            logger.info("Attempted login to a disabled account detected: %s" %
                        user.username)
    # Fall-through: authentication failed or the account is disabled.
    response['success'] = False
    response['type'] = "login_failed"
    response['message'] = error
    return response
def generate_global_search(request):
    """
    Generate global search results.

    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :returns: dict with keys:
              "url_params" (str),
              "term" (str) - the search term,
              "results" (list),
              "Result" (str of "OK" or "ERROR")
    """

    # Perform rapid search for ObjectID strings. Use .get() so a request
    # without a 'q' parameter does not raise KeyError.
    searchtext = request.GET.get('q', '')
    if ObjectId.is_valid(searchtext):
        for obj_type, url, key in [
                ['Actor', 'crits.actors.views.actor_detail', 'id'],
                ['Backdoor', 'crits.backdoors.views.backdoor_detail', 'id'],
                ['Campaign', 'crits.campaigns.views.campaign_details', 'name'],
                ['Certificate', 'crits.certificates.views.certificate_details', 'md5'],
                ['Domain', 'crits.domains.views.domain_detail', 'domain'],
                ['Email', 'crits.emails.views.email_detail', 'id'],
                ['Event', 'crits.events.views.view_event', 'id'],
                ['Exploit', 'crits.exploits.views.exploit_detail', 'id'],
                ['Indicator', 'crits.indicators.views.indicator', 'id'],
                ['IP', 'crits.ips.views.ip_detail', 'ip'],
                ['PCAP', 'crits.pcaps.views.pcap_details', 'md5'],
                ['RawData', 'crits.raw_data.views.raw_data_details', 'id'],
                ['Sample', 'crits.samples.views.detail', 'md5'],
                ['Signature', 'crits.signatures.views.signature_detail', 'id'],
                ['Target', 'crits.targets.views.target_info', 'email_address']]:
            obj = class_from_id(obj_type, searchtext)
            if obj:
                return {'url': url, 'key': obj[key]}
    # Importing here to prevent a circular import with Services and runscript.
    from crits.services.analysis_result import AnalysisResult
    results = []
    # Defaults in case every collection reports "IGNORE" below. Previously
    # these were only bound inside the else branch, so the return statement
    # could raise NameError.
    term = ""
    urlparams = ""
    for col_obj,url in [
            [Actor, "crits.actors.views.actors_listing"],
            [AnalysisResult, "crits.services.views.analysis_results_listing"],
            [Backdoor, "crits.backdoors.views.backdoors_listing"],
            [Campaign, "crits.campaigns.views.campaigns_listing"],
            [Certificate, "crits.certificates.views.certificates_listing"],
            [Comment, "crits.comments.views.comments_listing"],
            [Domain, "crits.domains.views.domains_listing"],
            [Email, "crits.emails.views.emails_listing"],
            [Event, "crits.events.views.events_listing"],
            [Exploit, "crits.exploits.views.exploits_listing"],
            [Indicator,"crits.indicators.views.indicators_listing"],
            [IP, "crits.ips.views.ips_listing"],
            [PCAP, "crits.pcaps.views.pcaps_listing"],
            [RawData, "crits.raw_data.views.raw_data_listing"],
            [Sample, "crits.samples.views.samples_listing"],
            [Screenshot, "crits.screenshots.views.screenshots_listing"],
            [Signature, "crits.signatures.views.signatures_listing"],
            [Target, "crits.targets.views.targets_listing"]]:
        ctype = col_obj._meta['crits_type']
        resp = get_query(col_obj, request)
        if resp['Result'] == "ERROR":
            return resp
        elif resp['Result'] == "IGNORE":
            results.append({'count': 0,
                            'url': url,
                            'name': ctype})
        else:
            formatted_query = resp['query']
            term = resp['term']
            urlparams = resp['urlparams']
            # Re-run as a count-only query for the summary listing.
            resp = data_query(col_obj, request.user.username,
                              query=formatted_query, count=True)
            results.append({'count': resp['count'],
                            'url': url,
                            'name': ctype})
    return {'url_params': urlparams,
            'term': term,
            'results': results,
            'Result': "OK"}
def _zipped_file_response(filename, sample_md5, collection):
    """
    Zip a single GridFS file and return it as a download response.

    :param filename: The name the file should carry inside the zip.
    :type filename: str
    :param sample_md5: The MD5 of the file to fetch.
    :type sample_md5: str
    :param collection: The GridFS collection prefix to fetch from.
    :type collection: str
    :returns: :class:`django.http.HttpResponse`
    """

    data = [(filename, get_file(sample_md5, collection))]
    zip_data = create_zip(data, False)
    # content_type, not the removed 'mimetype' kwarg (Django >= 1.7), for
    # consistency with the rest of this module.
    response = HttpResponse(zip_data, content_type='application/octet-stream')
    response['Content-Disposition'] = 'attachment; filename=%s.zip' % filename
    return response

def download_grid_file(request, dtype, sample_md5):
    """
    Download a file from GridFS. The file will get zipped up.

    This should go away and get roped into our other download feature.

    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :param dtype: 'pcap', 'object', or 'cert'.
    :type dtype: str
    :param sample_md5: The MD5 of the file to download.
    :type sample_md5: str
    :returns: :class:`django.http.HttpResponse`
    """

    if dtype == 'object':
        grid = mongo_connector("%s.files" % settings.COL_OBJECTS)
        obj = grid.find_one({'md5': sample_md5})
        if obj is None:
            # Not a stored object; fall through to the PCAP collection.
            dtype = 'pcap'
        else:
            return _zipped_file_response(obj['filename'], sample_md5, "objects")
    if dtype == 'pcap':
        pcaps = mongo_connector(settings.COL_PCAPS)
        pcap = pcaps.find_one({"md5": sample_md5})
        if not pcap:
            return render_to_response('error.html',
                                      {'data': request,
                                       'error': "File not found."},
                                      RequestContext(request))
        return _zipped_file_response(pcap['filename'], sample_md5, "pcaps")
    if dtype == 'cert':
        certificates = mongo_connector(settings.COL_CERTIFICATES)
        cert = certificates.find_one({"md5": sample_md5})
        if not cert:
            return render_to_response('error.html',
                                      {'data': request,
                                       'error': "File not found."},
                                      RequestContext(request))
        return _zipped_file_response(cert['filename'], sample_md5, "certificates")
def generate_counts_jtable(request, option):
    """
    Generate the jtable data for counts.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """

    # Only the AJAX listing action is supported here.
    if option != "jtlist":
        return render_to_response('error.html',
                                  {'data': request,
                                   'error': "Invalid request"},
                                  RequestContext(request))
    counts_col = mongo_connector(settings.COL_COUNTS)
    counts_doc = counts_col.find_one({'name': 'counts'})
    records = []
    if counts_doc:
        # One jtable row per counter, sorted by counter name. Rows have no
        # backing document, so id and url are placeholders.
        for count_name, total in sorted(counts_doc['counts'].items()):
            records.append({'type': count_name,
                            'count': total,
                            'id': 0,
                            'url': ""})
    payload = {'Result': "OK", 'Records': records}
    return HttpResponse(json.dumps(payload, default=json_handler),
                        content_type="application/json")
def generate_audit_jtable(request, option):
    """
    Generate the jtable data for audit log entries.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """

    type_ = "audit"
    if option == "jtlist":
        # AJAX listing: each row links to the details page of its target.
        listing = jtable_ajax_list(AuditLog,
                                   'crits.core.views.details',
                                   "target_id",
                                   request)
        return HttpResponse(json.dumps(listing, default=json_handler),
                            content_type="application/json")

    jtopts = {
        'title': "Audit Log Entries",
        'default_sort': "date DESC",
        'listurl': reverse('crits.core.views.%s_listing' % type_,
                           args=('jtlist',)),
        'deleteurl': '',
        'searchurl': reverse('crits.core.views.%s_listing' % type_),
        'fields': ["details",
                   "user",
                   "type",
                   "method",
                   "value",
                   "date",
                   "id"],
        'hidden_fields': ["id"],
        'linked_fields': [],
        'details_link': 'details',
        'no_sort': ['details', ],
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = []

    context = {'jtable': jtable, 'jtid': '%s_listing' % type_}
    if option == "inline":
        context['button'] = '%ss_tab' % type_
        template = "jtable.html"
    else:
        template = "%s_listing.html" % type_
    return render_to_response(template, context, RequestContext(request))
def details_from_id(type_, id_):
    """
    Determine the details URL based on type and ID and redirect there.

    :param type_: The CRITs type to search for.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :returns: str (a URL), or None when the type is unknown or no id
              was provided.
    """

    # Maps each TLO type to the view rendering its details page. Every view
    # listed here takes the object id directly as its single URL argument.
    type_map = {'Actor': 'crits.actors.views.actor_detail',
                'Backdoor': 'crits.backdoors.views.backdoor_detail',
                'Campaign': 'crits.campaigns.views.campaign_details',
                'Certificate': 'crits.certificates.views.certificate_details',
                'Domain': 'crits.domains.views.domain_detail',
                'Email': 'crits.emails.views.email_detail',
                'Event': 'crits.events.views.view_event',
                'Exploit': 'crits.exploits.views.exploit_detail',
                'Indicator': 'crits.indicators.views.indicator',
                'IP': 'crits.ips.views.ip_detail',
                'PCAP': 'crits.pcaps.views.pcap_details',
                'RawData': 'crits.raw_data.views.raw_data_details',
                'Sample': 'crits.samples.views.detail',
                'Screenshot': 'crits.screenshots.views.render_screenshot',
                'Signature': 'crits.signatures.views.signature_detail',
                'Target': 'crits.targets.views.target_info',
                }
    # (A large block of commented-out per-type key lookups was removed
    # here; all views now key off the object id directly.)
    if type_ in type_map and id_:
        return reverse(type_map[type_], args=(id_,))
    return None
def audit_entry(self, username, type_, new_doc=False):
    """
    Generate an audit entry.

    :param self: The object.
    :type self: class which inherits from
                :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param username: The user performing the action.
    :type username: str
    :param type_: The type of action being performed ("save", "delete").
    :type type_: str
    :param new_doc: If this is a new document being added to the database.
    :type new_doc: boolean
    """

    if username is None:
        # If no username, skip the audit log
        return

    my_type = self._meta['crits_type']
    # don't audit audits
    if my_type in ("AuditLog", "Service"):
        return

    # Top-level names of changed fields, minus bookkeeping entries,
    # de-duplicated via set().
    skip = ("modified", "save", "delete")
    changed_fields = list(set(f.split('.')[0]
                              for f in self._get_changed_fields()
                              if f not in skip))
    if new_doc and not changed_fields:
        what_changed = "new document"
    else:
        what_changed = ', '.join(changed_fields)

    key_descriptor = key_descriptor_from_obj_type(my_type)
    if key_descriptor is not None:
        value = getattr(self, key_descriptor, '')
    else:
        value = ""

    if type_ in ("save", "delete"):
        entry = AuditLog()
        entry.user = username
        entry.target_type = my_type
        entry.target_id = self.id
        if type_ == "save":
            entry.value = what_changed
            entry.method = "save()"
        else:
            entry.value = value
            entry.method = "delete()"
        try:
            entry.save()
        except ValidationError:
            pass

    # Generate audit notification
    generate_audit_notification(username, type_, self, changed_fields,
                                what_changed, new_doc)
def ticket_add(type_, id_, ticket, user, **kwargs):
    """
    Add a ticket to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param ticket: The ticket to add.
    :type ticket: dict with keys "date", and "ticket_number".
    :param user: The user creating the ticket.
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "object" (str) if successful,
              "message" (str) if failed.
    """

    obj = class_from_id(type_, id_)
    if not obj:
        return {'success': False, 'message': 'Could not find object.'}
    try:
        # Normalize any string dates in the ticket into datetime objects.
        ticket = datetime_parser(ticket)
        ticket['analyst'] = user
        obj.add_ticket(ticket['ticket_number'],
                       ticket['analyst'],
                       ticket['date'])
        obj.save(username=user)
        return {'success': True, 'object': ticket}
    # "as e" (valid since Python 2.6) replaces the py2-only comma syntax;
    # str(e) keeps the message JSON-serializable.
    except (ValidationError, TypeError, KeyError) as e:
        return {'success': False, 'message': str(e)}
def ticket_update(type_, id_, ticket, user=None, **kwargs):
    """
    Update a ticket for a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param ticket: The ticket to update; its "date" identifies which
                   existing ticket to edit.
    :type ticket: dict with keys "date", and "ticket_number".
    :param user: The user updating the ticket.
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "object" (str) if successful,
              "message" (str) if failed.
    """

    obj = class_from_id(type_, id_)
    if not obj:
        return {'success': False, 'message': 'Could not find object.'}
    try:
        # Normalize any string dates in the ticket into datetime objects.
        ticket = datetime_parser(ticket)
        ticket['analyst'] = user
        obj.edit_ticket(ticket['analyst'],
                        ticket['ticket_number'],
                        ticket['date'])
        obj.save(username=user)
        return {'success': True, 'object': ticket}
    # "as e" (valid since Python 2.6) replaces the py2-only comma syntax;
    # str(e) keeps the message JSON-serializable.
    except (ValidationError, TypeError, KeyError) as e:
        return {'success': False, 'message': str(e)}
def ticket_remove(type_, id_, date, user, **kwargs):
    """
    Remove a ticket from a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :param date: The date of the ticket to remove.
    :type date: datetime.datetime.
    :param user: The user removing the ticket.
    :type user: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str) if failed.
    """

    obj = class_from_id(type_, id_)
    if not obj:
        return {'success': False, 'message': 'Could not find object.'}
    try:
        # Normalize a string date into a datetime object.
        date = datetime_parser(date)
        obj.delete_ticket(date)
        obj.save(username=user)
        return {'success': True}
    # "as e" (valid since Python 2.6) replaces the py2-only comma syntax;
    # str(e) keeps the message JSON-serializable.
    except ValidationError as e:
        return {'success': False, 'message': str(e)}
def unflatten(dictionary):
    """
    Unflatten a dictionary whose keys are dot-separated paths.

    Example: {"a.b": 1, "a.c": 2} -> {"a": {"b": 1, "c": 2}}

    :param dictionary: The dictionary to unflatten.
    :type dictionary: dict
    :returns: dict
    """

    result = dict()
    # .items() instead of py2-only .iteritems(): identical behavior here
    # and keeps the helper portable.
    for key, value in dictionary.items():
        parts = key.split(".")
        node = result
        for part in parts[:-1]:
            # setdefault walks/creates each nested level in one step.
            node = node.setdefault(part, dict())
        node[parts[-1]] = value
    return result
def alter_sector_list(obj, sectors, val):
    """
    Given a list of sectors on this object, increment or decrement
    the sectors objects accordingly. This is used when adding
    or removing a sector list to an item, and when deleting an item.
    :param obj: The top-level object instantiated class.
    :type obj: class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
    :param sectors: List of sectors.
    :type sectors: list
    :param val: The amount to change the count by.
    :type val: int
    """
    # This dictionary is used to set values on insert only.
    # I haven't found a way to get mongoengine to use the defaults
    # when doing update_one() on the queryset.
    # Zero every per-TLO counter except the one being incremented below.
    soi = { k: 0 for k in Sector._meta['schema_doc'].keys() if k != 'name' and k != obj._meta['crits_type'] }
    soi['schema_version'] = Sector._meta['latest_schema_version']
    # We are using mongo_connector here because mongoengine does not have
    # support for a setOnInsert option. If mongoengine were to gain support
    # for this we should switch to using it instead of pymongo here.
    sectors_col = mongo_connector(settings.COL_SECTOR_LISTS)
    for name in sectors:
        # Upsert: creates the sector doc (with soi defaults) if missing,
        # otherwise just bumps the counter for this TLO type.
        sectors_col.update({'name': name},
                           {'$inc': {obj._meta['crits_type']: val},
                            '$setOnInsert': soi},
                           upsert=True)
        # Find and remove this sector if, and only if, all counts are zero.
        # NOTE(review): this filter omits Backdoor/Exploit, which
        # generate_sector_jtable lists as sector columns -- confirm schema.
        if val == -1:
            Sector.objects(name=name,
                           Actor=0,
                           Campaign=0,
                           Certificate=0,
                           Domain=0,
                           Email=0,
                           Event=0,
                           Indicator=0,
                           IP=0,
                           PCAP=0,
                           RawData=0,
                           Sample=0,
                           Signature=0,
                           Target=0).delete()
def generate_sector_csv(request):
    """
    Generate CSV output for the Sector list.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Delegate entirely to the generic CSV exporter for the Sector model.
    response = csv_export(request, Sector)
    return response
def generate_sector_jtable(request, option):
    """
    Generate the jtable data for rendering in the sector list template.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # AJAX data request: return the sector rows as JSON.
    if option == 'jtlist':
        details_url = 'crits.core.views.sector_list'
        details_key = 'name'
        response = jtable_ajax_list(Sector,
                                    details_url,
                                    details_key,
                                    request,
                                    includes=['name',
                                              'Actor',
                                              'Backdoor',
                                              'Campaign',
                                              'Certificate',
                                              'Domain',
                                              'Email',
                                              'Event',
                                              'Exploit',
                                              'Indicator',
                                              'IP',
                                              'PCAP',
                                              'RawData',
                                              'Sample',
                                              'Signature',
                                              'Target'])
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type='application/json')
    # Otherwise build the jtable definition itself and render the template.
    fields = ['name', 'Actor', 'Backdoor', 'Campaign', 'Certificate', 'Domain',
              'Email', 'Event', 'Exploit', 'Indicator', 'IP', 'PCAP', 'RawData',
              'Sample', 'Signature', 'Target']
    jtopts = {'title': 'Sectors',
              'fields': fields,
              'listurl': 'jtlist',
              'searchurl': reverse('crits.core.views.global_search_listing'),
              'default_sort': 'name ASC',
              'no_sort': [],
              'details_link': ''}
    jtable = build_jtable(jtopts, request)
    # Attach a JavaScript "display" renderer to each column that turns the
    # cell value into a link to the appropriate listing view.
    for ctype in fields:
        if ctype == 'id':
            continue
        elif ctype == 'name':
            url = reverse('crits.core.views.global_search_listing') + '?search_type=sectors&search=Search&force_full=1'
        else:
            # Listing views live at crits.<type>s.views.<type>s_listing,
            # except RawData which uses the raw_data module name.
            lower = ctype.lower()
            if lower != "rawdata":
                url = reverse('crits.%ss.views.%ss_listing' % (lower, lower))
            else:
                lower = "raw_data"
                url = reverse('crits.%s.views.%s_listing' % (lower, lower))
        for field in jtable['fields']:
            if field['fieldname'].startswith("'" + ctype):
                if ctype == 'name':
                    field['display'] = """ function (data) {
                return '<a href="%s&q='+encodeURIComponent(data.record.name)+'">' + data.record.name + '</a>';
            }
            """ % url
                else:
                    field['display'] = """ function (data) {
                return '<a href="%s?sectors='+encodeURIComponent(data.record.name)+'">'+data.record.%s+'</a>';
            }
            """ % (url, ctype)
    return render_to_response('sector_lists.html',
                              {'jtable': jtable,
                               'jtid': 'sector_lists'},
                              RequestContext(request))
def modify_kill_chain_list(itype, oid, kill_chains, analyst):
    """
    Modify the kill chain list for a top-level object.

    :param itype: The CRITs type of the top-level object to modify.
    :type itype: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param kill_chains: The list of kill chain phases.
    :type kill_chains: list
    :param analyst: The user making the modifications.
    """
    tlo = class_from_id(itype, oid)
    if tlo:
        tlo.set_kill_chain_list(kill_chains)
        try:
            tlo.save(username=analyst)
        except ValidationError:
            # Best-effort save: validation failures are silently ignored.
            pass
def modify_sector_list(itype, oid, sectors, analyst):
    """
    Modify the sector list for a top-level object.

    :param itype: The CRITs type of the top-level object to modify.
    :type itype: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param sectors: The list of sectors.
    :type sectors: list
    :param analyst: The user making the modifications.
    """
    tlo = class_from_id(itype, oid)
    if tlo:
        # append=False replaces the existing sector list wholesale.
        tlo.add_sector_list(sectors, analyst, append=False)
        try:
            tlo.save(username=analyst)
        except ValidationError:
            # Best-effort save: validation failures are silently ignored.
            pass
def get_bucket_autocomplete(term):
    """
    Get existing buckets to autocomplete.

    :param term: The current term to look for autocomplete options.
    :type term: str
    :returns: :class:`django.http.HttpResponse` containing a JSON list
              of matching bucket names.
    """
    names = [bucket.name
             for bucket in Bucket.objects(name__istartswith=term)]
    return HttpResponse(json.dumps(names, default=json_handler),
                        content_type='application/json')
def add_new_action(action, object_types, preferred, analyst):
    """
    Add a new action to CRITs.

    :param action: The action to add to CRITs.
    :type action: str
    :param object_types: The TLOs this is for.
    :type object_types: list
    :param preferred: The TLOs this is preferred for, one
                      "type,field,value" row per line.
    :type preferred: str
    :param analyst: The user adding this action.
    :returns: True, False
    """
    name = action.strip()
    idb_action = Action.objects(name=name).first()
    if not idb_action:
        # No action by this name yet; start a fresh document.
        idb_action = Action()
        idb_action.name = name
    idb_action.object_types = object_types
    idb_action.preferred = []
    for line in preferred.split('\n'):
        cols = line.split(',')
        # Malformed rows (not exactly three fields) are skipped.
        if len(cols) == 3:
            epa = EmbeddedPreferredAction()
            epa.object_type = cols[0].strip()
            epa.object_field = cols[1].strip()
            epa.object_value = cols[2].strip()
            idb_action.preferred.append(epa)
    try:
        idb_action.save(username=analyst)
    except ValidationError:
        return False
    return True
| jhuapl-marti/marti | crits/core/handlers.py | Python | mit | 182,605 | [
"Amber"
] | 8bff9b4018943185096345b9e447d864017cf22d3a3a1d9845cec7267bb29ea0 |
# sybase/base.py
# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
# Sybase reserved words; used by SybaseIdentifierPreparer to decide which
# identifiers must be quoted in emitted SQL.
RESERVED_WORDS = set([
    "add", "all", "alter", "and",
    "any", "as", "asc", "backup",
    "begin", "between", "bigint", "binary",
    "bit", "bottom", "break", "by",
    "call", "capability", "cascade", "case",
    "cast", "char", "char_convert", "character",
    "check", "checkpoint", "close", "comment",
    "commit", "connect", "constraint", "contains",
    "continue", "convert", "create", "cross",
    "cube", "current", "current_timestamp", "current_user",
    "cursor", "date", "dbspace", "deallocate",
    "dec", "decimal", "declare", "default",
    "delete", "deleting", "desc", "distinct",
    "do", "double", "drop", "dynamic",
    "else", "elseif", "encrypted", "end",
    "endif", "escape", "except", "exception",
    "exec", "execute", "existing", "exists",
    "externlogin", "fetch", "first", "float",
    "for", "force", "foreign", "forward",
    "from", "full", "goto", "grant",
    "group", "having", "holdlock", "identified",
    "if", "in", "index", "index_lparen",
    "inner", "inout", "insensitive", "insert",
    "inserting", "install", "instead", "int",
    "integer", "integrated", "intersect", "into",
    "iq", "is", "isolation", "join",
    "key", "lateral", "left", "like",
    "lock", "login", "long", "match",
    "membership", "message", "mode", "modify",
    "natural", "new", "no", "noholdlock",
    "not", "notify", "null", "numeric",
    "of", "off", "on", "open",
    "option", "options", "or", "order",
    "others", "out", "outer", "over",
    "passthrough", "precision", "prepare", "primary",
    "print", "privileges", "proc", "procedure",
    "publication", "raiserror", "readtext", "real",
    "reference", "references", "release", "remote",
    "remove", "rename", "reorganize", "resource",
    "restore", "restrict", "return", "revoke",
    "right", "rollback", "rollup", "save",
    "savepoint", "scroll", "select", "sensitive",
    "session", "set", "setuser", "share",
    "smallint", "some", "sqlcode", "sqlstate",
    "start", "stop", "subtrans", "subtransaction",
    "synchronize", "syntax_error", "table", "temporary",
    "then", "time", "timestamp", "tinyint",
    "to", "top", "tran", "trigger",
    "truncate", "tsequal", "unbounded", "union",
    "unique", "unknown", "unsigned", "update",
    "updating", "user", "using", "validate",
    "values", "varbinary", "varchar", "variable",
    "varying", "view", "wait", "waitfor",
    "when", "where", "while", "window",
    "with", "with_cube", "with_lparen", "with_rollup",
    "within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
# Sybase-specific column types.  Each carries only a __visit_name__ that
# routes it to the matching visit_* method on SybaseTypeCompiler.
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNICHAR'


class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNIVARCHAR'


class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    __visit_name__ = 'UNITEXT'


class TINYINT(sqltypes.Integer):
    __visit_name__ = 'TINYINT'


class BIT(sqltypes.TypeEngine):
    __visit_name__ = 'BIT'


class MONEY(sqltypes.TypeEngine):
    __visit_name__ = "MONEY"


class SMALLMONEY(sqltypes.TypeEngine):
    __visit_name__ = "SMALLMONEY"


class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    __visit_name__ = "UNIQUEIDENTIFIER"


class IMAGE(sqltypes.LargeBinary):
    __visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    """Render Sybase DDL type names, mapping generic SQLAlchemy types
    onto their Sybase equivalents (LargeBinary->IMAGE, Boolean->BIT,
    Unicode->NVARCHAR)."""

    def visit_large_binary(self, type_, **kw):
        return self.visit_IMAGE(type_)

    def visit_boolean(self, type_, **kw):
        return self.visit_BIT(type_)

    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_)

    def visit_UNICHAR(self, type_, **kw):
        return "UNICHAR(%d)" % type_.length

    def visit_UNIVARCHAR(self, type_, **kw):
        return "UNIVARCHAR(%d)" % type_.length

    def visit_UNITEXT(self, type_, **kw):
        return "UNITEXT"

    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"

    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"

    def visit_BIT(self, type_, **kw):
        return "BIT"

    def visit_MONEY(self, type_, **kw):
        return "MONEY"

    def visit_SMALLMONEY(self, type_, **kw):
        return "SMALLMONEY"

    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"
# Map of Sybase system-catalog type names (as reflected from systypes)
# to SQLAlchemy type classes; consumed by _get_column_info().
ischema_names = {
    'bigint': BIGINT,
    'int': INTEGER,
    'integer': INTEGER,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'unsigned bigint': BIGINT,  # TODO: unsigned flags
    'unsigned int': INTEGER,  # TODO: unsigned flags
    'unsigned smallint': SMALLINT,  # TODO: unsigned flags
    'numeric': NUMERIC,
    'decimal': DECIMAL,
    'dec': DECIMAL,
    'float': FLOAT,
    'double': NUMERIC,  # TODO
    'double precision': NUMERIC,  # TODO
    'real': REAL,
    'smallmoney': SMALLMONEY,
    'money': MONEY,
    'smalldatetime': DATETIME,
    'datetime': DATETIME,
    'date': DATE,
    'time': TIME,
    'char': CHAR,
    'character': CHAR,
    'varchar': VARCHAR,
    'character varying': VARCHAR,
    'char varying': VARCHAR,
    'unichar': UNICHAR,
    'unicode character': UNIVARCHAR,
    'nchar': NCHAR,
    'national char': NCHAR,
    'national character': NCHAR,
    'nvarchar': NVARCHAR,
    'nchar varying': NVARCHAR,
    'national char varying': NVARCHAR,
    'national character varying': NVARCHAR,
    'text': TEXT,
    'unitext': UNITEXT,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'image': IMAGE,
    'bit': BIT,

    # not in documentation for ASE 15.7
    'long varchar': TEXT,  # TODO
    'timestamp': TIMESTAMP,
    'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
    """Inspector subclass exposing Sybase table-id lookup."""

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_id(self, table_name, schema=None):
        """Return the table id from `table_name` and `schema`."""
        return self.dialect.get_table_id(
            self.bind, table_name, schema, info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
    # True while an INSERT that supplies an explicit IDENTITY value is in
    # flight; set by pre_exec() and consumed/reset by post_exec().
    _enable_identity_insert = False

    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.
        "connection" is the raw unwrapped DBAPI connection. "value"
        is True or False. when True, the connection should be configured
        such that a DDL can take place subsequently. when False,
        a DDL has taken place and the connection should be resumed
        into non-autocommit mode.
        """
        raise NotImplementedError()

    def pre_exec(self):
        # If this INSERT explicitly provides a value for the table's
        # autoincrement column, enable IDENTITY_INSERT for the statement.
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None
            if insert_has_sequence:
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False
            if self._enable_identity_insert:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl))
        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings. this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time.")
            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
            self.set_ddl_autocommit(
                self.root_connection.connection.connection,
                True)

    def post_exec(self):
        # Undo per-statement session settings made in pre_exec().
        if self.isddl:
            self.set_ddl_autocommit(self.root_connection, False)
        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                format_table(self.compiled.statement.table)
            )

    def get_lastrowid(self):
        # @@identity holds the last IDENTITY value generated on this
        # connection; fetched on a fresh cursor after the INSERT.
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
    ansi_bind_rules = True

    # Map SQLAlchemy extract() field names onto Sybase DATEPART names.
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond'
        })

    def get_select_precolumns(self, select, **kw):
        # Sybase expresses LIMIT/OFFSET as "TOP n START AT m" immediately
        # after SELECT, so it is rendered here rather than in limit_clause().
        s = select._distinct and "DISTINCT " or ""
        # TODO: don't think Sybase supports
        # bind params for FIRST / TOP
        limit = select._limit
        if limit:
            # if select._limit == 1:
            #     s += "FIRST "
            # else:
            #     s += "TOP %s " % (select._limit,)
            s += "TOP %s " % (limit,)
        offset = select._offset
        if offset:
            if not limit:
                # FIXME: sybase doesn't allow an offset without a limit
                # so use a huge value for TOP here
                s += "TOP 1000000 "
            # START AT is 1-based, SQLAlchemy offsets are 0-based.
            s += "START AT %s " % (offset + 1,)
        return s

    def get_from_hint_text(self, table, text):
        return text

    def limit_clause(self, select, **kw):
        # Limit in sybase is after the select keyword
        return ""

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))

    def visit_now_func(self, fn, **kw):
        return "GETDATE()"

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        # Render ORDER BY with literal values (no bind params).
        kw['literal_binds'] = True
        order_by = self.process(select._order_by_clause, **kw)

        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        """Render "<name> <type> [IDENTITY | DEFAULT ...] [NOT] NULL" for
        a column in CREATE TABLE."""
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(
                column.type, type_expression=column)

        if column.table is None:
            raise exc.CompileError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL")
        seq_col = column.table._autoincrement_column

        # install a IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            sequence = isinstance(column.default, sa_schema.Sequence) \
                and column.default
            if sequence:
                start, increment = sequence.start or 1, \
                    sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        if column.nullable is not None:
            if not column.nullable or column.primary_key:
                colspec += " NOT NULL"
            else:
                colspec += " NULL"
        return colspec

    def visit_drop_index(self, drop):
        # Sybase requires DROP INDEX <table>.<index>.
        index = drop.element
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self._prepared_index_name(drop.element,
                                      include_schema=False)
        )
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer that quotes Sybase reserved words."""
    reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
    name = 'sybase'
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    supports_native_boolean = False
    supports_unicode_binds = False
    postfetch_lastrowid = True

    colspecs = {}
    ischema_names = ischema_names

    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer
    inspector = SybaseInspector

    construct_arguments = []

    def _get_default_schema_name(self, connection):
        # user_name() reports the current database user, which acts as
        # the default schema for reflection.
        return connection.scalar(
            text("SELECT user_name() as user_name",
                 typemap={'user_name': Unicode})
        )

    def initialize(self, connection):
        super(SybaseDialect, self).initialize(connection)
        # The identifier length limit was raised from 30 to 255 in ASE 15.
        if self.server_version_info is not None and \
                self.server_version_info < (15, ):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255

    def get_table_id(self, connection, table_name, schema=None, **kw):
        """Fetch the id for schema.table_name.
        Several reflection methods require the table id. The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.
        """
        table_id = None
        if schema is None:
            schema = self.default_schema_name

        TABLEID_SQL = text("""
          SELECT o.id AS id
          FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
          WHERE u.name = :schema_name
              AND o.name = :table_name
              AND o.type in ('U', 'V')
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")
            if isinstance(table_name, unicode):
                table_name = table_name.encode("ascii")
        result = connection.execute(TABLEID_SQL,
                                    schema_name=schema,
                                    table_name=table_name)
        table_id = result.scalar()
        if table_id is None:
            raise exc.NoSuchTableError(table_name)
        return table_id

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column metadata from syscolumns/systypes."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)

        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
             length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                                             bool(autoincrement),
                                             default, precision, scale,
                                             length)
            columns.append(col_info)

        return columns

    def _get_column_info(self, name, type_, nullable, autoincrement, default,
                         precision, scale, length):
        """Build the reflection dict for one column, instantiating the
        SQLAlchemy type with whatever arguments it takes."""
        coltype = self.ischema_names.get(type_, None)

        kwargs = {}

        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()

        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE

        if default:
            # Strip the leading DEFAULT keyword and any quoting from the
            # reflected syscomments text.
            default = re.sub("DEFAULT", "", default).strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect foreign keys from sysreferences, resolving the
        referenced table/columns through per-call caches."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        table_cache = {}
        column_cache = {}
        foreign_keys = []

        table_cache[table_id] = {"name": table_name, "schema": schema}

        COLUMN_SQL = text("""
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns

        # BUGFIX: the fokey8/refkey8 output columns were previously aliased
        # from fokey1/refkey1, corrupting reflection of foreign keys with
        # eight or more columns.
        REFCONSTRAINT_SQL = text("""
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """)
        referential_constraints = connection.execute(REFCONSTRAINT_SQL,
                                                     table_id=table_id)

        REFTABLE_SQL = text("""
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """)

        for r in referential_constraints:
            reftable_id = r["reftable_id"]

            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (schema is not None or
                        reftable["schema"] != self.default_schema_name):
                    table_info["schema"] = reftable["schema"]

                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns

            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]

            constrained_columns = []
            referred_columns = []
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])

            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"]
            }

            foreign_keys.append(fk_info)

        return foreign_keys

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Reflect non-PK indexes from sysindexes."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        INDEX_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 (i.status & 0x2) AS 'unique',
                 index_col(object_name(i.id), i.indid, 1) AS col_1,
                 index_col(object_name(i.id), i.indid, 2) AS col_2,
                 index_col(object_name(i.id), i.indid, 3) AS col_3,
                 index_col(object_name(i.id), i.indid, 4) AS col_4,
                 index_col(object_name(i.id), i.indid, 5) AS col_5,
                 index_col(object_name(i.id), i.indid, 6) AS col_6,
                 index_col(object_name(i.id), i.indid, 7) AS col_7,
                 index_col(object_name(i.id), i.indid, 8) AS col_8,
                 index_col(object_name(i.id), i.indid, 9) AS col_9,
                 index_col(object_name(i.id), i.indid, 10) AS col_10,
                 index_col(object_name(i.id), i.indid, 11) AS col_11,
                 index_col(object_name(i.id), i.indid, 12) AS col_12,
                 index_col(object_name(i.id), i.indid, 13) AS col_13,
                 index_col(object_name(i.id), i.indid, 14) AS col_14,
                 index_col(object_name(i.id), i.indid, 15) AS col_15,
                 index_col(object_name(i.id), i.indid, 16) AS col_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 0
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            column_names = []
            # NOTE(review): iterates to keycnt - 1, unlike the PK path which
            # uses keycnt; sysindexes.keycnt presumably counts an extra key
            # here -- confirm against the ASE system-table docs before
            # changing.
            for i in range(1, r["count"]):
                column_names.append(r["col_%i" % (i,)])
            index_info = {"name": r["name"],
                          "unique": bool(r["unique"]),
                          "column_names": column_names}
            indexes.append(index_info)

        return indexes

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary key (the sysindexes row with status bit
        2048 set)."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        # BUGFIX: a table with no primary key previously raised TypeError
        # when subscripting the None row.
        if pks is None:
            return {"constrained_columns": [], "name": None}

        constrained_columns = []
        for i in range(1, pks["count"] + 1):
            constrained_columns.append(pks["pk_%i" % (i,)])
        return {"constrained_columns": constrained_columns,
                "name": pks["name"]}

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all user names; users act as schemas in Sybase."""
        SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")

        schemas = connection.execute(SCHEMA_SQL)

        return [s["name"] for s in schemas]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return user table names ('U' objects) in the given schema."""
        if schema is None:
            schema = self.default_schema_name

        TABLE_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'U'
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        tables = connection.execute(TABLE_SQL, schema_name=schema)

        return [t["name"] for t in tables]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the CREATE VIEW text stored in syscomments."""
        if schema is None:
            schema = self.default_schema_name

        VIEW_DEF_SQL = text("""
          SELECT c.text
          FROM syscomments c JOIN sysobjects o ON c.id = o.id
          WHERE o.name = :view_name
            AND o.type = 'V'
        """)

        if util.py2k:
            if isinstance(view_name, unicode):
                view_name = view_name.encode("ascii")

        view = connection.execute(VIEW_DEF_SQL, view_name=view_name)

        return view.scalar()

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names ('V' objects) in the given schema."""
        if schema is None:
            schema = self.default_schema_name

        VIEW_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'V'
        """)

        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        views = connection.execute(VIEW_SQL, schema_name=schema)

        return [v["name"] for v in views]

    def has_table(self, connection, table_name, schema=None):
        """True if get_table_id() can resolve the table, else False."""
        try:
            self.get_table_id(connection, table_name, schema)
        except exc.NoSuchTableError:
            return False
        else:
            return True
| sunze/py_flask | venv/lib/python3.4/site-packages/sqlalchemy/dialects/sybase/base.py | Python | mit | 28,775 | [
"ASE"
] | fecf2d7fa2d981e48e258fc8492a431e2e8928f2b8c27ac9fb9180c30e043cda |
#interfacing with SVU class and forming core data structures
import glob
import os
import csv
import re
import datetime
#pip installed libs
import numpy as np
#local libs
import fusion_utils as fu
import read_utils as ru
import structural_variant_unit
def get_sv_types():
    """Return the SV type codes known to the SVU class."""
    svu = structural_variant_unit.SVU()
    return svu.get_sv_types()
def pretty_ranges(B, units):
    """
    Render sorted integer boundaries as human-readable range labels.

    :param B: sorted list of integer boundaries
    :param units: unit suffix appended to each label, e.g. 'bp'
    :returns: list of len(B)-1 strings such as ['0bp-1Kbp', '1Kbp-1.5Kbp']
    """
    s, size_map = [], {0: '', 1: 'K', 2: 'M', 3: 'G', 4: 'T', 5: 'P', 6: 'E'}
    for b in B:
        x, m = [b, 0], 0
        # Floor division (//) keeps identical behavior on Python 2 and 3;
        # the original bare '/' was integer division only under Python 2.
        if x[0] // 1000 > 0:
            # Repeatedly divide by 1000 to find the magnitude suffix,
            # remembering the last remainder for one decimal digit.
            while x[0] // 1000 > 0:
                x[1] = x[0] % 1000
                x[0], m = x[0] // 1000, m + 1
            d = ''
            if x[1] > 0:
                d = '.' + str(x[1])[:1]
            s += [str(x[0]) + d + size_map[m] + units]
        else:
            s += [str(b) + units]
    # Join adjacent labels into "low-high" range strings.
    t = []
    for i in range(len(s) - 1):
        t += [s[i] + '-' + s[i + 1]]
    return t
#first beta vcf attempt, uses the sorting order of chroms
#CHROM, POS, ID, REF, ALT, QUAL, FILTER INFO
#QUAL is Phred-scale quality score for the assertion made in ALT 10log(prob(call in ALT is wrong))
def svul_to_genome(S, O):
    """
    Expand SVU rows from absolute offset coordinates into per-chromosome
    genome coordinates.

    :param S: rows of [x1, x2, type, ys, wx, wy, extra...] where ys is a
              list of [y1, y2] destinations
    :param O: {chromosome_name: cumulative_offset} map
    :returns: list of [chrx, posx1, posx2, wx, chry, posy1, posy2, wy,
              type, extra...]
    """
    names = {O[k]: k for k in O}  # offsets as keys
    bounds = sorted(names)

    def locate(pos):
        # Return (offset, chrom) of the half-open bin containing pos.
        # The last boundary acts as an end sentinel and is never matched;
        # unmatched positions fall back to (0, '').
        for j in range(len(bounds) - 1):
            if bounds[j] <= pos < bounds[j + 1]:
                return bounds[j], names[bounds[j]]
        return 0, ''

    rows = []
    for row in S:
        x1, x2, sv_type, ys, wx = row[0], row[1], row[2], row[3], row[4]
        wy, extra = row[5], row[6:]  # extra may include an index/flt field
        xo, chrx = locate(x1)
        for y in ys:
            yo, chry = locate(y[0])
            # Expand each svu into a source/destination pair with chrom tags.
            rows += [[chrx, int(x1) - xo, int(x2) - xo, wx,
                      chry, int(y[0]) - yo, int(y[1]) - yo, wy, sv_type] + extra]
    return rows
#G = [chrx, posx1, posx2, wx, chry, posy1, posy2, wy, type, {idx}]
def svult_to_genome(S, O):
    """
    Flatten a type-keyed SVU table into one sorted genome-coordinate list.

    :param S: {sv_type: svul rows} mapping
    :param O: {chromosome_name: cumulative_offset} map
    :returns: sorted list of [chrx, posx1, posx2, wx, chry, posy1, posy2,
              wy, type, extra...]
    """
    names = {O[k]: k for k in O}  # offsets as keys
    width = max(len(name) for name in names.values())
    rows = []
    for sv_type in S:
        rows.extend(svul_to_genome(S[sv_type], O))
    # Right-justify chrom names so string sort approximates natural order.
    return sorted(rows, key=lambda row: (row[0].rjust(width), row[1]))
#G = [chrx, posx1, posx2, wx, chry, posy1, posy2, wy, type, {idx}]
# NOTE(review): mutable default argument rgb is shared across calls; it is
# only read here, but replacing with rgb=None + a local default is safer.
def svult_to_bed(S, O, bed_path, cid, rgb=[255,255,255]):
    """
    Write the source coordinates of an SVU table to a tab-delimited BED
    file, one row per call, named "<i>-<cid>".

    :param S: {sv_type: svul rows} mapping passed to svult_to_genome
    :param O: {chromosome_name: cumulative_offset} map
    :param bed_path: output file path
    :param cid: caller/set identifier appended to each row name
    :param rgb: itemRgb triple written on every row
    """
    data, i = [], 1
    #fields = ['chrom','chromStart','chromEnd','name','score','strand','itemRgb'] #8bit RGB?
    G = svult_to_genome(S, O)
    for v in G:
        # BED columns: chrom, start, end, name, score, strand, itemRgb.
        data += [[v[0], v[1], v[2], str(i)+'-'+cid, 1.0, '.', ','.join([str(c) for c in rgb])]]
        i += 1
    # 'wb' is the Python 2 csv convention; under Python 3 this would need
    # mode 'w' with newline='' -- the module is Python 2 throughout.
    with open(bed_path, 'wb') as bed:
        csv_w = csv.writer(bed, delimiter='\t')
        #csv_w.writerow(fields)
        for row in data:
            csv_w.writerow(row)
#bed file generator for the base call set C
def s2bed(S, O, bed_base_path):
    """
    Emit one BED file per (sample, sv_type) from the base call set.

    :param S: {sample_id: numpy array of svul rows} with type in column 2
    :param O: {chromosome_name: cumulative_offset} map
    :param bed_base_path: directory/prefix for output; files are named
                          "<type>C<sample>.bed"
    """
    for s in S:
        # Distinct SV type codes present for this sample.
        types = list(set(list(S[s][:,2])))
        for t in types:
            # Row indices of calls with this type.
            i = np.where(S[s][:,2]==t)[0]
            cid = str(t)+'C'+str(s)
            # NOTE(review): S[s][i] is an ndarray slice, but svult_to_bed
            # iterates its first argument as a type-keyed mapping --
            # verify this call path actually produces the intended rows.
            svult_to_bed(S[s][i], O, bed_base_path+cid+'.bed', cid)
#:::TO DO::: take in all calls and use the alpha to add the PASS or lowqual status
#G = [chrx,x1,x2,wx,chry,y1,y2,wy,t,{idx}]
#what is needed to generate the VCF header and how can it be gathered
#before this steps happens, IE how to collect the metadata
#ref should be a dict with {'ref_name':{'chr1':len(chr1)}} taken from the info file
#CHROM POS ID REF ALT QUAL FILTER INFO (SAMPLE)
def genome_to_vcf(D,ref_seq,types,chroms,callers,out_path,sname,
                  target_key=None,header_path=None,fltr=1): #:::TO DO::: flt needs work in the mergeing step
    """Render genome-coordinate calls D as a single-sample VCF at out_path.

    D rows: [chrx,posx1,posx2,wx,chry,posy1,posy2,wy,type,{idx}].
    ref_seq: {'ref_name':{chrom:sequence}}; types maps the numeric svtype to
    its symbolic ALT; callers maps caller id -> name for the SVMETHOD header.
    Returns hist = {type:{0:[wx...],1:[wx...]}} split by target overlap.
    NOTE(review): ref_seq.keys()[0] below is py2-only (py3 dict views are
    not indexable) -- confirm the intended runtime.
    """
    refname = ref_seq.keys()[0] #get the ref name
    C = {k:len(ref_seq[refname][k]) for k in ref_seq[refname]} #get the chrom names
    ctg = max([len(k) for k in C])
    CS = sorted(C.keys(), key =lambda x: x.rjust(ctg)) #SORTING ISSUES::::::::::::::::::::::::::::::
    if header_path is None: #default vcf_header is in the data directory of fusionSVU
        path = os.path.dirname(os.path.abspath(__file__))+'/data/header_template.vcf'
    else:
        path = header_path
    raw = []
    with open(path,'r') as f: raw = f.readlines() #load the header template
    #assert a vcf header fiel was loaded....needs headers. etc could just use the vcf validator tool!
    header,data = [],[] #the vcf header and the data atbel portion of the final file
    hist = {t:{0:[],1:[]} for t in types}
    if len(raw)>10 and raw[0].startswith('##fileformat=VCFv'): #header has at least 10 lines
        #clear the header template comments done in //
        for row in raw:
            if not row.startswith('//'): #clear newlines
                if row.startswith('##'): header += [row.replace('\n','')]
        for i in range(len(header)):
            if header[i].startswith('##fileDate='):
                header[i] += str(datetime.date.today()).replace('-','')
            if header[i].startswith('##reference='):
                header[i] += refname #name
        #construct a name length pair for each contig,,, '##contig=<ID=,len=,>'
        for k in CS:
            header += [''.join(['##contig=<ID=',k,',len=',str(C[k]),'>'])]
        s = ','.join([str(k)+':'+callers[k] for k in sorted(callers.keys())])
        header += ['##INFO=<ID=SVMETHOD,Number=.,Type=String,Description="'+s+'">']
        header += ['\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',sname])] #GT 1/1
        #header complete now parse and generate the data rows before writting data
        #G = [chrx, posx1, posx2, wx, chry, posy1, posy2, wy, type, {idx}]
        #:::TO DO::: take in all calls and use the alpha to add the PASS or lowqual status
        for i in range(len(D)):
            if D[i][0] in chroms:
                idx = D[i][9] #{11:set([2])} //can we dig out the origunal ID field to attach here?
                for k in idx: idx[k] = list(idx[k])
                #target=0 marks calls that overlapped the target/truth set (key popped out)
                target = 1
                if target_key in idx:
                    target = 0
                    idx.pop(target_key)
                hist[D[i][8]][target] += [D[i][3]]
                #strip python literal decoration from the idx dict for the SVMETHOD field
                svmethod = str(D[i][9]).replace(' ','')
                svmethod = svmethod.replace('{','')
                svmethod = svmethod.replace('}','')
                svmethod = svmethod.replace('[','')
                svmethod = svmethod.replace(']','')
                row = [D[i][0], #CHROM
                       str(D[i][1]), #POS
                       'fusorSV_'+str(i+1), #ID
                       ref_seq[refname][D[i][0]][D[i][1]], #REF
                       '<'+types[D[i][8]]+'>', #ALT
                       '.', #QUAL need to calculate
                       'PASS', #devise a pre and post filter
                       'SVTYPE='+types[D[i][8]]+';SVLEN='+str(D[i][2]-D[i][1])+';'+\
                       'END='+str(D[i][2])+';CHR2='+str(D[i][4])+';IMPRECISE;SVEX='+str(D[i][3])+\
                       ';SVMETHOD='+svmethod+';TARGET='+str(target),
                       'GT','0/1']
                data += [row]
    #some string conversion now
    vcf = '\n'.join(header)+'\n'
    for i in data:
        vcf += '\t'.join(i)+'\n'
    with open(out_path,'w') as f: #looks good can just write now and run with the vcf validator tool
        f.write(vcf)
    return hist
#from tigra readme DOCS, tab-seperated
#CHR
#START_OUTER
#START_INNER
#END_INNER
#END_OUTER
#TYPE_OF_EVENT
#SIZE_PREDICTION
#MAPPING_ALGORITHM
#SEQUENCING_TECHNOLOGY
#SAMPLEs
#TYPE_OF_COMPUTATIONAL_APPROACH
#GROUP
#OPTIONAL_ID
#1 829757 829757 829865 829865 DEL 116 MAQ SLX NA19238,NA19240 RP WashU
def genome_to_g1k(D,types,chroms,sname,out_path,target_key=None,ex_flt=0.0,
                  map_alg='BWA',sq_tech='SLX',comp='FUSION',grp='JAXGM'):
    """Write genome rows D in the tab-separated 1000-genomes (tigra) layout
    and return the emitted rows.

    When target_key is supplied, rows whose idx dict contains that key
    (target overlap) are skipped; their idx values are normalized to
    lists as a side effect, matching the original behavior.
    """
    data = []
    for i in range(len(D)):
        row = D[i]
        if target_key is not None:
            idx = row[9] #{0:[-1]} flag in idx field for overlap with target
            for k in idx: idx[k] = list(idx[k])
            if target_key in idx: continue  #this call overlapped a target call
        if row[0] in chroms and row[8] in types and row[7] >= ex_flt:
            data += [[row[0],str(row[1]),str(row[1]),str(row[2]),str(row[2]),
                      types[row[8]],str(row[2]-row[1]),
                      map_alg,sq_tech,sname,comp,grp,'fusorSV_'+str(i+1)]]
    g1k = ''.join('\t'.join(r)+'\n' for r in data)
    with open(out_path,'w') as f:
        f.write(g1k)
    return data
#t:[[x1,x2,t,[y],wx,wy,{idx}]] => t:[[chrx,x1,x2,t,[[chry,y1,y2]],wx,wy,{idx}]]
def svult_to_glt(S,O):
    """Map t:[[x1,x2,t,[y],wx,wy,{idx}]] -> t:[[chrx,x1,x2,t,[[chry,y1,y2]],wx,wy,idx]].

    O maps contig name -> offset; each call is assigned to the half-open
    offset bin containing x1 and shifted into contig-local coordinates.
    y entries are only resolved inside the bin that matched x (as before).
    """
    L = {O[k]:k for k in O}   #offsets as keys
    B = sorted(L)             #ascending bin boundaries
    G = {}
    for t in S:
        converted = []
        for entry in S[t]:
            x1,x2,y = entry[0],entry[1],entry[3]
            wx,wy,idx = entry[4],entry[5],entry[6]
            chrx,ys = '',[[] for _ in y]
            for j in range(len(B)-1):
                lo,hi = B[j],B[j+1]
                if lo <= x1 < hi:
                    chrx,x1,x2 = L[lo],int(x1)-lo,int(x2)-lo
                    for k in range(len(y)):
                        if lo <= y[k][0] < hi:
                            ys[k] = [L[lo],int(y[k][0])-lo,int(y[k][1])-lo]
            converted += [[chrx,x1,x2,t,ys,wx,wy,idx]]
        G[t] = converted
    return G
#svua is [x1,x2,t,y1,y2,wx,wy]
#svul is [x1,x2,t,y1,y2,wx,wy,idx={}]
def svua_to_svul(S):
    """Append an empty idx dict to every svua row:
    [x1,x2,t,y1,y2,wx,wy] -> [x1,x2,t,y1,y2,wx,wy,{}]."""
    return [list(row)+[{}] for row in S]
def svul_to_svua(C):
    """Pack list-of-rows C into a uint32 numpy array with the y column zeroed.

    NOTE(review): the array is allocated with 4 columns, so this assumes
    each row of C has exactly 4 fields -- confirm against callers, since
    the svul comment above describes 8-field rows (assignment would raise
    for longer rows).
    """
    if len(C) > 0:
        S = np.zeros((len(C),4),dtype='u4')
        for i in range(len(C)):
            S[i] = C[i]
            S[i][3] = 0 #hard coded y ------------------------------------------------
    else:
        #empty input yields an empty 1-D u4 array rather than shape (0,4)
        S = np.array([],dtype='u4')
    return S
#mark for updates----------------------------------------------
#given: Q[type][f_id][sname][i] return tigra indecies to lookup ctgs
def tigra_ids(Q,sname,idx=6,f_id=-1,t_id=38):
    """Return {row_index:{tigra_call_id:{}}} for the fused (f_id) calls of
    sample sname whose idx column references the tigra caller t_id.

    Q layout: Q[type][f_id][sname][i]; idx is the column holding the
    per-caller id map.  Fix: replaced the py2-only dict.has_key with `in`.
    """
    I = {}
    for t in Q:
        if f_id in Q[t] and sname in Q[t][f_id] and len(Q[t][f_id][sname])>0:
            for i in range(len(Q[t][f_id][sname])):
                if t_id in Q[t][f_id][sname][i][idx]:
                    I[i] = {k:{} for k in Q[t][f_id][sname][i][idx][t_id]}
    return I
def info_to_idx(info):
    """Parse the SVMETHOD= field back into {caller_id:set(row_ids)}.

    Tokens look like 'c:r' (starts a new caller group) or a bare 'r'
    (belongs to the most recent caller).
    """
    tokens = info.split('SVMETHOD=')[-1].split(';')[0].split(',')
    idx,caller = {},0
    for tok in tokens:
        if ':' in tok and not tok.startswith(':'):
            parts = tok.split(':')
            caller = int(parts[0])
            idx[caller] = set([int(parts[1])])
        else:
            idx[caller].add(int(tok))
    return idx
def idx_to_str(idx):
    """Render {key:iterable-of-strings} as 'k1:a,b|k2:c' with sorted keys and values."""
    parts = ['%s:%s'%(k,','.join(sorted(idx[k]))) for k in sorted(idx)]
    return '|'.join(parts)
def get_info_v(info,k,d=';'):
    """Return the value of key k inside a d-delimited info string, '' if absent.

    The key may appear at the start of the string, immediately after the
    delimiter, or after whitespace.
    """
    pattern = '\\A'+k+'=|['+d+']'+k+'=|[\\s]'+k+'='
    hit = re.search(pattern,info)
    if hit is None:
        return ''
    return info[hit.end():].split(d)[0]
def info_to_svtype(info):
    """Return the SVTYPE= value of a VCF INFO string ('' when absent)."""
    return get_info_v(info,'SVTYPE')
def info_to_end(info):
    """Return the END= coordinate as int; raises ValueError when END is absent."""
    return int(get_info_v(info,'END'))
def info_to_len(info):
    """Return the integer SVLEN= value parsed directly from the INFO string."""
    field = info.split('SVLEN=')[-1]
    return int(field.split(';')[0])
def info_to_consensus(info):
    """Return the CONSENSUS= sequence from a VCF INFO string ('' when absent)."""
    return get_info_v(info,'CONSENSUS')
def info_to_svex(info):
    """Return the SVEX= expectation value of an INFO string as a float."""
    field = info.split('SVEX=')[-1]
    return float(field.split(';')[0])
def info_to_target(info):
    """Return the leading integers of each TARGET= token ('flag_sample' pairs)."""
    field = info.split('TARGET=')[-1].split(';')[0]
    return [int(tok.split('_')[0]) for tok in field.split(',')]
def info_to_svmethod(info):
    """Return the raw SVMETHOD= field text from an INFO string."""
    field = info.split('SVMETHOD=')[-1]
    return field.split(';')[0]
def g1kP3_info_to_fusorSV_info(info,k,i):
    """Append fusorSV bookkeeping fields (SVEX/SVMETHOD/TARGET) to a g1k INFO string."""
    suffix = ';SVEX=1.0;SVMETHOD={0}:{1};TARGET=1'.format(k,i)
    return info+suffix
def lift_tuple_same_strand(lift_tuple):
    """Stub: always reports the lift tuple as same-strand.
    TODO(review): strand comparison is not implemented yet."""
    return True
def g1kP3_liftover(vcf_path,ref_path,chain_path,
                   CHR=0,POS=1,ID=2,REF=3,ALT=4,QUAL=5,FILT=6,INFO=7,FORMAT=8,SAMPLE=9,
                   add_chr=True):
    """Stub: intended to lift a g1k phase-3 VCF across assemblies via the
    chain file.  TODO(review): no liftover is performed; always True."""
    return True
#given a multi sample delly file, query
#given a multi sample delly file, query
def delly_vcf_reader(vcf_glob,out_vcf,reference,samples=['HG00513','HG00733','NA19240'],
                     VCF_CHR=0,VCF_POS=1,VCF_ID=2,VCF_REF=3,VCF_ALT=4,
                     VCF_QUAL=5,VCF_FILT=6,VCF_INFO=7,VCF_FORMAT=8,VCF_SAMPLE=9):
    """Read delly VCFs matching vcf_glob into sortable rows and sort them.

    NOTE(review): looks unfinished -- out_vcf/reference/samples are never
    used, snames is collected but discarded, and nothing is written; the
    function simply returns True after sorting.  Confirm before relying on it.
    """
    header,data,snames = [],[],[]
    for vcf in glob.glob(vcf_glob):
        with open(vcf,'r') as f:
            #sample name from the file name; i numbers rows, k is the caller id (always 0)
            sname,i,k = vcf.rsplit('/')[-1].split('.vcf')[0],0,0
            for line in f:
                if not line.startswith('#'):
                    r = line.split('\n')[0].split('\t')
                    r[VCF_ID] += '_'+sname
                    r[VCF_POS] = int(r[VCF_POS])
                    ln = info_to_end(r[VCF_INFO])-r[VCF_POS] #
                    r[VCF_INFO] = g1kP3_info_to_fusorSV_info(r[VCF_INFO],k,i)
                    data += [[r[VCF_CHR],r[VCF_POS],info_to_end(r[VCF_INFO]),ln]+r[VCF_ID:]]
                    i += 1
    vcfs = glob.glob(vcf_glob) #grab first header only
    with open(vcfs[0],'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
    snames = header[-1][VCF_FORMAT+1:]
    #now sort and cluster calls for genotyping
    data = coordinate_sort_pos(data)
    return True
#
def slice_merged(flat_data):
    """Histogram merged VCF rows into size bins per SV type (INS/DEL/DUP/INV)."""
    mag = 0
    bins = [(1,100),(100,250),(250,500),(500,1000),(1000,2500),
            (2500,5000),(5000,10000),(10000,50000),(50000,100000),
            (100000,1000000),(1000000,100000000)]
    sliced = {t:{b:0 for b in bins} for t in ('INS','DEL','DUP','INV')}
    for row in flat_data:
        svtype = row[4].replace('<','').replace('>','')   #strip '<DEL>' brackets
        size = info_to_end(row[7])-int(row[1])
        mag += size
        for lo,hi in bins:
            if lo < size <= hi:                            #half-open on the left
                sliced[svtype][(lo,hi)] += 1
    return sliced
def g1kP3_vcf_multi_sample_merge(vcf_glob,out_vcf,reference,overlap=0.5,
                                 VCF_CHR=0,VCF_POS=1,VCF_ID=2,VCF_REF=3,VCF_ALT=4,
                                 VCF_QUAL=5,VCF_FILT=6,VCF_INFO=7,VCF_FORMAT=8,VCF_SAMPLE=9):
    """Merge per-sample g1k phase-3 VCFs matching vcf_glob into one
    multi-sample VCF at out_vcf: calls are clustered by reciprocal overlap
    and flattened with regenerated genotype columns.

    Returns True.  Fix: removed the unreachable 'return False' that
    followed the return statement.
    """
    header,data,snames = [],[],[]
    for vcf in glob.glob(vcf_glob):
        with open(vcf,'r') as f:
            #sample name from file name; i numbers rows, k is the caller id (always 0)
            sname,i,k = vcf.rsplit('/')[-1].split('_S0.vcf')[0],0,0
            snames += [sname]
            for line in f:
                if not line.startswith('#'):
                    r = line.split('\n')[0].split('\t')
                    r[VCF_ID] += '_'+sname
                    r[VCF_POS] = int(r[VCF_POS])
                    ln = info_to_end(r[VCF_INFO])-r[VCF_POS] #
                    r[VCF_INFO] = g1kP3_info_to_fusorSV_info(r[VCF_INFO],k,i)
                    data += [[r[VCF_CHR],r[VCF_POS],info_to_end(r[VCF_INFO]),ln]+r[VCF_ID:]]
                    i += 1
    vcfs = glob.glob(vcf_glob) #grab first header only
    with open(vcfs[0],'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
    header[-1] = '\t'.join(header[-1][0:9]+['FORMAT']+snames) #fix up the sample colums
    raw = []
    #NOTE(review): keeps every other row -- presumably drops paired mate records; confirm
    for i in range(0,len(data),2):
        raw += [data[i]]
    data = raw
    #now sort and cluster calls for genotyping
    data = coordinate_sort_pos(data)
    cluster = coordinate_cluster(data,overlap)
    flat_data = clusters_to_flattened_str(cluster,snames,reference)
    vcf = '\n'.join([''.join(h) for h in header])+'\n'
    for i in flat_data:
        vcf += '\t'.join(i)+'\n'
    with open(out_vcf,'w') as f: #looks good can just write now and run with the vcf validator tool
        f.write(vcf)
    return True
#downstream analysis check of lifted coordinates
#downstream analysis check of lifted coordinates
def fusorSV_liftover(vcf_path,ref_path,chain_path,
                     CHR=0,POS=1,ID=2,REF=3,ALT=4,QUAL=5,FILT=6,INFO=7,FORMAT=8,SAMPLE=9,
                     add_chr=True):
    """Map every fusorSV call's coordinates through a liftover chain file.

    NOTE(review): the crossmap results (I) and the failed rows (err) are
    computed but discarded; the function always returns True -- looks like
    an analysis scratch routine, confirm before production use.
    """
    import crossmap as cs  #third-party; local import keeps the module optional
    if add_chr: chrom = 'chr'
    else: chrom = ''
    I,header,data,err = {},[],[],[]
    with open(vcf_path,'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
            else:
                data += [line.split('\n')[0].split('\t')]
    #now do the coordinate to coordinate liftover search
    mapTree,targetChromSizes, sourceChromSizes = cs.read_chain_file(chain_path)
    for row in data:
        try:
            q_chr = chrom + row[CHR]
            q_start = int(row[POS])
            q_stop = info_to_end(row[INFO])
            fusorSV_id = int(row[ID].replace('fusorSV_',''))
            I[fusorSV_id] = cs.map_coordinates(mapTree,q_chr,q_start,q_stop)
        except Exception:
            err += [row]
    return True
#read the fasta files and parse out the correct locations to liftover
def breakseq2_fasta_parser(fasta_path,CHROM=0,POS=1,SET=2,TYPE=3,ID=4):
    """Parse a breakseq2 breakpoint-library fasta into rows.

    Header lines look like '>chrom:start-stop:set:type:id[:extra...]' and
    are followed by exactly one sequence line; returns
    [[chrom,start,stop,set,type,id,(extras...),seq],...].
    """
    with open(fasta_path,'r') as f:
        lines = ''.join(f.readlines()).split('\n')
    while lines[-1]=='':          #trim any empty ends here
        lines.pop()
    data = []
    for i in range(0,len(lines),2):
        fields = lines[i].split(':')
        chrom,pos,st,typ,ids = fields[0:ID+1]
        start,stop = [int(x) for x in pos.split('-')]
        chrom = chrom.replace('>','')
        extra = fields[ID+1:] if len(fields)>ID+1 else []
        data += [[chrom,start,stop,st,typ,ids]+extra+[lines[i+1]]]
    return data
#lift coordinates for a breakseq2 library
def breakseq2_liftover(brkpt_path, source_ref_path, destination_ref_path, chain_path, add_chr=True):
    """Stub: load the source and destination references for lifting a
    breakseq2 breakpoint library (the liftover itself is not implemented).

    Fix: the parameters are now honored -- previously they were overwritten
    by hard-coded developer-machine paths, so callers could never point the
    routine at their own files.
    """
    source_ref = ru.read_fasta(source_ref_path,True)
    destination_ref = ru.read_fasta(destination_ref_path,True)
    #TODO: lift the brkpt_path coordinates via chain_path and write results
    return True
def add_chrom(data,CHR=0):
    """Prefix 'chr' onto plain human chromosome names in place and return data.

    NOTE(review): range(23) also admits '0', which is not a real chromosome
    -- presumably harmless; confirm the intent was 1..22.
    """
    plain = set(str(n) for n in range(23)) | {'Y','X','MT'}
    for row in data:
        if row[CHR] in plain:
            row[CHR] = 'chr'+row[CHR]
    return data
#cluster a set of fusorSV per sample VCF files
def fusorSV_vcf_multi_sample_merge(vcf_glob,out_vcf,reference,overlap=0.5,add_chr=True,
                                   VCF_CHR=0,VCF_POS=1,VCF_ID=2,VCF_REF=3,VCF_ALT=4,
                                   VCF_QUAL=5,VCF_FILT=6,VCF_INFO=7,VCF_FORMAT=8,VCF_SAMPLE=9):
    """Cluster the per-sample fusorSV VCFs matching vcf_glob and write one
    merged multi-sample VCF at out_vcf.

    Returns True.  Fix: removed the unreachable 'return False' that
    followed the return statement.
    """
    header,data,snames = [],[],[]
    for vcf in glob.glob(vcf_glob):
        with open(vcf,'r') as f:
            sname = vcf.rsplit('/')[-1].split('_S-1.vcf')[0]
            snames += [sname]
            for line in f:
                if not line.startswith('#'):
                    r = line.split('\n')[0].split('\t')
                    r[VCF_ID] += '_'+sname   #tag ids with the sample they came from
                    r[VCF_POS] = int(r[VCF_POS])
                    data += [[r[VCF_CHR],r[VCF_POS],info_to_end(r[VCF_INFO]),info_to_len(r[VCF_INFO])]+\
                             r[VCF_ID:VCF_SAMPLE+1]]
    vcfs = glob.glob(vcf_glob) #grab first header only
    with open(vcfs[0],'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
    header[-1] = '\t'.join(header[-1][0:9]+snames) #fix up the sample colums
    #cluster calls for genotyping
    cluster = coordinate_cluster(data,overlap)
    flat_data = clusters_to_flattened_str(cluster,snames,reference)
    if add_chr: flat_data = add_chrom(flat_data)
    #remove_C = ['C_12596', 'C_20039', 'C_17709', 'C_5949', 'C_6014', 'C_21055', 'C_17312', 'C_9188', 'C_2567', 'C_18819', 'C_18814']
    #remove_S = list(set(snames).difference(set(['NA19017','NA12878','HG00419','NA19238','NA19239','NA19625','NA18525'])))
    #flat_data = filter_fusorSV_vcf_multi_sample_merge(flat_data,remove_C,remove_S)
    #write the header and flat data to a new vcf file
    vcf = '\n'.join([''.join(h) for h in header])+'\n'
    for i in flat_data:
        vcf += '\t'.join(i)+'\n'
    with open(out_vcf,'w') as f: #looks good can just write now and run with the vcf validator tool
        f.write(vcf)
    return True
def filter_fusorSV_vcf_multi_sample_merge(flat_data,remove_C,VCF_ID=2):
    """Drop rows whose cluster id (column VCF_ID) appears in remove_C."""
    return [row for row in flat_data if row[VCF_ID] not in remove_C]
#read in individual fusorSV sample VCF files, multi_sample VCF sample/id/validation table
#read in individual fusorSV sample VCF files, multi_sample VCF sample/id/validation table
def fusorSV_multi_sample_merge_query(fusorSV_vcf_dir,sample_id_validation):
    """Load the merged multi-sample fusorSV VCF from fusorSV_vcf_dir and
    return its data rows sorted by (zero-padded contig, position).

    NOTE(review): sample_id_validation is never used, and samples/snames/n
    are computed but discarded -- looks like exploratory code; confirm.
    """
    samples = glob.glob(fusorSV_vcf_dir+'*_S-1.vcf') #get fusorSV VCF files out
    snames = [s.rsplit('/')[-1].split('_S-1.vcf')[0] for s in samples]
    merged_samples = fusorSV_vcf_dir+'/all_samples_genotypes_liftover.mapped.vcf'
    raw,header,merged_data = [],[],[]
    with open(merged_samples,'r') as f:
        raw = f.readlines()
    for line in raw:
        if line.startswith('#'):
            header += [line]
            header[-1] = header[-1].replace('\n','')
        else:
            merged_data += [line.split('\t')]
            merged_data[-1][-1] = merged_data[-1][-1].replace('\n','')
    #locate the FORMAT column in the #CHROM header line; samples follow it
    header_key,i = header[-1].split('\t'),0
    header_key[0] = header_key[0].replace('#','')
    while i < len(header_key) and header_key[i]!='FORMAT':i += 1
    if i < len(header_key): i += 1
    n = len(header_key[i:]) #number of samples to look at
    contig_len = max([len(i[0]) for i in merged_data])
    merged_data = sorted(merged_data,key=lambda x: (x[0].zfill(contig_len),int(x[1])))
    return merged_data
#need the bedtools intersect output for refseq
def fusorSV_bed_gene_converter(refseq_bed,gene_counts,REFSEQ_ID=3):
    """Map refseq ids in a bedtools-intersect output to gene symbols via
    mygene and write a summary line plus a comma-separated symbol list.

    Fixes: py2 dict.has_key -> in; the final write stripped two characters
    (s[:-2]) although only one trailing comma is appended, which truncated
    the last gene symbol -- now strips one; removed the unreachable
    'return False' after the return.
    """
    import mygene  #third-party; local import keeps the dependency optional
    data,genes = [],{}
    with open(refseq_bed,'r') as f:
        for line in f:
            data += [line.split('\t')]
    mg = mygene.MyGeneInfo()
    for row in data:
        q = str(mg.query(row[REFSEQ_ID])['hits'][0]['symbol'])
        if q not in genes: genes[q] = 1
        else: genes[q] += 1
    s = 'total genes=%s\taverage exons per gene=%s\n'%(len(genes),sum([genes[g] for g in genes])*1.0/len(genes))
    for g in sorted(genes):
        s += '%s,'%g
    with open(gene_counts,'w') as f:
        f.write(s[:-1]) #strip the single trailing comma
    return True
def fusorSV_multi_sample_merge_query_write(query_data,header,out_vcf):
    """Write header lines plus tab-joined query_data rows to out_vcf.

    Returns True.  Fix: removed the unreachable 'return False' that
    followed the return statement.
    """
    s = '\n'.join(header)+'\n'
    s += '\n'.join(['\t'.join(i) for i in query_data])+'\n'
    with open(out_vcf,'w') as f:
        f.write(s)
    return True
#using string INFO field: IE 'DEL', 'DUP', ect
def query_svtype(data,svtype,INFO=7):
    """Return rows whose INFO SVTYPE= value equals svtype."""
    return [row for row in data if info_to_svtype(row[INFO])==svtype]
#get the frequency out of genotyped rows
def query_frequency(data,c,freq,GT=9):
    """Return rows whose count of '0/1' fields compares to freq via c.

    c is one of '<','<=','>','>=','==','!='; unknown operators match
    nothing.  (GT is unused, kept for interface compatibility.)
    """
    tests = {'<' :lambda a: a< freq, '<=':lambda a: a<=freq,
             '>' :lambda a: a> freq, '>=':lambda a: a>=freq,
             '==':lambda a: a==freq, '!=':lambda a: a!=freq}
    result = []
    for row in data:
        n = sum(1 for v in row if v=='0/1')
        if c in tests and tests[c](n):
            result += [row]
    return result
def query_svex(data,c,svex,INFO=7):
    """Return rows whose SVEX value compares to svex via operator string c.

    c is one of '<','<=','>','>=','==','!='; unknown operators match nothing.
    """
    tests = {'<' :lambda a: a< svex, '<=':lambda a: a<=svex,
             '>' :lambda a: a> svex, '>=':lambda a: a>=svex,
             '==':lambda a: a==svex, '!=':lambda a: a!=svex}
    result = []
    for row in data:
        if c in tests and tests[c](info_to_svex(row[INFO])):
            result += [row]
    return result
#returns all target values that all agree,
#set agree=False and you get out the conflicting calls
def query_target(data,t=0,agree=True,INFO=7):
    """Rows where every TARGET flag equals t (agree=True) or where the
    flags conflict but at least one equals t (agree=False)."""
    result = []
    for row in data:
        flags = info_to_target(row[INFO])
        hit = any(j==t for j in flags)
        miss = any(j!=t for j in flags)
        if agree:
            if not miss:
                result += [row]
        elif hit and miss:
            result += [row]
    return result
#returns where a caller is present for all the samples
def query_caller_presence(data,t_id=38,p=0.5,INFO=7):
    """Rows where caller t_id supports at least fraction p of the samples."""
    result = []
    for row in data:
        groups = svmethod_to_sample(info_to_svmethod(row[INFO]).split('|'))
        hits = [1 if t_id in groups[s] else 0 for s in groups]
        if 1.0*sum(hits)/len(hits) >= p:
            result += [row]
    return result
#returns calls that have a certain number of supporting algorithms
#agree means that all the samples have to have more than c callers
def query_caller_number(data,c='<',x=3,agree=True,INFO=7):
    """Filter rows by the per-sample supporting-caller counts.

    agree=True keeps rows where every sample's count satisfies c vs x;
    agree=False keeps rows whose counts straddle the threshold (some
    satisfy, some violate).  Supported operators: '<','<=','>','>='.
    """
    ops = {'<' :(lambda a: a< x, lambda a: a>=x),
           '<=':(lambda a: a<=x, lambda a: a> x),
           '>' :(lambda a: a> x, lambda a: a<=x),
           '>=':(lambda a: a>=x, lambda a: a< x)}
    results = []
    if c not in ops:
        return results
    sat,unsat = ops[c]
    for row in data:
        S = svmethod_to_sample(info_to_svmethod(row[INFO]).split('|'))
        counts = [len(S[k]) for k in S]
        if agree:
            if all(sat(n) for n in counts):
                results += [row]
        elif any(sat(n) for n in counts) and any(unsat(n) for n in counts):
            results += [row]
    return results
#for a list of samples return the calls that they are genotyped possitive
def query_sample_presence(data,s_ids,INFO=7):
    """Rows genotyped positive for at least one sample id in s_ids."""
    result = []
    for row in data:
        S = svmethod_to_sample(info_to_svmethod(row[INFO]).split('|'))
        if any(s in S for s in s_ids):
            result += [row]
    return result
#returns each samples caller group
def svmethod_to_sample(svmethod):
    """Invert 'caller:row_sample,...' tokens into {sample:(caller_ids...)}.

    Each token is 'c:r1_s1,r2_s2,...'; for every sample the sorted tuple
    of caller ids that reported it is returned.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    S = {}
    for token in svmethod:
        c,payload = token.split(':')
        samples = set(entry.split('_')[-1] for entry in payload.split(','))
        for s in samples:
            if s in S: S[s] += [int(c)]
            else:      S[s]  = [int(c)]
    for s in S:
        S[s] = tuple(sorted(S[s]))
    return S
#given a a list of FusorSV rows, do a downstream analysis
#on the files where all group frequencies are returned for each sample
def get_svmethod_gfreq(data,INFO=7):
    """Count caller-id occurrences across all rows and sample groups.

    NOTE(review): despite the surrounding 'group frequency' comments this
    tallies individual caller ids, not group tuples -- confirm intent.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    G = {}
    for row in data:
        S = svmethod_to_sample(info_to_svmethod(row[INFO]).split('|'))
        for group in S.values():
            for caller in group:
                if caller in G: G[caller] += 1
                else:           G[caller]  = 1
    return G
def get_row_freq(row,INFO=7):
    """Expand a row's SVMETHOD field into {sample:{caller_id:[row_ids]}}.

    Fix: replaced the py2-only dict.has_key with `in`.
    """
    svmethods,S = info_to_svmethod(row[INFO]).split('|'),{}
    #caller id -> {sample:row_id} from 'c:r1_s1,r2_s2,...' tokens
    C = {int(m.split(':')[0]):{e.split('_')[-1]:int(e.split('_')[0])
         for e in m.split(':')[-1].split(',')} for m in svmethods}
    for c in C:
        for s in C[c]:
            if s not in S:
                S[s] = {c:[C[c][s]]}
            elif c not in S[s]:
                S[s][c] = [C[c][s]]
            else:
                S[s][c] += [C[c][s]]
    return S
#group frequency of a given set of data
#samples = {sname:{f_id:0},...}
def get_group_validation_frequency(data,samples,INFO=7):
    """Collect the caller-group keys seen for the requested samples.

    samples = {sname:{f_id:0},...}.  NOTE(review): groups are only
    registered (F[g] = {}) and never counted -- looks unfinished; confirm
    the intended semantics before extending.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    F = {}
    for row in data:
        S = get_row_freq(row,INFO)
        for s in S:
            if s in samples:
                g = tuple(sorted(S[s].keys()))
                if g not in F:
                    F[g] = {}
    return F
#custom search tool
#Cluster Type SVEX TARGET SVMETHOD fusroSV_id
#custom search tool
#Cluster Type SVEX TARGET SVMETHOD fusroSV_id
def three_list_fusorSV_id_search(id_path,repaired_vcf,flat_data):
    """Cross-reference a candidate id table against a repaired VCF and the
    merged flat data.

    NOTE(review): the filtered/removed partitions and the final average
    allele frequency are computed but discarded, and the function always
    returns [] -- this reads as an interactive analysis scratchpad.
    """
    #new calls we are looking for
    with open(id_path,'r') as f:
        raw = f.readlines()
    data1 = [r.strip('\n').split('\t') for r in raw]
    #old ones that were repaired
    header,data2,err = [],[],[]
    with open(repaired_vcf,'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
            else:
                data2 += [line.split('\n')[0].split('\t')]
    #partition candidates by whether their id (column 5) was repaired
    filtered,removed = [],[]
    for e in data1:
        if e[5] not in [f[2] for f in data2]:
            filtered += [e]
        else:
            removed += [e]
    #for repaired ids, find the flat_data rows they uniquely belong to
    ids,N = [e[2] for e in data2],{}
    for i in ids:
        for row in flat_data:
            fids = row[7].split('FUSORSVID=')[-1].split(';')[0].split(',')
            if i in fids and len(fids) == 1:
                N[i] = row[2]
    #NOTE(review): this average is computed but its value is discarded
    1.0*sum([N[k] for k in N])/len(N) #average allele frequency
    return []
#sort rows by chrom, type, start, length
def coordinate_sort_type(data,MAX=100,CHR=0,TYPE=6,R0=1,LEN=3):
    """Sort rows by (svtype, zero-padded chrom, start, length)."""
    order = lambda row: (row[TYPE],row[CHR].zfill(MAX),row[R0],row[LEN])
    return sorted(data,key=order)
def coordinate_sort_pos(data,MAX=100,CHR=0,TYPE=6,R0=1,LEN=3):
    """Sort rows by (zero-padded chrom, start, length) regardless of svtype."""
    order = lambda row: (row[CHR].zfill(MAX),row[R0],row[LEN])
    return sorted(data,key=order)
def coordinate_overlap(a,b,r0=1,r1=2,chr1=0,svtype=6): #add s1, s2 later
    """Overlap score in [0,1] for two call rows; 0.0 when type or chrom differ.

    u is min(sum of the two lengths, outer span) and i the summed
    start/end displacements; score = max(0,u-i)/u so identical intervals
    give 1.0.  Fix: b's length was computed as b[r1]+b[r0]+1 (sign typo)
    instead of b[r1]-b[r0]+1, inflating l and skewing u toward the span.
    """
    if a[svtype]!=b[svtype] or a[chr1]!=b[chr1]:
        return 0.0
    l = (a[r1]-a[r0]+1)+(b[r1]-b[r0]+1)                        #sum of the two lengths
    u = float(min(l,max(a[r1],b[r1])-min(a[r0],b[r0])+1))      #bounded outer span
    i = 1.0*abs(a[r0]-b[r0])+abs(a[r1]-b[r1])                  #total endpoint displacement
    return max(0.0,u-i)/u
def coordinate_partition_type(data,TYPE=6):
    """Group rows by their TYPE column, preserving input order within each type."""
    T = {}
    for row in data:
        T.setdefault(row[TYPE],[]).append(row)
    return T
#fast LR overlap clustering
#for each contig and for each svtype
#for every two rows in a cluster, they have at least >= overlap amount
def coordinate_cluster(data,overlap=0.5,MAX=100):
    """Greedy left-to-right clustering of position-sorted calls per svtype.

    Rows are sorted then partitioned by type; consecutive rows stay in the
    current cluster while each step meets the overlap threshold.  Returns
    {cluster_index:[rows]} renumbered across all types.
    Fixes: the trailing-singleton lookup used d[j] (KeyError on the type
    dict) instead of d[t][j]; py2 dict.has_key replaced with `in`.
    NOTE(review): the final element of each type list (index n-1) is still
    never emitted as its own cluster because of the j < n-1 guard -- confirm.
    """
    d = coordinate_partition_type(coordinate_sort_pos(data,MAX=MAX))
    C = {t:{} for t in d}
    for t in d:
        i,j,n = 0,0,len(d[t])
        while i < n-1:
            j = i+1
            while coordinate_overlap(d[t][i],d[t][j])>=overlap and j < n-1:
                j += 1
            C[t][i] = d[t][i:j]
            i = j
        if j not in C[t] and j < n-1: C[t][j] = [d[t][j]]
    F,m = {},0
    for t in C:
        for start in sorted(C[t]):
            F[m] = C[t][start]
            m += 1
    return F
#recursive overlap clustering, following 1000 genomes phase 3 supplements
#recursive overlap clustering, following 1000 genomes phase 3 supplements
def coordinate_cluster_g1k_p3_style(data,overlap=0.5):
    """Stub: recursive overlap clustering per the 1000-genomes phase-3
    supplement.  TODO(review): not implemented; always returns {}."""
    C = {}
    #will use a helper function to pass in C and overlap only
    return C
def clusters_to_flattened_str(cluster,snames,reference,average=True,
                              CHR=0,POS=1,END=2,ID=4,REF=5,ALT=6,QUAL=7,
                              FILT=8,INFO=9,FORMAT=10,SAMPLE=11,
                              VCF_CHR=0,VCF_POS=1,VCF_ID=2,VCF_REF=3,VCF_ALT=4,
                              VCF_QUAL=5,VCF_FILT=6,VCF_INFO=7,VCF_FORMAT=8,VCF_SAMPLE=9):
    """Flatten clustered calls into sorted multi-sample VCF rows (strings).

    With average=True, POS/END/SVLEN/SVEX are cluster averages, SVMETHOD
    and TARGET are merged across members, REF is re-read from reference,
    and genotype columns are regenerated from the member sample names.
    Fix: replaced the py2-only dict.has_key with `in`.
    NOTE(review): rows are only appended inside the average branch, so
    average=False produces an empty result -- confirm intent.
    """
    data,seq_name_len = [],max([len(k) for k in reference])
    for k in cluster:
        row = [cluster[k][0][CHR],cluster[k][0][POS],'C_'+str(k),cluster[k][0][REF],
               cluster[k][0][ALT],'.','PASS',cluster[k][0][INFO],'GT','0/1']
        if average:
            pos = int(round(1.0*sum([e[POS] for e in cluster[k]])/len(cluster[k]),0)) #average pos
            end = int(round(1.0*sum([e[END] for e in cluster[k]])/len(cluster[k]),0)) #average end
            svlen = end-pos+1 #average svlen
            svex = str(sum([info_to_svex(e[INFO]) for e in cluster[k]])/len(cluster[k])) #average svex
            idx,targets = {},[] #merged idx now has caller_id:set([row+samplename]) mapping
            f_ids = [cluster_fusorSV_id_to_sample(e[ID]) for e in cluster[k]]
            fusorSV_ids = [e[ID] for e in cluster[k]]
            for e in cluster[k]:
                new = info_to_idx(e[INFO])
                sname = cluster_fusorSV_id_to_sample(e[ID])
                targets += [str(info_to_target(e[INFO])[0])+'_'+sname] #now target flag and sample
                for i in new:
                    if i not in idx:
                        idx[i] = set([str(j)+'_'+sname for j in new[i]])
                    else:
                        idx[i] = idx[i].union(set([str(j)+'_'+sname for j in new[i]]))
            svmethod = idx_to_str(idx)
            row[VCF_POS] = str(pos)
            row[VCF_INFO] = cluster_info_update(row[VCF_INFO],svlen,end,svex,svmethod,targets,fusorSV_ids)
            #row[VCF_REF] = reference[row[VCF_CHR]].seq[pos] #HTSeq version
            row[VCF_REF] = reference[row[VCF_CHR]][pos]
            row[VCF_SAMPLE] = cluster_to_samples(snames,f_ids)
            data += [row]
    data = sorted(data,key=lambda x: (x[VCF_CHR].zfill(seq_name_len),int(x[VCF_POS])))
    return data
def cluster_to_samples(snames,f_ids):
    """Tab-joined genotype columns: '0/1' for samples present in f_ids, else '0/0'."""
    return '\t'.join('0/1' if s in f_ids else '0/0' for s in snames)
#verage up and update the row
#verage up and update the row
def cluster_info_update(info,svlen,end,svex,svmethod,targets,
                        fusorSV_ids=None,
                        remove_destination_coordinates=True):
    """Rewrite an INFO string with cluster-averaged values.

    Replaces the SVLEN=, END=, SVEX= and SVMETHOD= fields in place (each
    step splits on 'KEY=' and reassembles around the new value, so field
    order is preserved), optionally drops the CHR2= destination field,
    and rebuilds the tail as FUSORSVID=...;TARGET=... when fusorSV_ids is
    given, else just TARGET=....
    """
    if remove_destination_coordinates:
        #drop 'CHR2=<val>;' while keeping everything after it
        info = info.split('CHR2=')[0]+';'.join(info.split('CHR2=')[-1].split(';')[1:])
    l = info.split('SVLEN=')[0]
    r = ';'.join(info.split('SVLEN=')[-1].split(';')[1:])
    info = l+'SVLEN='+str(svlen)+';'+r
    l = info.split('END=')[0]
    r = ';'.join(info.split('END=')[-1].split(';')[1:])
    info = l+'END='+str(end)+';'+r
    l = info.split('SVEX=')[0]
    r = ';'.join(info.split('SVEX=')[-1].split(';')[1:])
    info = l+'SVEX='+str(svex)+';'+r
    l = info.split('SVMETHOD=')[0]
    r = ';'.join(info.split('SVMETHOD=')[-1].split(';')[1:])
    info = l+'SVMETHOD='+str(svmethod)+';'+r
    #everything from TARGET= on is rebuilt rather than patched
    l = info.split('TARGET=')[0]
    if fusorSV_ids is not None: #fusorSV_ids here
        info = l+'FUSORSVID='+','.join(fusorSV_ids)+';'+'TARGET='+','.join(targets)
    else:
        info = l+'TARGET='+','.join(targets)
    return info
def cluster_fusorSV_id_to_sample(fusorSV_id):
    """Return the sample-name suffix of an id like 'fusorSV_12_NA12878'."""
    return fusorSV_id.rsplit('_',1)[-1]
#downstream analysis check of lifted coordinates
def fusorSV_fix_merged_samples(vcf_in_path,vcf_out_path):
    """Rewrite a merged VCF: append each data row's last column (sample tag)
    to its ID and keep only the first 8 columns; the #CHROM header line is
    truncated to 8 columns as well.

    Returns True.  Fix: removed the unreachable 'return False' after the
    return statement.
    """
    header,data = [],[]
    with open(vcf_in_path,'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
            else:
                r = line.split('\n')[0].split('\t')
                r[2] += '_'+r[-1]
                data += [r[0:8]]
    header[-1] = ['\t'.join(header[-1][0:8])] #fix the column specs
    #NOTE(review): earlier header rows emit only their first tab-field (h[0]);
    #fine for '##key=value' lines, which contain no tabs
    s = '\n'.join([h[0] for h in header])+'\n'
    s += '\n'.join(['\t'.join(row) for row in data])+'\n'
    with open(vcf_out_path,'w') as f:
        f.write(s)
    return True
def fusorSV_vcf_liftover(vcf_in_path,ref_path,chain_path):
    """Lift a fusorSV VCF to the chain file's target assembly via crossmap.

    Output path/behavior is whatever crossmap_vcf_file does; always True.
    """
    import crossmap as cs  #third-party; local import keeps the module optional
    mapTree,targetChromSizes, sourceChromSizes = cs.read_chain_file(chain_path)
    cs.crossmap_vcf_file(mapTree,vcf_in_path,chain_path,ref_path)
    return True
def fusorSV_vcf_liftover_samples(sample_dir_glob,ref_path,chain_path):
    """Lift every VCF matching the glob pattern, in sorted order."""
    for vcf in sorted(glob.glob(sample_dir_glob)):
        fusorSV_vcf_liftover(vcf,ref_path,chain_path)
#reads and parses the fusorSV VCF file here to attach supporting ids
def fusorSV_support_ids(vcf_path,ID=2,INFO=7,s_id=[38]):
    """For each caller id in s_id, map fusorSV call id -> {supporting row id:{}}.

    Parses the fusorSV VCF at vcf_path; rows that fail to parse are
    collected in a local err list and skipped.  (s_id has a mutable
    default but is only read, so the interface is kept as-is.)
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    I,header,data,err = {s:{} for s in s_id},[],[],[]
    with open(vcf_path,'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
            else:
                data += [line.split('\n')[0].split('\t')]
    for row in data:
        try:
            fusorSV_id = int(row[ID].replace('fusorSV_',''))
            idx = info_to_idx(row[INFO])
            for s in s_id:
                if s in idx:
                    I[s][fusorSV_id] = {k:{} for k in idx[s]}
        except Exception:
            err += [row]
    return I
#given a fusorSV_id mapps in the original VCF line of a supporting VCF row
def support_id_map(M,V,ID=2,s_id=[],callers=None):
    """Replace the row-id leaves of M with stringified supporting VCF rows.

    V[s] holds caller-s call objects (with .svu and .as_vcf_row()); rows
    sharing one svu key are grouped so any member id resolves to the same
    record.  The ID column is rewritten to 'S<s>_<j>' or '<caller>_<j>'.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    U,K = {s:{} for s in s_id},{s:{} for s in s_id}
    for s in U:
        if s in V:
            for j in range(len(V[s])):
                k = tuple(V[s][j].svu[0])
                if k in U[s]: U[s][k] += [j]
                else: U[s][k] = [j]
        K[s] = {tuple(U[s][k]):list(k) for k in U[s]}
    for s in M:
        for i in M[s]:
            for j in M[s][i]:
                for k in K[s]:
                    if j in k:
                        row = V[s][j].as_vcf_row()
                        if callers is None:
                            row = row[0:ID]+['S'+str(s)+'_'+str(j)]+row[ID+1:]
                        else:
                            row = row[0:ID]+[callers[s]+'_'+str(j)]+row[ID+1:]
                        for x in range(len(row)):
                            if not type(row[x]) is str:
                                row[x] = str(row[x])
                        M[s][i][j] = [str(x) for x in row]
    return M
#SM already has the s_ids that are attached...
def support_id_search(SM,fusorSV_vcf,ID=2,INFO=7):
    """Gather, per fusorSV id, the supporting caller rows recorded in SM.

    SM: {caller_id:{fusorSV_id:{row_id:vcf_row}}}.  Returns (IS,header)
    where IS maps fusorSV id -> list of supporting rows and header is the
    parsed fusorSV header with its last line truncated before FORMAT.
    Rows that fail to parse are collected in err and skipped.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    IS,header,data,err = {},[],[],[]
    with open(fusorSV_vcf,'r') as f:
        for line in f:
            if line.startswith('#'):
                header += [line.split('\n')[0].split('\t')]
            else:
                data += [line.split('\n')[0].split('\t')]
    header[-1] = header[-1][0:-2] #just go up to the INFO feild for now
    for row in data:
        try:
            fusorSV_id = int(row[ID].replace('fusorSV_',''))
            idx = info_to_idx(row[INFO])
            for s in SM:
                if s in idx:
                    if fusorSV_id in IS:
                        IS[fusorSV_id] += [SM[s][fusorSV_id][k] for k in idx[s]]
                    else:
                        IS[fusorSV_id] = [SM[s][fusorSV_id][k] for k in idx[s]]
        except Exception:
            err += [row]
    return IS,header
#write the supporting VCF file for downstream analyis
def write_support_id_vcf(IS,header,support_vcf_path):
    """Write the header rows and the supporting rows of IS (in sorted key
    order) as a tab-separated VCF-like file.

    Returns True.  Fix: removed the unreachable 'return False' after the
    return statement.
    """
    S = '\n'.join(['\t'.join(i) for i in header])+'\n'
    for i in sorted(IS):
        S += '\n'.join(['\t'.join(j) for j in IS[i]])+'\n'
    with open(support_vcf_path,'w') as f:
        f.write(S)
    return True
#replace original vcf id field with the fusorSV supporting ids instead
#replace ALT with the ALT contig with the longest tigraSV CTG if one exists
#given V and fusorSV idex into it, look back and merge
#INFO;CTG= fields to retrieve the CTG ids to look into the fasta file in tigra_Y
def tigra_id_to_ctg_map(M,V,t_id=38):
    """Replace the tigra row-id leaves of M with {CTG_name:''} maps pulled
    from the tigra VCF rows in V[t_id], grouping rows that share one svu key.

    Fix: replaced the py2-only dict.has_key with `in`.
    """
    #i will be the highest index, look back into i-x entries to merge x records
    U = {}
    for j in range(len(V[t_id])):
        k = tuple(V[t_id][j].svu[0])
        if k in U: U[k] += [j]
        else: U[k] = [j]
    K = {tuple(U[k]):list(k) for k in U}
    for s in M:
        for i in M[s]: #list of t_id keys
            for j in M[s][i]:
                for k in K:
                    if j in k: #pull out the tigra CTG from the info field
                        M[s][i][j] = {V[t_id][x].info.split('CTG=')[-1].split(';')[0]:'' for x in k}
    return M
#given a tigra ctg map M, a coordinate offset map O and the fasta file ctg_fasta
#dig out the contig sequences in relation to the chrom start/stop position
#and do something with them (DP sliding alignment with affine gap?)
def tigra_ctg_search(M,ctg_fasta,t_id=38):
    """Fill the ctg->sequence slots of M from a tigra contig fasta.

    The fasta is assumed strictly alternating: even lines '>ctg ...', odd
    lines sequence; contig ids are upper-cased.  Contigs missing from the
    fasta are tallied and reported to stdout.
    Fix: replaced the py2-only dict.has_key with `in`.
    """
    seq,raw,err = {},'',0
    with open(ctg_fasta,'r') as f:
        raw = f.readlines()
    for i in range(0,len(raw),2): #tigra fasta: even index = >ctg, odd index = sequence
        if raw[i].startswith('>') and i < len(raw)+1: #NOTE(review): i < len(raw)+1 is always true
            ctg = raw[i].upper().replace('>','').split(' ')[0]
            seq[ctg] = raw[i+1].replace('\n','')
    for s in M:
        for i in M[s]:
            for j in M[s][i]:
                for ctg in M[s][i][j]:
                    if ctg in seq: M[s][i][j][ctg] = seq[ctg]
                    else: err += 1
    print('%s parsed and searched with %s errors'%(ctg_fasta,err))
    return M
#given the result of the tigra_ids->togra_id_to_ctg_map->tigra_ctg_search
#make one easy to search tsv file with:
#f_id t_id t_ctg fasta_style_seq
def write_tigra_ctg_map(M,tigra_tsv_path,t_id=38):
    """Flatten the tigra ctg map M into one searchable TSV:
    fusorSV_id<TAB>tigra_id<TAB>ctg_id<TAB>fasta_seq.

    Returns True.  Fix: removed the unreachable 'return False' after the
    return statement.
    """
    S = '\t'.join(['fusorSV_id','tigra_id','ctg_id','fasta_seq'])+'\n'
    for s in M:
        for i in sorted(M[s].keys()): #fusorSV call id
            for j in sorted(M[s][i].keys()): #tigra call id attached to fusorSV call id i
                for c in M[s][i][j]: #tigra ctg attached to the fusorSV call location and type
                    S += '\t'.join([str(i),str(j),c,M[s][i][j][c]])+'\n'
    with open(tigra_tsv_path,'w') as f:
        f.write(S)
    return True
#lift over analysis
#TO DO this will need to be updated once the final data structure is set
#construct a filtered SVD of FLTR==PASS
# def construct_svult(vcr,chroms,offset_map,s_id,flt=0,upper=int(500E3)):
# sx,vc_i,vx,k,j = {},{},[],[],0 #filtered container, and j is the originating row
# for vc in vcr: #iterate on the variant call record
# vx += [structural_variant_unit.SVU(vc,offset_map)]
# if vx[-1].chrom in chroms and vx[-1].filter >= flt and vx[-1].svlen < upper:
# sx[tuple(vx[-1].svu[0])] = j
# #if len(vx[-1].svu)>1:
# # sx[tuple(vx[-1].svu[1])] = j
# j += 1
# svua = np.zeros((len(sx),7),dtype='u4')
# k = sorted(sx.keys()) #sorted svua key (x1,x2,t,y1,y2,wx,wy):j for row
# for i in range(len(k)):
# svua[i] = k[i] #sorted, unique entries
# vc_i[i] = sx[k[i]] #get j back
# svult = {}
# types = sorted(list(set(list(svua[:,2]))))
# for t in types:
# l = np.where(svua[:,2]==t)[0]
# L = []
# for i in l:
# x = list(svua[i])
# L += [x[0:3]+[[[x[3],x[4]]],np.float64(x[5]),np.float64(x[6]),{s_id:{vc_i[i]}}]] #build it out
# svult[t] = L
# return svult,vx
def construct_svult(vcr, chroms, offset_map, s_id, vcf_flt=0,
                    types=None, lower=int(1E0), upper=int(250E6), trim_chr=False):
    """Build a filtered SVUlT (type-keyed SV unit list) from a VCF reader.

    Each record in vcr is wrapped as a structural_variant_unit.SVU and kept
    when it passes the chrom / FILTER / svlen bounds.  Surviving coordinate
    tuples are sorted and deduplicated, then grouped by SV type.

    :param vcr: iterable of variant call records (e.g. a VCF reader)
    :param chroms: collection of chromosome names to keep
    :param offset_map: per-chromosome coordinate offsets for the SVU class
    :param s_id: caller/sample id attached to each output entry
    :param vcf_flt: minimum FILTER value to keep (use -1 to keep everything)
    :param types: iterable of SV type codes to keep; None keeps all types
    :param lower: exclusive lower bound on svlen
    :param upper: exclusive upper bound on svlen
    :param trim_chr: passed through to SVU (strip 'chr' prefixes)
    :return: (svult, vx) where svult maps type -> list of
             [x1, x2, t, [[y1, y2]], wx, wy, {s_id: {vx_index}}] rows and
             vx is the list of retained SVU objects
    """
    sx, vc_i, vx, k, j = {}, {}, [], [], 0  # j indexes into vx (originating row)
    for vc in vcr:  # iterate on the variant call records
        sv = structural_variant_unit.SVU(vc, offset_map, trim_chr)
        # BUGFIX: was `types is not None and sv.svtype in types`, which made
        # the default types=None collect nothing at all; None means "all types"
        # (see the types is None branch below).
        if types is None or sv.svtype in types:
            vx += [sv]
            if vx[-1].chrom in chroms and vx[-1].filter >= vcf_flt and \
               lower < vx[-1].svlen < upper:
                sx[tuple(vx[-1].svu[0])] = j
            j += 1
    svua = np.zeros((len(sx), 7), dtype='u4')
    k = sorted(sx.keys())  # sorted svua key (x1,x2,t,y1,y2,wx,wy):j for row
    for i in range(len(k)):
        svua[i] = k[i]       # sorted, unique entries
        vc_i[i] = sx[k[i]]   # get the originating vx index back
    svult = {}
    if types is None:
        types = sorted(list(set(list(svua[:, 2]))))
    for t in types:
        l = np.where(svua[:, 2] == t)[0]
        L = []
        for i in l:
            x = list(svua[i])
            # row: [x1, x2, t, [[y1, y2]], wx, wy, {s_id: {vx index}}]
            L += [x[0:3] + [[[x[3], x[4]]], np.float64(x[5]), np.float64(x[6]), {s_id: {vc_i[i]}}]]
        svult[t] = L
    return svult, vx
def print_svult(C):
    """Pretty-print a type-keyed svult table, one tab-separated row per call."""
    print('x1\tx2\tt\t[y]\twx\twy\t{idx}')
    for svtype in C:
        for row in C[svtype]:
            # emit the seven standard columns of each svult entry
            print('\t'.join(str(field) for field in row[0:7]))
#given a bash type wildcard glob path, reads the vcf as svult
#do a flt param map for each caller id =>{s_id:flt_val}
# def vcf_glob_to_svultd(path_glob,chroms,offset_map,flt=0,flt_exclude=[]):
# vcfs,S,V = glob.glob(path_glob),{},{}
# for vcf in vcfs:
# vcr = structural_variant_unit.VCF_Reader(vcf) #uses
# s_id = id_trim(vcf)
# if s_id in flt_exclude:
# S[s_id],V[s_id] = construct_svult(vcr,chroms,offset_map,s_id,-1)
# else:
# S[s_id],V[s_id] = construct_svult(vcr,chroms,offset_map,s_id,flt)
# return S,V
def vcf_glob_to_svultd(path_glob, chroms, offset_map, vcf_flt=0, flt_exclude=[], caller_exclude=[],
                       types=None, lower=int(1E1), upper=int(250E6), trim_chr=False):
    """Read every VCF matching a glob pattern into per-caller svult dicts.

    Caller ids are parsed from the filename via id_trim.  Callers listed in
    caller_exclude are skipped entirely; callers listed in flt_exclude are
    read with FILTER threshold -1 (i.e. no FILTER-based filtering).

    :param path_glob: bash-style wildcard path matching the VCF files
    :param chroms: chromosome names to keep (passed to construct_svult)
    :param offset_map: per-chromosome coordinate offsets
    :param vcf_flt: default minimum FILTER value for included callers
    :param flt_exclude: caller ids exempted from FILTER-based filtering
    :param caller_exclude: caller ids to skip completely
    :param types, lower, upper, trim_chr: passed through to construct_svult
    :return: (S, V) dicts keyed by caller id: svult tables and SVU lists
    """
    vcfs, S, V = glob.glob(path_glob), {}, {}
    for vcf in vcfs:
        s_id = id_trim(vcf)
        if s_id not in caller_exclude:
            vcr = structural_variant_unit.VCF_Reader(vcf)
            # flt_exclude callers bypass FILTER-based filtering via flt = -1
            flt = -1 if s_id in flt_exclude else vcf_flt
            S[s_id], V[s_id] = construct_svult(vcr, chroms, offset_map, s_id, flt,
                                               types, lower, upper, trim_chr)
    return S, V
#given a vcf with SVCP naming convention, trim to an int value
def id_trim(s):
    """Extract the integer id from an SVCP-style ``*_S<id>.vcf`` path.

    Returns 0 (after printing a notice) when the path does not follow the
    naming convention.
    """
    basename = s.rsplit('/')[-1]
    token = basename.rsplit('_S')[-1].split('.')[0]
    try:
        return int(token)
    except Exception:
        print('not _S*.vcf named...')
        return 0
#ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
#input is a set of call sets S and list of list ref regions R
#ouput is a new set of sets T that does not overlap with any of Rs elements
def filter_call_sets(S, R, exclude=[]):
    """Remove calls overlapping reference regions R from every call set in S.

    :param S: dict of call sets: S[caller][svtype] -> list of svult rows
    :param R: list of reference regions to exclude against
    :param exclude: caller ids whose call sets are passed through unfiltered
    :return: new dict T with the same structure as S, filtered per type
    """
    T = {}
    # union of all svtypes seen across callers
    types = set([i for l in [S[k].keys() for k in S] for i in l])
    for k in S:
        N = {}
        for t in types:
            if t in S[k]:  # was S[k].has_key(t): `in` works on py2 and py3
                if k in exclude:
                    N[t] = S[k][t]
                else:
                    N[t] = fu.filter_regions(S[k][t], R)
        T[k] = N
    return T
def filter_call_sets2(S, R, exclude=[]):
    """Remove calls overlapping reference regions R, using filter_regions2.

    Same shape as filter_call_sets but uses the local sweep implementation
    instead of fu.filter_regions.

    :param S: dict of call sets: S[caller][svtype] -> list of svult rows
    :param R: list of reference regions to exclude against
    :param exclude: caller ids whose call sets are passed through unfiltered
    :return: new dict T with the same structure as S, filtered per type
    """
    T = {}
    # union of all svtypes seen across callers
    types = set([i for l in [S[k].keys() for k in S] for i in l])
    for k in S:
        N = {}
        for t in types:
            if t in S[k]:  # was S[k].has_key(t): `in` works on py2 and py3
                if k in exclude:
                    N[t] = S[k][t]
                else:
                    N[t] = filter_regions2(S[k][t], R)
        T[k] = N
    return T
def filter_regions2(C, R):
    """Return the entries of C that do not overlap any region in R on x.

    Both C and R are lists of svult-style rows [x1, x2, t, [y], wx, wy, {idx}],
    sorted by x1.  A dual-index sweep walks both lists left to right; indices
    of C entries that touch any R entry are collected in D and dropped from
    the result.

    NOTE: C and R are temporarily padded with sentinel rows past the common
    upper bound and the pads are popped off again before returning, so both
    input lists are mutated in place during the call but restored at the end.

    :param C: sorted list of candidate call rows
    :param R: sorted list of exclusion-region rows
    :return: list of C rows with no x-overlap against R
    """
    D = [-1]  # D[0] is a sentinel so D[-1] != i always holds initially
    i, j, upper, n, m = 0, 0, 0, len(C) + 1, len(R) + 1  # boundaries here
    if n > 1 and m <= 1:   upper = C[-1][1]
    elif m > 1 and n <= 1: upper = R[-1][1]
    elif n > 1 and m > 1:  upper = max(C[-1][1], R[-1][1])
    # pad out the ends so the sweep can run both lists to exhaustion
    C += [[upper+2, upper+2, 0, [], 0, 0, {}], [upper+4, upper+4, 0, [], 0, 0, {}]]
    R += [[upper+2, upper+2, 0, [], 0, 0, {}], [upper+4, upper+4, 0, [], 0, 0, {}]]
    while i + j < n + m:  # pivoting dual ordinal indices scan left to right
        # int() coercion matters: coordinates may be numpy u4 scalars whose
        # subtraction would wrap around instead of going negative.
        # (was long(); int auto-promotes on py2 and unbreaks py3)
        a = int(C[i][0]) - int(R[j][0])
        b = int(C[i][0]) - int(R[j][1])
        c = int(C[i][1]) - int(R[j][0])
        d = int(C[i][1]) - int(R[j][1])
        if a == 0 and d == 0:    # [7] C[i] and R[j] are equal on x
            if D[-1] != i: D += [i]
            i += 1
            j += 1
        elif c < 0:              # [1] C[i] disjoint left of R[j]
            i += 1
        elif b > 0:              # [6] C[i] disjoint right of R[j]
            j += 1
        elif a < 0 and d < 0:    # [2] C[i] right-overlaps R[j] left, no envelopment
            if D[-1] != i: D += [i]
            i += 1
        elif a > 0 and d > 0:    # [4] C[i] left-overlaps R[j] right, no envelopment
            if D[-1] != i: D += [i]
            j += 1
        elif a <= 0 and d >= 0:  # [3] C[i] envelops R[j]
            if D[-1] != i: D += [i]
            j += 1
        elif a >= 0 and d <= 0:  # [5] C[i] enveloped by R[j]
            if D[-1] != i: D += [i]
            i += 1
        if i >= n: i, j = n, j + 1  # sticky indices wait for each other
        if j >= m: j, i = m, i + 1  # sticky indices wait for each other
    # strip the sentinel padding back off both inputs
    while len(C) > 0 and C[-1][0] > upper: C.pop()
    while len(R) > 0 and R[-1][0] > upper: R.pop()
    # keep only the C indices never flagged as overlapping (skip the -1 sentinel)
    return [C[x] for x in sorted(set(range(len(C))).difference(set(D[1:])))]
#HTSeq refactor testing
#sample_path = '/Users/tbecker/Desktop/SANDISK/meta_caller_R4/HG00096/'
#offset_map = ru.get_coordinate_offsets('human_g1k_v37_decoy_coordinates.json')
#chroms = [str(i) for i in range(1,23)]+['X','Y','MT']
#S,V = vcf_glob_to_svultd(path_glob=sample_path+'/*vcf',chroms=chroms,offset_map=offset_map,flt=0,flt_exclude=[])
| timothyjamesbecker/FusorSV | fusorsv/svu_utils.py | Python | gpl-3.0 | 50,453 | [
"BWA",
"HTSeq"
] | ecd33e9218cb2f2dd5dc77f8bcd4472abfdf01e43eaf0dda8f6a669c96b62236 |
# encoding: utf-8
"""
An object for managing IPython profile directories.
Authors:
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import shutil
import sys
from IPython.config.configurable import LoggingConfigurable
from IPython.config.loader import Config
from IPython.utils.path import get_ipython_package_dir, expand_path
from IPython.utils.traitlets import List, Unicode, Bool
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------
class ProfileDirError(Exception):
    """Raised when a profile directory cannot be found or created."""
    pass
#-----------------------------------------------------------------------------
# Class for managing profile directories
#-----------------------------------------------------------------------------
class ProfileDir(LoggingConfigurable):
    """An object to manage the profile directory and its resources.

    The profile directory is used by all IPython applications, to manage
    configuration, logging and security.

    This object knows how to find, create and manage these directories. This
    should be used by any code that wants to handle profiles.
    """

    # Names of the standard subdirectories kept inside a profile directory.
    security_dir_name = Unicode('security')
    log_dir_name = Unicode('log')
    startup_dir_name = Unicode('startup')
    pid_dir_name = Unicode('pid')
    # Absolute paths of the subdirectories; populated when `location` is set.
    security_dir = Unicode(u'')
    log_dir = Unicode(u'')
    startup_dir = Unicode(u'')
    pid_dir = Unicode(u'')

    location = Unicode(u'', config=True,
        help="""Set the profile location directly. This overrides the logic used by the
        `profile` option.""",
        )

    _location_isset = Bool(False) # flag for detecting multiply set location

    def _location_changed(self, name, old, new):
        # traitlets change handler: fires when `location` is assigned.
        # The location may only be set once per instance.
        if self._location_isset:
            raise RuntimeError("Cannot set profile location more than once.")
        self._location_isset = True
        if not os.path.isdir(new):
            os.makedirs(new)
        # ensure config files exist:
        self.security_dir = os.path.join(new, self.security_dir_name)
        self.log_dir = os.path.join(new, self.log_dir_name)
        self.startup_dir = os.path.join(new, self.startup_dir_name)
        self.pid_dir = os.path.join(new, self.pid_dir_name)
        self.check_dirs()

    def _log_dir_changed(self, name, old, new):
        # traitlets change handler for `log_dir`
        self.check_log_dir()

    def check_log_dir(self):
        """Create the log directory if it does not yet exist."""
        if not os.path.isdir(self.log_dir):
            os.mkdir(self.log_dir)

    def _startup_dir_changed(self, name, old, new):
        # traitlets change handler for `startup_dir`
        self.check_startup_dir()

    def check_startup_dir(self):
        """Create the startup directory and seed it with the bundled README."""
        if not os.path.isdir(self.startup_dir):
            os.mkdir(self.startup_dir)
        readme = os.path.join(self.startup_dir, 'README')
        src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')
        # never overwrite an existing README the user may have edited
        if not os.path.exists(readme):
            shutil.copy(src, readme)

    def _security_dir_changed(self, name, old, new):
        # traitlets change handler for `security_dir`
        self.check_security_dir()

    def check_security_dir(self):
        """Create the security directory, keeping its permissions private (0700)."""
        if not os.path.isdir(self.security_dir):
            os.mkdir(self.security_dir, 0700)
        else:
            try:
                os.chmod(self.security_dir, 0700)
            except OSError:
                # e.g. filesystems that do not support chmod; warn but continue
                self.log.warn("Could not set security dir permissions to private.")

    def _pid_dir_changed(self, name, old, new):
        # traitlets change handler for `pid_dir`
        self.check_pid_dir()

    def check_pid_dir(self):
        """Create the pid directory, keeping its permissions private (0700)."""
        if not os.path.isdir(self.pid_dir):
            os.mkdir(self.pid_dir, 0700)
        else:
            try:
                os.chmod(self.pid_dir, 0700)
            except OSError:
                # e.g. filesystems that do not support chmod; warn but continue
                self.log.warn("Could not set pid dir permissions to private.")

    def check_dirs(self):
        """Ensure all standard profile subdirectories exist."""
        self.check_security_dir()
        self.check_log_dir()
        self.check_pid_dir()
        self.check_startup_dir()

    def copy_config_file(self, config_file, path=None, overwrite=False):
        """Copy a default config file into the active profile directory.

        Default configuration files are kept in :mod:`IPython.config.default`.
        This function moves these from that location to the working profile
        directory.

        Returns False (and copies nothing) when the destination already
        exists and ``overwrite`` is not set; True after a successful copy.
        """
        dst = os.path.join(self.location, config_file)
        if os.path.isfile(dst) and not overwrite:
            return False
        if path is None:
            path = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
        src = os.path.join(path, config_file)
        shutil.copy(src, dst)
        return True

    @classmethod
    def create_profile_dir(cls, profile_dir, config=None):
        """Create a new profile directory given a full path.

        Parameters
        ----------
        profile_dir : str
            The full path to the profile directory. If it does exist, it will
            be used. If not, it will be created.
        """
        return cls(location=profile_dir, config=config)

    @classmethod
    def create_profile_dir_by_name(cls, path, name=u'default', config=None):
        """Create a profile dir by profile name and path.

        Parameters
        ----------
        path : unicode
            The path (directory) to put the profile directory in.
        name : unicode
            The name of the profile. The name of the profile directory will
            be "profile_<profile>".
        """
        if not os.path.isdir(path):
            raise ProfileDirError('Directory not found: %s' % path)
        profile_dir = os.path.join(path, u'profile_' + name)
        return cls(location=profile_dir, config=config)

    @classmethod
    def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
        """Find an existing profile dir by profile name, return its ProfileDir.

        This searches through a sequence of paths for a profile dir. If it
        is not found, a :class:`ProfileDirError` exception will be raised.

        The search path algorithm is:
        1. ``os.getcwdu()``
        2. ``ipython_dir``

        Parameters
        ----------
        ipython_dir : unicode or str
            The IPython directory to use.
        name : unicode or str
            The name of the profile. The name of the profile directory
            will be "profile_<profile>".
        """
        dirname = u'profile_' + name
        paths = [os.getcwdu(), ipython_dir]
        for p in paths:
            profile_dir = os.path.join(p, dirname)
            if os.path.isdir(profile_dir):
                return cls(location=profile_dir, config=config)
        else:
            # for/else: only reached when no candidate path contained the dir
            raise ProfileDirError('Profile directory not found in paths: %s' % dirname)

    @classmethod
    def find_profile_dir(cls, profile_dir, config=None):
        """Find/create a profile dir and return its ProfileDir.

        This will create the profile directory if it doesn't exist.

        Parameters
        ----------
        profile_dir : unicode or str
            The path of the profile directory. This is expanded using
            :func:`IPython.utils.genutils.expand_path`.
        """
        profile_dir = expand_path(profile_dir)
        if not os.path.isdir(profile_dir):
            raise ProfileDirError('Profile directory not found: %s' % profile_dir)
        return cls(location=profile_dir, config=config)
| cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/profiledir.py | Python | lgpl-3.0 | 8,011 | [
"Brian"
] | 81a640d12c8e7c39e7598a797ee3bebd66847039663eb059474d6e3776dc1611 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""fitting.py -- Default posterior probability function and high-level fitting
methods for prospector
"""
import time
from functools import partial as argfix
import numpy as np
from scipy.optimize import minimize, least_squares
from .minimizer import minimize_wrapper, minimizer_ball
from .ensemble import run_emcee_sampler
from .nested import run_dynesty_sampler
from ..likelihood import lnlike_spec, lnlike_phot, chi_spec, chi_phot, write_log
from ..utils.obsutils import fix_obs
__all__ = ["lnprobfn", "fit_model",
"run_minimize", "run_emcee", "run_dynesty"
]
def lnprobfn(theta, model=None, obs=None, sps=None, noise=(None, None),
             residuals=False, nested=False, verbose=False):
    """Given a parameter vector and optionally a dictionary of observational
    data and a model object, return the natural log of the posterior. This
    requires that an sps object (and if using spectra and gaussian processes, a
    NoiseModel) be instantiated.

    :param theta:
        Input parameter vector, ndarray of shape (ndim,)

    :param model:
        SedModel model object, with attributes including ``params``, a
        dictionary of model parameter state. It must also have
        :py:func:`prior_product`, and :py:func:`predict` methods
        defined.

    :param obs:
        A dictionary of observational data. The keys should be
        + ``"wavelength"`` (angstroms)
        + ``"spectrum"`` (maggies)
        + ``"unc"`` (maggies)
        + ``"maggies"`` (photometry in maggies)
        + ``"maggies_unc"`` (photometry uncertainty in maggies)
        + ``"filters"`` (iterable of :py:class:`sedpy.observate.Filter`)
        + and optional spectroscopic ``"mask"`` and ``"phot_mask"``
          (same length as ``spectrum`` and ``maggies`` respectively,
          True means use the data points)

    :param sps:
        A :py:class:`prospect.sources.SSPBasis` object or subclass thereof, or
        any object with a ``get_spectrum`` method that will take a dictionary
        of model parameters and return a spectrum, photometry, and ancillary
        information.

    :param noise: (optional, default: (None, None))
        A 2-element tuple of :py:class:`prospect.likelihood.NoiseModel` objects.

    :param residuals: (optional, default: False)
        A switch to allow vectors of :math:`\chi` values to be returned instead
        of a scalar posterior probability.  This can be useful for
        least-squares optimization methods. Note that prior probabilities are
        not included in this calculation.

    :param nested: (optional, default: False)
        If ``True``, do not add the ln-prior probability to the ln-likelihood
        when computing the ln-posterior.  For nested sampling algorithms the
        prior probability is incorporated in the way samples are drawn, so
        should not be included here.

    :returns lnp:
        Ln posterior probability, unless ``residuals=True`` in which case a
        vector of :math:`\chi` values is returned.
    """
    # "null" value returned on prior violation or prediction failure: a
    # chi-vector of huge values in residuals mode, -inf otherwise.
    if residuals:
        lnnull = np.zeros(obs["ndof"]) - 1e18  # np.infty
        #lnnull = -np.infty
    else:
        lnnull = -np.infty

    # --- Calculate prior probability and exit if not within prior ---
    lnp_prior = model.prior_product(theta, nested=nested)
    if not np.isfinite(lnp_prior):
        return lnnull

    # --- Update Noise Model ---
    spec_noise, phot_noise = noise
    vectors, sigma_spec = {}, None
    model.set_parameters(theta)
    if spec_noise is not None:
        spec_noise.update(**model.params)
        vectors.update({"unc": obs.get('unc', None)})
        sigma_spec = spec_noise.construct_covariance(**vectors)
    if phot_noise is not None:
        phot_noise.update(**model.params)
        vectors.update({'phot_unc': obs.get('maggies_unc', None)})

    # --- Generate mean model ---
    try:
        t1 = time.time()
        spec, phot, x = model.predict(theta, obs, sps=sps, sigma_spec=sigma_spec)
        d1 = time.time() - t1
    except(ValueError):
        # a ValueError during prediction is treated as a prior violation
        return lnnull
    except:
        # anything else is unexpected: report the offending parameters and re-raise
        print("There was an error during the likelihood call at parameters {}".format(theta))
        raise

    # --- Optionally return chi vectors for least-squares ---
    # note this does not include priors!
    if residuals:
        chispec = chi_spec(spec, obs)
        chiphot = chi_phot(phot, obs)
        return np.concatenate([chispec, chiphot])

    # --- Mixture Model ---
    # outlier fractions/scales are forwarded to the likelihoods via `vectors`
    f_outlier_spec = model.params.get('f_outlier_spec', 0.0)
    if (f_outlier_spec != 0.0):
        sigma_outlier_spec = model.params.get('nsigma_outlier_spec', 10)
        vectors.update({'nsigma_outlier_spec': sigma_outlier_spec})
    f_outlier_phot = model.params.get('f_outlier_phot', 0.0)
    if (f_outlier_phot != 0.0):
        sigma_outlier_phot = model.params.get('nsigma_outlier_phot', 10)
        vectors.update({'nsigma_outlier_phot': sigma_outlier_phot})

    # --- Emission Lines ---

    # --- Calculate likelihoods ---
    t1 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs,
                           f_outlier_spec=f_outlier_spec,
                           spec_noise=spec_noise,
                           **vectors)
    lnp_phot = lnlike_phot(phot, obs=obs,
                           f_outlier_phot=f_outlier_phot,
                           phot_noise=phot_noise, **vectors)
    # penalty accumulated during emission-line marginalization, if any
    lnp_eline = getattr(model, '_ln_eline_penalty', 0.0)
    d2 = time.time() - t1
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec + lnp_eline
def wrap_lnp(lnpfn, obs, model, sps, **lnp_kwargs):
    """Return lnpfn with obs, model, sps (plus extras) pre-bound as keywords."""
    fixed = dict(obs=obs, model=model, sps=sps)
    fixed.update(lnp_kwargs)
    return argfix(lnpfn, **fixed)
def fit_model(obs, model, sps, noise=(None, None), lnprobfn=lnprobfn,
              optimize=False, emcee=False, dynesty=True, **kwargs):
    """Fit a model to observations using a number of different methods

    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:func:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``

    :param noise: (optional, default: (None, None))
        A tuple of NoiseModel objects for the spectroscopy and photometry
        respectively.  Can also be (None, None) in which case simple chi-square
        will be used.

    :param lnprobfn: (optional, default: lnprobfn)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords. By default use the
        :py:func:`lnprobfn` defined above.

    :param optimize: (optional, default: False)
        If ``True``, conduct a round of optimization before sampling from the
        posterior.  The model state will be set to the best value at the end of
        optimization before continuing on to sampling or returning.  Parameters
        controlling the optimization can be passed via ``kwargs``, including

        + ``min_method``: 'lm' | 'powell'
        + ``nmin``: number of minimizations to do.  Beyond the first, minimizations
          will be started from draws from the prior.
        + ``min_opts``: dictionary of minimization options passed to the
          scipy.optimize.minimize method.

        See :py:func:`run_minimize` for details.

    :param emcee: (optional, default: False)
        If ``True``, sample from the posterior using emcee.  Additional
        parameters controlling emcee can be passed via ``**kwargs``.  These include

        + ``initial_positions``: A set of initial positions for the walkers
        + ``hfile``: an open h5py.File file handle for writing result incrementally

        Many additional emcee parameters can be provided here, see
        :py:func:`run_emcee` for details.

    :param dynesty:
        If ``True``, sample from the posterior using dynesty.  Additional
        parameters controlling dynesty can be passed via ``**kwargs``. See
        :py:func:`run_dynesty` for details.

    :returns output:
        A dictionary with two keys, 'optimization' and 'sampling'.  The value
        of each of these is a 2-tuple with results in the first element and
        durations (in seconds) in the second element.
    """
    # Make sure obs has required keys
    obs = fix_obs(obs)

    if emcee and dynesty:
        # BUGFIX: was `raise(ValueError, msg)`, which raises a tuple and
        # produces a TypeError instead of the intended ValueError.
        raise ValueError("Cannot run both emcee and dynesty fits "
                         "in a single call to fit_model")

    output = {"optimization": (None, 0.),
              "sampling": (None, 0.)}

    if optimize:
        optres, topt, best = run_minimize(obs, model, sps, noise,
                                          lnprobfn=lnprobfn, **kwargs)
        # set the model state to the best optimization result
        model.set_parameters(optres[best].x)
        output["optimization"] = (optres, topt)

    if emcee:
        run_sampler = run_emcee
    elif dynesty:
        run_sampler = run_dynesty
    else:
        # no sampler requested; return optimization results (if any) only
        return output

    output["sampling"] = run_sampler(obs, model, sps, noise,
                                     lnprobfn=lnprobfn, **kwargs)
    return output
def run_minimize(obs=None, model=None, sps=None, noise=None, lnprobfn=lnprobfn,
                 min_method='lm', min_opts={}, nmin=1, pool=None, **extras):
    """Run a minimization.  This wraps the lnprobfn fixing the ``obs``,
    ``model``, ``noise``, and ``sps`` objects, and then runs a minimization of
    -lnP using scipy.optimize methods.

    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:func:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``

    :param noise: (optional)
        If given, a tuple of :py:class:`NoiseModel` objects passed to
        ``lnprobfn``.

    :param lnprobfn: (optional, default: lnprobfn)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords. By default use the
        :py:func:`lnprobfn` defined above.

    :param min_method: (optional, default: 'lm')
        Method to use for minimization
        * 'lm': Levenberg-Marquardt
        * 'powell': Powell line search method

    :param nmin: (optional, default: 1)
        Number of minimizations to do.  Beyond the first, minimizations will be
        started from draws from the prior.

    :param min_opts: (optional, default: {})
        Dictionary of minimization options passed to the scipy.optimize method.
        These include things like 'xtol', 'ftol', etc..

    :param pool: (optional, default: None)
        A pool to use for parallel optimization from multiple initial positions.

    :returns results:
        A list of `scipy.optimize.OptimizeResult` objects.

    :returns tm:
        Wall time used for the minimization, in seconds.

    :returns best:
        The index of the results list containing the lowest chi-square result.

    :raises ValueError:
        If ``min_method`` is not one of the supported methods.
    """
    initial = model.theta.copy()

    lsq = ["lm"]
    scalar = ["powell"]

    # --- Set some options based on minimization method ---
    if min_method in lsq:
        algorithm = least_squares
        residuals = True
        # copy so the caller's (and the shared default) dict is not mutated
        min_opts = dict(min_opts)
        min_opts["x_scale"] = "jac"
    elif min_method in scalar:
        algorithm = minimize
        residuals = False
    else:
        # previously this fell through to a NameError on `algorithm`
        raise ValueError("Unknown min_method '{}'; use 'lm' or 'powell'".format(min_method))

    loss = argfix(lnprobfn, obs=obs, model=model, sps=sps,
                  noise=noise, residuals=residuals)
    minimizer = minimize_wrapper(algorithm, loss, [], min_method, min_opts)
    qinit = minimizer_ball(initial, nmin, model)

    if pool is not None:
        M = pool.map
    else:
        M = map

    t = time.time()
    results = list(M(minimizer, [np.array(q) for q in qinit]))
    tm = time.time() - t

    if min_method in lsq:
        # least-squares returns residual vectors; pick the lowest chi-square
        chisq = [np.sum(r.fun**2) for r in results]
        best = np.argmin(chisq)
    elif min_method in scalar:
        best = np.argmin([p.fun for p in results])

    return results, tm, best
def run_emcee(obs, model, sps, noise, lnprobfn=lnprobfn,
              hfile=None, initial_positions=None,
              **kwargs):
    """Run emcee, optionally including burn-in and convergence checking.  Thin
    wrapper on :py:class:`prospect.fitting.ensemble.run_emcee_sampler`

    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:func:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``

    :param noise:
        A tuple of :py:class:`NoiseModel` objects passed to ``lnprobfn``.

    :param lnprobfn: (optional, default: lnprobfn)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords. By default use the
        :py:func:`lnprobfn` defined above.

    :param hfile: (optional, default: None)
        A file handle for a :py:class:`h5py.File` object that will be written
        to incrementally during sampling.

    :param initial_positions: (optional, default: None)
        If given, a set of initial positions for the emcee walkers.  Must have
        shape (nwalkers, ndim).  Rounds of burn-in will be skipped if this
        parameter is present.

    Extra Parameters
    --------

    :param nwalkers:
        The number of walkers to use.  If None, use the nearest power of two to
        ``ndim * walker_factor``.

    :param niter:
        Number of iterations for the production run

    :param nburn:
        List of the number of iterations to run in each round of burn-in (for
        removing stuck walkers.) E.g. `nburn=[32, 64]` will run the sampler for
        32 iterations before reinitializing and then run the sampler for
        another 64 iterations before starting the production run.

    :param storechain: (default: True)
        If using HDF5 output, setting this to False will keep the chain from
        being held in memory by the sampler object.

    :param pool: (optional)
        A ``Pool`` object, either from ``multiprocessing`` or from
        ``emcee.mpi_pool``.

    :param interval:
        Fraction of the full run at which to flush to disk, if using hdf5 for
        output.

    :param convergence_check_interval:
        How often to assess convergence, in number of iterations. If this is
        not `None`, then the KL convergence test is run.

    :param convergence_chunks:
        The number of iterations to combine when creating the marginalized
        parameter probability functions.

    :param convergence_stable_points_criteria:
        The number of stable convergence checks that the chain must pass before
        being declared stable.

    Returns
    --------

    :returns sampler:
        An instance of :py:class:`emcee.EnsembleSampler`.

    :returns ts:
        Duration of sampling (including burn-in) in seconds of wall time.
    """
    q = model.theta.copy()

    postkwargs = {"obs": obs,
                  "model": model,
                  "sps": sps,
                  "noise": noise,
                  "nested": False,
                  }

    # Could try to make signatures for these two methods the same....
    if initial_positions is not None:
        # BUGFIX: restart_emcee_sampler was referenced without being imported
        # at module level (only run_emcee_sampler is); import it here so the
        # restart path does not die with a NameError.
        from .ensemble import restart_emcee_sampler
        meth = restart_emcee_sampler
        t = time.time()
        out = meth(lnprobfn, initial_positions, hdf5=hfile,
                   postkwargs=postkwargs, **kwargs)
        sampler = out
        ts = time.time() - t
    else:
        meth = run_emcee_sampler
        t = time.time()
        out = meth(lnprobfn, q, model, hdf5=hfile,
                   postkwargs=postkwargs, **kwargs)
        sampler, burn_p0, burn_prob0 = out
        ts = time.time() - t

    return sampler, ts
def run_dynesty(obs, model, sps, noise, lnprobfn=lnprobfn,
                pool=None, nested_posterior_thresh=0.05, **kwargs):
    """Thin wrapper on :py:class:`prospect.fitting.nested.run_dynesty_sampler`

    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:func:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``

    :param noise:
        A tuple of :py:class:`prospect.likelihood.NoiseModel` objects passed to
        ``lnprobfn``.

    :param lnprobfn: (optional, default: :py:func:`lnprobfn`)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords.  This function must also take a
        ``nested`` keyword.

    :param pool: (optional, default: None)
        A pool whose map is forwarded to the dynesty sampler.

    :param nested_posterior_thresh: (optional, default: 0.05)
        Passed to dynesty's stopping criterion as ``post_thresh``.

    Extra Parameters
    --------
    :param nested_bound: (optional, default: 'multi')

    :param nested_sample: (optional, default: 'unif')

    :param nested_nlive_init: (optional, default: 100)

    :param nested_nlive_batch: (optional, default: 100)

    :param nested_dlogz_init: (optional, default: 0.02)

    :param nested_maxcall: (optional, default: None)

    :param nested_walks: (optional, default: 25)

    Returns
    --------
    :returns result:
        An instance of :py:class:`dynesty.results.Results`.

    :returns ts:
        Duration of sampling in seconds of wall time.
    """
    # deferred import: keeps dynesty an optional dependency of this module
    from dynesty.dynamicsampler import stopping_function, weight_function
    nested_stop_kwargs = {"post_thresh": nested_posterior_thresh}

    # bind obs/model/sps and tell lnprobfn to skip the prior (nested=True)
    lnp = wrap_lnp(lnprobfn, obs, model, sps, noise=noise,
                   nested=True)

    # Need to deal with postkwargs...

    t = time.time()
    dynestyout = run_dynesty_sampler(lnp, model.prior_transform, model.ndim,
                                     stop_function=stopping_function,
                                     wt_function=weight_function,
                                     nested_stop_kwargs=nested_stop_kwargs,
                                     pool=pool, **kwargs)
    ts = time.time() - t

    return dynestyout, ts
| jrleja/bsfh | prospect/fitting/fitting.py | Python | mit | 19,143 | [
"Gaussian"
] | 3c975cce9225876baf30d2fff77bb461077513913b503e99f330b67373e6c8cb |
# Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from datetime import timedelta
from inspect import getdoc, isclass
from typing import Optional, List
from robot.api import logger
from robot.errors import DataError
from robot.libraries.BuiltIn import BuiltIn
from robot.utils import is_string
from robot.utils.importer import Importer
from robotlibcore import DynamicCore
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from SeleniumLibrary.base import LibraryComponent
from SeleniumLibrary.errors import NoOpenBrowser, PluginError
from SeleniumLibrary.keywords import (
AlertKeywords,
BrowserManagementKeywords,
CookieKeywords,
ElementKeywords,
FormElementKeywords,
FrameKeywords,
JavaScriptKeywords,
RunOnFailureKeywords,
ScreenshotKeywords,
SelectElementKeywords,
TableElementKeywords,
WaitingKeywords,
WebDriverCache,
WindowKeywords,
)
from SeleniumLibrary.keywords.screenshot import EMBED
from SeleniumLibrary.locators import ElementFinder
from SeleniumLibrary.utils import LibraryListener, is_truthy, _convert_timeout
__version__ = "5.2.0.dev1"
class SeleniumLibrary(DynamicCore):
"""SeleniumLibrary is a web testing library for Robot Framework.
This document explains how to use keywords provided by SeleniumLibrary.
For information about installation, support, and more, please visit the
[https://github.com/robotframework/SeleniumLibrary|project pages].
For more information about Robot Framework, see http://robotframework.org.
SeleniumLibrary uses the Selenium WebDriver modules internally to
control a web browser. See http://seleniumhq.org for more information
about Selenium in general and SeleniumLibrary README.rst
[https://github.com/robotframework/SeleniumLibrary#browser-drivers|Browser drivers chapter]
for more details about WebDriver binary installation.
%TOC%
= Locating elements =
All keywords in SeleniumLibrary that need to interact with an element
on a web page take an argument typically named ``locator`` that specifies
how to find the element. Most often the locator is given as a string
using the locator syntax described below, but `using WebElements` is
possible too.
== Locator syntax ==
SeleniumLibrary supports finding elements based on different strategies
such as the element id, XPath expressions, or CSS selectors. The strategy
can either be explicitly specified with a prefix or the strategy can be
implicit.
=== Default locator strategy ===
By default, locators are considered to use the keyword specific default
locator strategy. All keywords support finding elements based on ``id``
and ``name`` attributes, but some keywords support additional attributes
or other values that make sense in their context. For example, `Click
Link` supports the ``href`` attribute and the link text and addition
to the normal ``id`` and ``name``.
Examples:
| `Click Element` | example | # Match based on ``id`` or ``name``. |
| `Click Link` | example | # Match also based on link text and ``href``. |
| `Click Button` | example | # Match based on ``id``, ``name`` or ``value``. |
If a locator accidentally starts with a prefix recognized as `explicit
locator strategy` or `implicit XPath strategy`, it is possible to use
the explicit ``default`` prefix to enable the default strategy.
Examples:
| `Click Element` | name:foo | # Find element with name ``foo``. |
| `Click Element` | default:name:foo | # Use default strategy with value ``name:foo``. |
| `Click Element` | //foo | # Find element using XPath ``//foo``. |
| `Click Element` | default: //foo | # Use default strategy with value ``//foo``. |
=== Explicit locator strategy ===
The explicit locator strategy is specified with a prefix using either
syntax ``strategy:value`` or ``strategy=value``. The former syntax
is preferred because the latter is identical to Robot Framework's
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#named-argument-syntax|
named argument syntax] and that can cause problems. Spaces around
the separator are ignored, so ``id:foo``, ``id: foo`` and ``id : foo``
are all equivalent.
Locator strategies that are supported by default are listed in the table
below. In addition to them, it is possible to register `custom locators`.
| = Strategy = | = Match based on = | = Example = |
| id | Element ``id``. | ``id:example`` |
| name | ``name`` attribute. | ``name:example`` |
| identifier | Either ``id`` or ``name``. | ``identifier:example`` |
| class | Element ``class``. | ``class:example`` |
| tag | Tag name. | ``tag:div`` |
| xpath | XPath expression. | ``xpath://div[@id="example"]`` |
| css | CSS selector. | ``css:div#example`` |
| dom | DOM expression. | ``dom:document.images[5]`` |
| link | Exact text a link has. | ``link:The example`` |
| partial link | Partial link text. | ``partial link:he ex`` |
| sizzle | Sizzle selector deprecated. | ``sizzle:div.example`` |
| data | Element ``data-*`` attribute | ``data:id:my_id`` |
| jquery | jQuery expression. | ``jquery:div.example`` |
| default | Keyword specific default behavior. | ``default:example`` |
See the `Default locator strategy` section below for more information
about how the default strategy works. Using the explicit ``default``
prefix is only necessary if the locator value itself accidentally
matches some of the explicit strategies.
Different locator strategies have different pros and cons. Using ids,
either explicitly like ``id:foo`` or by using the `default locator
strategy` simply like ``foo``, is recommended when possible, because
the syntax is simple and locating elements by id is fast for browsers.
If an element does not have an id or the id is not stable, other
solutions need to be used. If an element has a unique tag name or class,
using ``tag``, ``class`` or ``css`` strategy like ``tag:h1``,
``class:example`` or ``css:h1.example`` is often an easy solution. In
more complex cases using XPath expressions is typically the best
approach. They are very powerful but a downside is that they can also
get complex.
Examples:
| `Click Element` | id:foo | # Element with id 'foo'. |
| `Click Element` | css:div#foo h1 | # h1 element under div with id 'foo'. |
| `Click Element` | xpath: //div[@id="foo"]//h1 | # Same as the above using XPath, not CSS. |
| `Click Element` | xpath: //*[contains(text(), "example")] | # Element containing text 'example'. |
*NOTE:*
- The ``strategy:value`` syntax is only supported by SeleniumLibrary 3.0
and newer.
- Using the ``sizzle`` strategy or its alias ``jquery`` requires that
the system under test contains the jQuery library.
- Prior to SeleniumLibrary 3.0, table related keywords only supported
``xpath``, ``css`` and ``sizzle/jquery`` strategies.
- ``data`` strategy is a convenience locator that will construct an xpath from the parameters.
If you have element like `<div data-automation="automation-id-2">`, you locate the element via
``data:automation:automation-id-2``. This feature was added in SeleniumLibrary 5.2.0
=== Implicit XPath strategy ===
If the locator starts with ``//`` or multiple opening parenthesis in front
of the ``//``, the locator is considered to be an XPath expression. In other
words, using ``//div`` is equivalent to using explicit ``xpath://div`` and
``((//div))`` is equivalent to using explicit ``xpath:((//div))``
Examples:
| `Click Element` | //div[@id="foo"]//h1 |
| `Click Element` | (//div)[2] |
The support for the ``(//`` prefix is new in SeleniumLibrary 3.0.
Supporting multiple opening parenthesis is new in SeleniumLibrary 5.0.
=== Chaining locators ===
It is possible chain multiple locators together as single locator. Each chained locator must start with locator
strategy. Chained locators must be separated with single space, two greater than characters and followed with
space. It is also possible mix different locator strategies, example css or xpath. Also a list can also be
used to specify multiple locators. This is useful, is some part of locator would match as the locator separator
but it should not. Or if there is need to existing WebElement as locator.
Although all locators support chaining, some locator strategies do not obey the chaining. This is because
some locator strategies use JavaScript to find elements and JavaScript is executed for the whole browser context
and not for the element found be the previous locator. Chaining is supported by locator strategies which
are based on Selenium API, like `xpath` or `css`, but example chaining is not supported by `sizzle` or `jquery
Examples:
| `Click Element` | css:.bar >> xpath://a | # To find a link which is present after an element with class "bar" |
List examples:
| ${locator_list} = | `Create List` | css:div#div_id | xpath://*[text(), " >> "] |
| `Page Should Contain Element` | ${locator_list} | | |
| ${element} = | Get WebElement | xpath://*[text(), " >> "] | |
| ${locator_list} = | `Create List` | css:div#div_id | ${element} |
| `Page Should Contain Element` | ${locator_list} | | |
Chaining locators is new in SeleniumLibrary 5.0
== Using WebElements ==
In addition to specifying a locator as a string, it is possible to use
Selenium's WebElement objects. This requires first getting a WebElement,
for example, by using the `Get WebElement` keyword.
| ${elem} = | `Get WebElement` | id:example |
| `Click Element` | ${elem} | |
== Custom locators ==
If more complex lookups are required than what is provided through the
default locators, custom lookup strategies can be created. Using custom
locators is a two part process. First, create a keyword that returns
a WebElement that should be acted on:
| Custom Locator Strategy | [Arguments] | ${browser} | ${locator} | ${tag} | ${constraints} |
| | ${element}= | Execute Javascript | return window.document.getElementById('${locator}'); |
| | [Return] | ${element} |
This keyword is a reimplementation of the basic functionality of the
``id`` locator where ``${browser}`` is a reference to a WebDriver
instance and ``${locator}`` is the name of the locator strategy. To use
this locator, it must first be registered by using the
`Add Location Strategy` keyword:
| `Add Location Strategy` | custom | Custom Locator Strategy |
The first argument of `Add Location Strategy` specifies the name of
the strategy and it must be unique. After registering the strategy,
the usage is the same as with other locators:
| `Click Element` | custom:example |
See the `Add Location Strategy` keyword for more details.
= Browser and Window =
There is different conceptual meaning when SeleniumLibrary talks
about windows or browsers. This chapter explains those differences.
== Browser ==
When `Open Browser` or `Create WebDriver` keyword is called, it
will create a new Selenium WebDriver instance by using the
[https://www.seleniumhq.org/docs/03_webdriver.jsp|Selenium WebDriver]
API. In SeleniumLibrary terms, a new browser is created. It is
possible to start multiple independent browsers (Selenium Webdriver
instances) at the same time, by calling `Open Browser` or
`Create WebDriver` multiple times. These browsers are usually
independent of each other and do not share data like cookies,
sessions or profiles. Typically when the browser starts, it
creates a single window which is shown to the user.
== Window ==
Windows are the part of a browser that loads the web site and presents
it to the user. All content of the site is the content of the window.
Windows are children of a browser. In SeleniumLibrary browser is a
synonym for WebDriver instance. One browser may have multiple
windows. Windows can appear as tabs, as separate windows or pop-ups with
different position and size. Windows belonging to the same browser
typically share the sessions detail, like cookies. If there is a
need to separate sessions detail, example login with two different
users, two browsers (Selenium WebDriver instances) must be created.
New windows can be opened example by the application under test or
by example `Execute Javascript` keyword:
| `Execute Javascript` window.open() # Opens a new window with location about:blank
The example below opens multiple browsers and windows,
to demonstrate how the different keywords can be used to interact
with browsers, and windows attached to these browsers.
Structure:
| BrowserA
| Window 1 (location=https://robotframework.org/)
| Window 2 (location=https://robocon.io/)
| Window 3 (location=https://github.com/robotframework/)
|
| BrowserB
| Window 1 (location=https://github.com/)
Example:
| `Open Browser` | https://robotframework.org | ${BROWSER} | alias=BrowserA | # BrowserA with first window is opened. |
| `Execute Javascript` | window.open() | | | # In BrowserA second window is opened. |
| `Switch Window` | locator=NEW | | | # Switched to second window in BrowserA |
| `Go To` | https://robocon.io | | | # Second window navigates to robocon site. |
| `Execute Javascript` | window.open() | | | # In BrowserA third window is opened. |
| ${handle} | `Switch Window` | locator=NEW | | # Switched to third window in BrowserA |
| `Go To` | https://github.com/robotframework/ | | | # Third windows goes to robot framework github site. |
| `Open Browser` | https://github.com | ${BROWSER} | alias=BrowserB | # BrowserB with first windows is opened. |
| ${location} | `Get Location` | | | # ${location} is: https://www.github.com |
| `Switch Window` | ${handle} | browser=BrowserA | | # BrowserA second windows is selected. |
| ${location} | `Get Location` | | | # ${location} = https://robocon.io/ |
| @{locations 1} | `Get Locations` | | | # By default, lists locations under the currectly active browser (BrowserA). |
| @{locations 2} | `Get Locations` | browser=ALL | | # By using browser=ALL argument keyword list all locations from all browsers. |
The above example, @{locations 1} contains the following items:
https://robotframework.org/, https://robocon.io/ and
https://github.com/robotframework/'. The @{locations 2}
contains the following items: https://robotframework.org/,
https://robocon.io/, https://github.com/robotframework/'
and 'https://github.com/.
= Timeouts, waits, and delays =
This section discusses different ways how to wait for elements to
appear on web pages and to slow down execution speed otherwise.
It also explains the `time format` that can be used when setting various
timeouts, waits, and delays.
== Timeout ==
SeleniumLibrary contains various keywords that have an optional
``timeout`` argument that specifies how long these keywords should
wait for certain events or actions. These keywords include, for example,
``Wait ...`` keywords and keywords related to alerts. Additionally
`Execute Async Javascript`. Although it does not have ``timeout``,
argument, uses a timeout to define how long asynchronous JavaScript
can run.
The default timeout these keywords use can be set globally either by
using the `Set Selenium Timeout` keyword or with the ``timeout`` argument
when `importing` the library. See `time format` below for supported
timeout syntax.
== Implicit wait ==
Implicit wait specifies the maximum time how long Selenium waits when
searching for elements. It can be set by using the `Set Selenium Implicit
Wait` keyword or with the ``implicit_wait`` argument when `importing`
the library. See [https://www.seleniumhq.org/docs/04_webdriver_advanced.jsp|
Selenium documentation] for more information about this functionality.
See `time format` below for supported syntax.
== Selenium speed ==
Selenium execution speed can be slowed down globally by using `Set
Selenium speed` keyword. This functionality is designed to be used for
demonstrating or debugging purposes. Using it to make sure that elements
appear on a page is not a good idea. The above-explained timeouts
and waits should be used instead.
See `time format` below for supported syntax.
== Time format ==
All timeouts and waits can be given as numbers considered seconds
(e.g. ``0.5`` or ``42``) or in Robot Framework's time syntax
(e.g. ``1.5 seconds`` or ``1 min 30 s``). For more information about
the time syntax see the
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#time-format|Robot Framework User Guide].
= Run-on-failure functionality =
SeleniumLibrary has a handy feature that it can automatically execute
a keyword if any of its own keywords fails. By default, it uses the
`Capture Page Screenshot` keyword, but this can be changed either by
using the `Register Keyword To Run On Failure` keyword or with the
``run_on_failure`` argument when `importing` the library. It is
possible to use any keyword from any imported library or resource file.
The run-on-failure functionality can be disabled by using a special value
``NOTHING`` or anything considered false (see `Boolean arguments`)
such as ``NONE``.
= Boolean arguments =
Starting from 5.0 SeleniumLibrary relies on Robot Framework to perform the
boolean conversion based on keyword arguments [https://docs.python.org/3/library/typing.html|type hint].
More details in Robot Framework
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#supported-conversions|user guide]
Please note SeleniumLibrary 3 and 4 did have own custom methods to covert
arguments to boolean values.
= EventFiringWebDriver =
The SeleniumLibrary offers support for
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_support/selenium.webdriver.support.event_firing_webdriver.html#module-selenium.webdriver.support.event_firing_webdriver|EventFiringWebDriver].
See the Selenium and SeleniumLibrary
[https://github.com/robotframework/SeleniumLibrary/blob/master/docs/extending/extending.rst#EventFiringWebDriver|EventFiringWebDriver support]
documentation for further details.
EventFiringWebDriver is new in SeleniumLibrary 4.0
= Thread support =
SeleniumLibrary is not thread-safe. This is mainly due because the underlying
[https://github.com/SeleniumHQ/selenium/wiki/Frequently-Asked-Questions#q-is-webdriver-thread-safe|
Selenium tool is not thread-safe] within one browser/driver instance.
Because of the limitation in the Selenium side, the keywords or the
API provided by the SeleniumLibrary is not thread-safe.
= Plugins =
SeleniumLibrary offers plugins as a way to modify and add library keywords and modify some of the internal
functionality without creating a new library or hacking the source code. See
[https://github.com/robotframework/SeleniumLibrary/blob/master/docs/extending/extending.rst#Plugins|plugin API]
documentation for further details.
Plugin API is new SeleniumLibrary 4.0
"""
# One shared library instance is used for the whole test execution.
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_VERSION = __version__
def __init__(
    self,
    timeout: timedelta = timedelta(seconds=5),
    implicit_wait: timedelta = timedelta(seconds=0),
    run_on_failure: str = "Capture Page Screenshot",
    screenshot_root_directory: Optional[str] = None,
    plugins: Optional[str] = None,
    event_firing_webdriver: Optional[str] = None,
):
    """SeleniumLibrary can be imported with several optional arguments.

    - ``timeout``:
      Default value for `timeouts` used with ``Wait ...`` keywords.
    - ``implicit_wait``:
      Default value for `implicit wait` used when locating elements.
    - ``run_on_failure``:
      Default action for the `run-on-failure functionality`.
    - ``screenshot_root_directory``:
      Path to folder where possible screenshots are created or EMBED.
      See `Set Screenshot Directory` keyword for further details about EMBED.
      If not given, the directory where the log file is written is used.
    - ``plugins``:
      Allows extending the SeleniumLibrary with external Python classes.
    - ``event_firing_webdriver``:
      Class for wrapping Selenium with
      [https://seleniumhq.github.io/selenium/docs/api/py/webdriver_support/selenium.webdriver.support.event_firing_webdriver.html#module-selenium.webdriver.support.event_firing_webdriver|EventFiringWebDriver]
    """
    self.timeout = _convert_timeout(timeout)
    self.implicit_wait = _convert_timeout(implicit_wait)
    self.speed = 0.0
    # Resolve the user-given value (keyword name, "NOTHING", falsy value, ...)
    # to a keyword name or None.
    self.run_on_failure_keyword = RunOnFailureKeywords.resolve_keyword(
        run_on_failure
    )
    # Re-entrancy guard: True while the run-on-failure keyword itself runs.
    self._running_on_failure_keyword = False
    self.screenshot_root_directory = screenshot_root_directory
    self._resolve_screenshot_root_directory()
    self._element_finder = ElementFinder(self)
    # Names of keywords contributed by plugins; used to tag them as "plugin".
    self._plugin_keywords = []
    libraries = [
        AlertKeywords(self),
        BrowserManagementKeywords(self),
        CookieKeywords(self),
        ElementKeywords(self),
        FormElementKeywords(self),
        FrameKeywords(self),
        JavaScriptKeywords(self),
        RunOnFailureKeywords(self),
        ScreenshotKeywords(self),
        SelectElementKeywords(self),
        TableElementKeywords(self),
        WaitingKeywords(self),
        WindowKeywords(self),
    ]
    self.ROBOT_LIBRARY_LISTENER = LibraryListener()
    self._running_keyword = None
    self.event_firing_webdriver = None
    if is_truthy(event_firing_webdriver):
        self.event_firing_webdriver = self._parse_listener(event_firing_webdriver)
    self._plugins = []
    if is_truthy(plugins):
        plugin_libs = self._parse_plugins(plugins)
        self._plugins = plugin_libs
        # Plugin keywords are appended after the built-in keyword libraries.
        libraries = libraries + plugin_libs
    self._drivers = WebDriverCache()
    DynamicCore.__init__(self, libraries)
def run_keyword(self, name: str, args: tuple, kwargs: dict):
    """Execute the keyword ``name``, triggering run-on-failure on any error."""
    try:
        return DynamicCore.run_keyword(self, name, args, kwargs)
    except Exception:
        # Run the registered run-on-failure keyword (e.g. take a screenshot)
        # and then let the original error propagate to Robot Framework.
        self.failure_occurred()
        raise
def get_keyword_tags(self, name: str) -> list:
    """Return the tags of a keyword, adding "plugin" for plugin keywords."""
    keyword_tags = list(DynamicCore.get_keyword_tags(self, name))
    if name not in self._plugin_keywords:
        return keyword_tags
    return keyword_tags + ["plugin"]
def get_keyword_documentation(self, name: str) -> str:
    """Return keyword documentation; the library intro includes plugin docs."""
    if name != "__intro__":
        return DynamicCore.get_keyword_documentation(self, name)
    return self._get_intro_documentation()
def _parse_plugin_doc(self):
    """Yield a (doc, name) namedtuple for every registered plugin."""
    Doc = namedtuple("Doc", "doc, name")
    for plugin_instance in self._plugins:
        plugin_doc = getdoc(plugin_instance) or "No plugin documentation found."
        yield Doc(doc=plugin_doc, name=type(plugin_instance).__name__)
def _get_intro_documentation(self):
    """Return the library intro docs with a section appended per plugin."""
    parts = [DynamicCore.get_keyword_documentation(self, "__intro__")]
    for plugin_doc in self._parse_plugin_doc():
        parts.append(f"\n\n= Plugin: {plugin_doc.name} =\n\n{plugin_doc.doc}")
    return "".join(parts)
def register_driver(self, driver: WebDriver, alias: str):
    """Adds a `driver` to the library WebDriverCache.

    :param driver: Instance of the Selenium `WebDriver`.
    :type driver: selenium.webdriver.remote.webdriver.WebDriver
    :param alias: Alias given for this `WebDriver` instance.
    :type alias: str
    :return: The index of the `WebDriver` instance.
    :rtype: int
    """
    return self._drivers.register(driver, alias)
def failure_occurred(self):
    """Method that is executed when a SeleniumLibrary keyword fails.

    By default, executes the registered run-on-failure keyword.
    Libraries extending SeleniumLibrary can overwrite this hook
    method if they want to provide custom functionality instead.
    """
    # Do nothing when the functionality is disabled or when we are already
    # inside a run-on-failure call (prevents infinite recursion if the
    # run-on-failure keyword itself fails).
    if self._running_on_failure_keyword or not self.run_on_failure_keyword:
        return
    try:
        self._running_on_failure_keyword = True
        # NOTE(review): the screenshot keyword is called directly rather than
        # through BuiltIn().run_keyword — presumably to bypass Robot keyword
        # dispatch for the common default; confirm before changing.
        if self.run_on_failure_keyword.lower() == "capture page screenshot":
            self.capture_page_screenshot()
        else:
            BuiltIn().run_keyword(self.run_on_failure_keyword)
    except Exception as err:
        # A failing run-on-failure keyword must never mask the original error.
        logger.warn(
            f"Keyword '{self.run_on_failure_keyword}' could not be run on failure: {err}"
        )
    finally:
        self._running_on_failure_keyword = False
@property
def driver(self) -> WebDriver:
    """Current active driver.

    :rtype: selenium.webdriver.remote.webdriver.WebDriver
    :raises SeleniumLibrary.errors.NoOpenBrowser: If browser is not open.
    """
    if not self._drivers.current:
        raise NoOpenBrowser("No browser is open.")
    return self._drivers.current
def find_element(
    self, locator: str, parent: Optional[WebElement] = None
) -> WebElement:
    """Find element matching `locator`.

    :param locator: Locator to use when searching the element.
        See library documentation for the supported locator syntax.
    :type locator: str or selenium.webdriver.remote.webelement.WebElement
    :param parent: Optional parent `WebElement` to search child elements
        from. By default, search starts from the root using `WebDriver`.
    :type parent: selenium.webdriver.remote.webelement.WebElement
    :return: Found `WebElement`.
    :rtype: selenium.webdriver.remote.webelement.WebElement
    :raises SeleniumLibrary.errors.ElementNotFound: If element not found.
    """
    return self._element_finder.find(locator, parent=parent)
def find_elements(
    self, locator: str, parent: Optional[WebElement] = None
) -> List[WebElement]:
    """Find all elements matching `locator`.

    :param locator: Locator to use when searching the element.
        See library documentation for the supported locator syntax.
    :type locator: str or selenium.webdriver.remote.webelement.WebElement
    :param parent: Optional parent `WebElement` to search child elements
        from. By default, search starts from the root using `WebDriver`.
    :type parent: selenium.webdriver.remote.webelement.WebElement
    :return: list of found `WebElement` or empty if elements are not found.
    :rtype: list[selenium.webdriver.remote.webelement.WebElement]
    """
    return self._element_finder.find(
        locator, first_only=False, required=False, parent=parent
    )
def _parse_plugins(self, plugins):
    """Import and instantiate the plugin classes listed in ``plugins``."""
    imported = []
    importer = Importer("test library")
    for module_spec in self._string_to_modules(plugins):
        plugin_class = importer.import_class_or_module(module_spec.module)
        if not isclass(plugin_class):
            message = f"Importing test library: '{module_spec.module}' failed."
            raise DataError(message)
        instance = plugin_class(self, *module_spec.args, **module_spec.kw_args)
        if not isinstance(instance, LibraryComponent):
            message = (
                "Plugin does not inherit SeleniumLibrary.base.LibraryComponent"
            )
            raise PluginError(message)
        self._store_plugin_keywords(instance)
        imported.append(instance)
    return imported
def _parse_listener(self, event_firing_webdriver):
    """Import the single EventFiringWebDriver listener class.

    ``event_firing_webdriver`` uses the same ``module;arg;kw=value`` string
    format as plugins, but exactly one listener may be given.

    :raises ValueError: If more than one listener is specified.
    :raises DataError: If the import does not produce a class.
    :return: The imported listener class (not an instance).
    """
    listener_module = self._string_to_modules(event_firing_webdriver)
    listener_count = len(listener_module)
    if listener_count > 1:
        # Fixed grammar of the user-facing error message.
        message = f"It is possible to import only one listener but there were {listener_count} listeners."
        raise ValueError(message)
    listener_module = listener_module[0]
    importer = Importer("test library")
    listener = importer.import_class_or_module(listener_module.module)
    if not isclass(listener):
        # Fixed typo in the user-facing error message ("lister" -> "listener").
        message = f"Importing test Selenium listener class '{listener_module.module}' failed."
        raise DataError(message)
    return listener
def _string_to_modules(self, modules):
    """Parse a comma-separated module specification string.

    Each comma-separated item has the form ``module;arg;kw=value`` where
    positional arguments and keyword arguments are optional.

    :param modules: Specification string, e.g. ``"plugins.Plugin;arg;kw=1"``.
    :return: List of ``Module(module, args, kw_args)`` namedtuples.
    """
    Module = namedtuple("Module", "module, args, kw_args")
    parsed_modules = []
    for module in modules.split(","):
        module = module.strip()
        module_and_args = module.split(";")
        module_name = module_and_args.pop(0)
        kw_args = {}
        args = []
        for argument in module_and_args:
            if "=" in argument:
                # Split on the first "=" only so values may themselves
                # contain "=" (e.g. "token=a=b"); a plain split() would
                # raise "too many values to unpack" here.
                key, value = argument.split("=", 1)
                kw_args[key] = value
            else:
                args.append(argument)
        parsed_modules.append(Module(module=module_name, args=args, kw_args=kw_args))
    return parsed_modules
def _store_plugin_keywords(self, plugin):
    """Record the keyword names a plugin adds so they can be tagged later."""
    # A throwaway DynamicCore discovers the plugin's keyword names.
    dynamic_core = DynamicCore([plugin])
    self._plugin_keywords.extend(dynamic_core.get_keyword_names())
def _resolve_screenshot_root_directory(self):
    """Normalize a case-insensitive EMBED marker to the canonical constant."""
    screenshot_root_directory = self.screenshot_root_directory
    if is_string(screenshot_root_directory):
        # Accept "embed", "Embed", etc. as the EMBED marker.
        if screenshot_root_directory.upper() == EMBED:
            self.screenshot_root_directory = EMBED
| rtomac/robotframework-selenium2library | src/SeleniumLibrary/__init__.py | Python | apache-2.0 | 32,904 | [
"VisIt"
] | 4e163efe9e40e15ec18c867bee62f370f24d90d1c6030fbdb2f244d35f60eb34 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes describing the exchange-correlation functionals used in electronic-structure calculations.
"""
from collections import OrderedDict, namedtuple
from monty.functools import lazy_property
from monty.json import MSONable # , MontyEncoder
from monty.string import is_string
from pymatgen.core.libxcfunc import LibxcFunc
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "3.0.0" # The libxc version used to generate this file!
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2016"
class XcFunc(MSONable):
"""
This object stores information about the XC correlation functional.
Client code usually creates the object by calling the class methods:
- from_name
- from_type_name
or code-specific methods such as:
- from_abinit_ixc
An XcFunc instance is hashable and can therefore be used as key in dictionaries.
The implementation is based on the libxc conventions
and is inspired to the XML specification for atomic PAW datasets documented at:
https://wiki.fysik.dtu.dk/gpaw/setups/pawxml.html
For convenience, part of the pawxml documentation is reported here.
The xc_functional element defines the exchange-correlation functional used for
generating the dataset. It has the two attributes type and name.
The type attribute can be LDA, GGA, MGGA or HYB.
The name attribute designates the exchange-correlation functional
and can be specified in the following ways:
[1] Taking the names from the LibXC library. The correlation and exchange names
are stripped from their XC_ part and combined with a + sign.
Here is an example for an LDA functional:
<xc_functional type="LDA", name="LDA_X+LDA_C_PW"/>
and this is what PBE will look like:
<xc_functional type="GGA", name="GGA_X_PBE+GGA_C_PBE"/>
[2] Using one of the following pre-defined aliases:
type name LibXC equivalent Reference
LDA PW LDA_X+LDA_C_PW LDA exchange; Perdew, Wang, PRB 45, 13244 (1992)
GGA PW91 GGA_X_PW91+GGA_C_PW91 Perdew et al PRB 46, 6671 (1992)
GGA PBE GGA_X_PBE+GGA_C_PBE Perdew, Burke, Ernzerhof, PRL 77, 3865 (1996)
GGA RPBE GGA_X_RPBE+GGA_C_PBE Hammer, Hansen, Nørskov, PRB 59, 7413 (1999)
GGA revPBE GGA_X_PBE_R+GGA_C_PBE Zhang, Yang, PRL 80, 890 (1998)
GGA PBEsol GGA_X_PBE_SOL+GGA_C_PBE_SOL Perdew et al, PRL 100, 136406 (2008)
GGA AM05 GGA_X_AM05+GGA_C_AM05 Armiento, Mattsson, PRB 72, 085108 (2005)
GGA BLYP GGA_X_B88+GGA_C_LYP Becke, PRA 38, 3098 (1988); Lee, Yang, Parr, PRB 37, 785
"""
# --- Class-level lookup tables --------------------------------------------
# Lightweight record pairing a functional family with its registered alias.
type_name = namedtuple("type_name", "type, name")
xcf = LibxcFunc  # short local alias to keep the tables below readable
defined_aliases = OrderedDict(
    [  # (x, c) --> type_name
        # LDAs
        ((xcf.LDA_X, xcf.LDA_C_PW), type_name("LDA", "PW")),  # ixc 7
        ((xcf.LDA_X, xcf.LDA_C_PW_MOD), type_name("LDA", "PW_MOD")),
        ((xcf.LDA_X, xcf.LDA_C_PZ), type_name("LDA", "PZ")),  # ixc 2
        ((xcf.LDA_X, xcf.LDA_C_WIGNER), type_name("LDA", "W")),  # ixc 4
        ((xcf.LDA_X, xcf.LDA_C_HL), type_name("LDA", "HL")),  # ixc 5
        ((xcf.LDA_X, xcf.LDA_C_GL), type_name("LDA", "GL")),
        ((xcf.LDA_X, xcf.LDA_C_VWN), type_name("LDA", "VWN")),
        # GGAs
        ((xcf.GGA_X_PW91, xcf.GGA_C_PW91), type_name("GGA", "PW91")),
        ((xcf.GGA_X_PBE, xcf.GGA_C_PBE), type_name("GGA", "PBE")),
        ((xcf.GGA_X_RPBE, xcf.GGA_C_PBE), type_name("GGA", "RPBE")),  # ixc 15
        ((xcf.GGA_X_PBE_R, xcf.GGA_C_PBE), type_name("GGA", "revPBE")),  # ixc 14
        ((xcf.GGA_X_PBE_SOL, xcf.GGA_C_PBE_SOL), type_name("GGA", "PBEsol")),
        ((xcf.GGA_X_AM05, xcf.GGA_C_AM05), type_name("GGA", "AM05")),
        ((xcf.GGA_X_B88, xcf.GGA_C_LYP), type_name("GGA", "BLYP")),
    ]
)
del type_name
# Correspondence between Abinit ixc notation and libxc notation.
# see: http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#ixc
# and 42_libpaw/m_pawpsp.F90 for the implementation.
# Fortunately, all the other cases are handled with libxc.
abinitixc_to_libxc = {
    1: dict(xc=xcf.LDA_XC_TETER93),
    2: dict(x=xcf.LDA_X, c=xcf.LDA_C_PZ),  # PZ 001009
    4: dict(x=xcf.LDA_X, c=xcf.LDA_C_WIGNER),  # W
    5: dict(x=xcf.LDA_X, c=xcf.LDA_C_HL),  # HL
    7: dict(x=xcf.LDA_X, c=xcf.LDA_C_PW),  # PW 001012
    11: dict(x=xcf.GGA_X_PBE, c=xcf.GGA_C_PBE),  # PBE
    14: dict(x=xcf.GGA_X_PBE_R, c=xcf.GGA_C_PBE),  # revPBE
    15: dict(x=xcf.GGA_X_RPBE, c=xcf.GGA_C_PBE),  # RPBE
}
del xcf
@classmethod
def aliases(cls):
    """List of registered alias names (e.g. "PBE", "PW91")."""
    return [nt.name for nt in cls.defined_aliases.values()]
@classmethod
def asxc(cls, obj):
    """Convert ``obj`` (XcFunc or registered name string) into an XcFunc."""
    if not isinstance(obj, cls):
        if is_string(obj):
            return cls.from_name(obj)
        raise TypeError(f"Don't know how to convert <{type(obj)}:{str(obj)}> to Xcfunc")
    return obj
@classmethod
def from_abinit_ixc(cls, ixc):
    """Build the object from Abinit ixc (integer).

    ``0`` means no XC and returns None; positive values are translated via
    the ``abinitixc_to_libxc`` table; negative values follow the libxc
    convention used by Abinit: a six-digit number packing the exchange and
    correlation identifiers as XXXCCC or CCCXXX.

    :raises ValueError: If a negative ixc does not decode into an exchange
        part plus a correlation part.
    """
    ixc = int(ixc)
    if ixc == 0:
        return None
    if ixc > 0:
        return cls(**cls.abinitixc_to_libxc[ixc])
    # libxc notation employed in Abinit: a six-digit number in the form XXXCCC or CCCXXX
    ixc = abs(ixc)
    first = ixc // 1000
    last = ixc - first * 1000
    x, c = LibxcFunc(int(first)), LibxcFunc(int(last))
    if not x.is_x_kind:
        x, c = c, x  # Swap
    # Validate explicitly instead of using ``assert`` so the check is not
    # stripped when Python runs with the -O flag.
    if not (x.is_x_kind and c.is_c_kind):
        raise ValueError(f"Expected exchange and correlation parts but got x={x}, c={c}")
    return cls(x=x, c=c)
@classmethod
def from_name(cls, name):
    """Build the object from one of the registered names.

    Delegates to `from_type_name` with ``typ=None`` so aliases of any
    type (LDA, GGA, ...) match.
    """
    return cls.from_type_name(None, name)
@classmethod
def from_type_name(cls, typ, name):
    """Build the object from (type, name).

    ``typ`` may be None to match an alias of any type. ``name`` is either a
    registered alias (e.g. "PBE") or a libxc-style string such as
    "GGA_X_PBE+GGA_C_PBE" or "LDA_XC_TETER93".

    :raises ValueError: If a registered alias key has an unexpected length.
    :raises KeyError: If ``name`` is not a valid LibxcFunc member name.
    """
    # Try aliases first.
    for k, nt in cls.defined_aliases.items():
        if typ is not None and typ != nt.type:
            continue
        if name == nt.name:
            if len(k) == 1:
                # A single combined-XC functional is stored as a 1-tuple:
                # unpack the element instead of passing the tuple itself,
                # otherwise self.xc would be a tuple and break name/type.
                return cls(xc=k[0])
            if len(k) == 2:
                return cls(x=k[0], c=k[1])
            raise ValueError(f"Wrong key: {k}")
    # At this point, we should have something in the form
    # name="GGA_X_PBE+GGA_C_PBE" or name="LDA_XC_TETER93"
    if "+" in name:
        x, c = (s.strip() for s in name.split("+"))
        x, c = LibxcFunc[x], LibxcFunc[c]
        return cls(x=x, c=c)
    xc = LibxcFunc[name]
    return cls(xc=xc)
@classmethod
def from_dict(cls, d):
    """
    Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
    """
    # Missing keys default to None; the constructor validates the combination.
    return cls(xc=d.get("xc"), x=d.get("x"), c=d.get("c"))
def as_dict(self):
    """
    Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
    """
    d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__}
    # Serialize whichever of x, c, xc are set, preserving the key order.
    for attr_name in ("x", "c", "xc"):
        func = getattr(self, attr_name)
        if func is not None:
            d[attr_name] = func.as_dict()
    return d
# def to_json(self):
# """
# Returns a json string representation of the MSONable object.
# """
# return json.dumps(self.as_dict()) #, cls=MontyEncoder)
def __init__(self, xc=None, x=None, c=None):
    """
    Args:
        xc: LibxcFunc for XC functional.
        x, c: LibxcFunc for exchange and correlation part. Mutually exclusive with xc.
    """
    # Either a combined xc functional, or both an x and a c part.
    if xc is None and (x is None or c is None):
        raise ValueError("x or c must be specified when xc is None")
    if xc is not None and not (x is None and c is None):
        raise ValueError("x and c should be None when xc is specified")
    self.xc, self.x, self.c = xc, x, c
@lazy_property
def type(self):
"""The type of the functional."""
if self.xc in self.defined_aliases:
return self.defined_aliases[self.xc].type
xc = (self.x, self.c)
if xc in self.defined_aliases:
return self.defined_aliases[xc].type
# If self is not in defined_aliases, use LibxcFunc family
if self.xc is not None:
return self.xc.family
return "+".join([self.x.family, self.c.family])
@lazy_property
def name(self):
"""
The name of the functional. If the functional is not found in the aliases,
the string has the form X_NAME+C_NAME
"""
if self.xc in self.defined_aliases:
return self.defined_aliases[self.xc].name
xc = (self.x, self.c)
if xc in self.defined_aliases:
return self.defined_aliases[xc].name
if self.xc is not None:
return self.xc.name
return "+".join([self.x.name, self.c.name])
def __repr__(self):
return "%s" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if other is None:
return False
if isinstance(other, XcFunc):
return self.name == other.name
# assume other is a string
return self.name == other
def __ne__(self, other):
return not self == other
# @property
# def refs(self):
# def info_dict()
# if self.xc is not None:
# return {"xc", self.xc.info_dict}
# else:
# return {"x", self.x.info_dict, "c", self.c.info_dict}
| vorwerkc/pymatgen | pymatgen/core/xcfunc.py | Python | mit | 10,413 | [
"ABINIT",
"GPAW",
"pymatgen"
] | 39a6073e1f9b744a95d58f30934870687d71b0b0f2b69c92e3fd44ea17bc03ae |
"""
DIRAC.WorkloadManagementSystem.private package
"""
| DIRACGrid/DIRAC | src/DIRAC/WorkloadManagementSystem/private/__init__.py | Python | gpl-3.0 | 58 | [
"DIRAC"
] | ce7c40c9670722eadb7b9b6feab65293a6c297f7113032d8a3dcde7b81aa5cce |
from __future__ import print_function
import os
import unittest
from rdkit.six.moves import cStringIO as StringIO
from rdkit import RDConfig
from rdkit.Chem import PandasTools
gotPandas = PandasTools.pd is not None
import numpy
import tempfile, shutil
import gzip
methane = """\
Methane
RDKit
1 0 0 0 0 0 0 0 0 0999 V2000
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
M END
> <prop1>
12.34
> <prop2>
qwe
$$$$
"""
peroxide = """\
Peroxide
RDKit
2 1 0 0 0 0 0 0 0 0999 V2000
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
M END
> <prop2>
rtz
> <prop3>
yxcv
$$$$
"""
class TestLoadSDF(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not gotPandas:
raise unittest.SkipTest("Pandas not installed, skipping...")
def setUp(self):
self.gz_filename = os.path.join(RDConfig.RDCodeDir, 'Chem/test_data', 'pandas_load.sdf.gz')
# the doctest tests loading from a ".sdf" file so there's no need for that test here
def test_load_gzip_file(self):
df = PandasTools.LoadSDF(self.gz_filename)
self.assertEqual(len(df), 13)
# The molecule with index 1 is invalid, so it should be missing form the index
self.assertEqual(list(df.index), [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
def test_load_from_sio(self):
sio = StringIO(methane + peroxide)
df = PandasTools.LoadSDF(sio)
self.assertEqual(len(df), 2)
self.assertEqual(list(df["ID"]), ["Methane", "Peroxide"])
atom_counts = [mol.GetNumAtoms() for mol in df["ROMol"]]
self.assertEqual(atom_counts, [1, 2])
def test_load_specify_column_names(self):
sio = StringIO(methane + peroxide)
df = PandasTools.LoadSDF(sio, idName="CorpID", molColName="_rdmol")
self.assertEqual(len(df), 2)
self.assertEqual(list(df["CorpID"]), ["Methane", "Peroxide"])
atom_counts = [mol.GetNumAtoms() for mol in df["_rdmol"]]
self.assertEqual(atom_counts, [1, 2])
def test_empty_file(self):
# Should return an empty data frame with no rows or columns
sio = StringIO()
df = PandasTools.LoadSDF(sio)
self.assertEqual(len(df), 0)
self.assertEqual(len(df.index), 0)
def test_passed_in_file_is_not_closed(self):
sio = StringIO(methane)
df = PandasTools.LoadSDF(sio)
self.assertEqual(len(df), 1)
self.assertFalse(sio.closed)
def test_properties(self):
sio = StringIO(peroxide + methane)
df = PandasTools.LoadSDF(sio)
self.assertEqual(set(df.columns), set("ROMol ID prop1 prop2 prop3".split()))
prop1 = list(df["prop1"])
self.assertTrue(numpy.isnan(prop1[0]), prop1[0])
self.assertEqual(prop1[1], "12.34")
self.assertEqual(list(df["prop2"]), ["rtz", "qwe"])
prop3 = list(df["prop3"])
self.assertEqual(prop3[0], "yxcv")
self.assertTrue(numpy.isnan(prop3[1]), prop3[1])
def test_ignore_mol_column(self):
sio = StringIO(peroxide + methane)
df = PandasTools.LoadSDF(sio, molColName=None)
self.assertEqual(set(df.columns), set("ID prop1 prop2 prop3".split()))
class TestWriteSDF(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not gotPandas:
raise unittest.SkipTest("Pandas not installed, skipping...")
def setUp(self):
sio = StringIO(methane + peroxide)
self.df = PandasTools.LoadSDF(sio)
def test_default_write_does_not_include_tags(self):
sio = StringIO()
PandasTools.WriteSDF(self.df, sio)
s = sio.getvalue()
self.assertNotIn(s, "prop2")
def test_identifier_from_a_column(self):
sio = StringIO()
PandasTools.WriteSDF(self.df, sio, idName="prop2")
s = sio.getvalue()
first_line = s.split("\n", 1)[0]
self.assertEqual(first_line, "qwe")
def test_all_numeric_with_no_numeric_columns(self):
sio = StringIO()
PandasTools.WriteSDF(self.df, sio, allNumeric=True)
s = sio.getvalue()
self.assertFalse(">" in s, s)
self.assertNotIn("7\n\n", s) # double-check that the numeric tests don't pass by accident
self.assertNotIn("8\n\n", s)
def test_all_numeric_with_numeric_columns(self):
sio = StringIO()
df = self.df
df["len"] = df["ID"].map(len)
PandasTools.WriteSDF(df, sio, allNumeric=True)
s = sio.getvalue()
self.assertEqual(s.count("<len>"), 2)
self.assertIn("7\n\n", s)
self.assertIn("8\n\n", s)
def test_specify_numeric_column(self):
sio = StringIO()
df = self.df
df["len2"] = df["ID"].map(len)
PandasTools.WriteSDF(df, sio, properties=["len2"])
s = sio.getvalue()
self.assertEqual(s.count("<len2>"), 2)
self.assertIn("7\n\n", s)
self.assertIn("8\n\n", s)
def test_specify_numeric_column(self):
sio = StringIO()
df = self.df
df["len2"] = df["ID"].map(len)
df["len3"] = df["len2"].map(float)
PandasTools.WriteSDF(df, sio, properties=["len2", "len3"])
s = sio.getvalue()
self.assertEqual(s.count("<len2>"), 2)
self.assertEqual(s.count("<len3>"), 2)
self.assertIn("7\n\n", s)
self.assertIn("7.0\n\n", s)
self.assertIn("8\n\n", s)
self.assertIn("8.0\n\n", s)
def test_write_to_sdf(self):
dirname = tempfile.mkdtemp()
try:
filename = os.path.join(dirname, "test.sdf")
PandasTools.WriteSDF(self.df, filename)
s = open(filename, "U").read()
self.assertEqual(s.count("\n$$$$\n"), 2)
self.assertEqual(s.split("\n", 1)[0], "Methane")
finally:
shutil.rmtree(dirname)
def test_write_to_sdf_gz(self):
dirname = tempfile.mkdtemp()
try:
filename = os.path.join(dirname, "test.sdf.gz")
PandasTools.WriteSDF(self.df, filename)
s = gzip.open(filename).read()
self.assertEqual(s.count("\n$$$$\n"), 2)
self.assertEqual(s.split("\n", 1)[0], "Methane")
finally:
shutil.rmtree(dirname)
if __name__ == '__main__':
if PandasTools.pd is None:
import sys
sys.exit(0)
from rdkit.six import PY3
if not PY3: # FIX: The StringIO tests fail on python3
unittest.main()
| adalke/rdkit | rdkit/Chem/UnitTestPandasTools.py | Python | bsd-3-clause | 6,573 | [
"RDKit"
] | 80751b52326370c6bd76da4b4c79a627fc5424d0feb2a612423f8f2c0e83974f |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkOBJReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkOBJReader(), 'Reading vtkOBJ.',
(), ('vtkOBJ',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkOBJReader.py | Python | bsd-3-clause | 464 | [
"VTK"
] | 3a607c04eade7e95edf9d0137e9360a31196fa6547b74bad61372be909a77cea |
""" Useful physical and mathematical values. Physical constants in
Gaussian cgs units when not indicated otherwise. From 2010 CODATA
recommended values where available (see
http://physics.nist.gov/cuu/Constants/index.html).
>>> import constants as c
>>> from math import sqrt
>>> Planck_length = sqrt(c.hbar * c.G / c.c**3) # cm
>>> Planck_mass = sqrt(c.hbar * c.c / c.G) # g
>>> Planck_time = sqrt(c.hbar * c.G / c.c**5) # s
Constants defined:
======== ===================== =============== ===============================
c 2.99792458e10 cm/s speed of light
G 6.67384e-8 cm^3/g/s^2 gravitational constant
hplanck 6.6260775e-27 erg s Planck's constant
hbar 1.054571726e-27 erg s 1/(4*pi) * Planck's constant
kboltz 1.3806488e-16 erg/K Boltzmann constant
mp 1.67261777e-24 g proton mass
me 9.10938291e-28 g electron mass
eV 1.602176565e-12 ergs electron volt
e 4.80320451e-10 esu magnitude of charge on electron
sigma 5.670373e-5 erg/s/cm^2/K^4 Stefan-Boltzmann constant
Ryd 2.179872171e-11 ergs Rydberg: energy needed to
dissociate H atom from
ground state
Jy 1e-23 ergs/s/cm^2/Hz Jansky
sigmaT 6.652458734e-25 cm^2 Thomson cross section
Mmoon 7.348e25 g Moon mass
Rmoon 1.7374e8 cm Moon radius
Mearth 5.9742e27 g Earth mass
Rearth 6.3781e8 cm Earth radius
Msun 1.989e33 g Solar mass
Lsun 3.90e33 erg/s Solar luminosity
Rsun 6.96e10 cm Solar radius
au 1.496e13 cm Distance from Earth to Sun
ly 9.4607304725808e16 cm light year
pc 3.08567802e18 cm parsec
kpc 3.08567802e21 cm kiloparsec
Mpc 3.08567802e24 cm megaparsec
yr 3.155815e7 s year
Gyr 3.155815e16 s gigayear
mu 0.62 unitless mean molecular weight of
astrophysical gas
mile 160934. cm mile
a0 hbar**2 / me / e**2 cm Bohr radius
alpha e**2 / (hbar*c) unitless Fine structure constant
Ryd_Ang h * c * 1.0e8 / Ryd Angstroms Rydberg in Angstroms
c_kms 2.99792458e5 km/s speed of light
sqrt_ln2 0.832554611158 sqrt(ln(2))
pi
wlya 1215.6701 Angstroms Wavelength of HI Lya transition
wlyb 1025.72 Angstroms Wavelength of HI Lyb transition
Ar dictionary of atomic weights
======== ===================== =============== ===============================
"""
from __future__ import division
from __future__ import unicode_literals
from math import pi
c = 2.99792458e10 # cm/s speed of light
G = 6.67384e-8 # cm^3/g/s^2 gravitational constant
hplanck = 6.6260775e-27 # erg s Planck's constant
hbar = 1.054571726e-27 # erg s 1/(4*pi) * Planck's constant
kboltz = 1.3806488e-16 # erg/K Boltzmann constant
mp = 1.67261777e-24 # g proton mass
me = 9.10938291e-28 # g electron mass
eV = 1.602176565e-12 # ergs electron volt
e = 4.80320451e-10 # esu magnitude of charge on electron
sigma = 5.670373e-5 # erg/s/cm^2/K^4 Stefan-Boltzmann constant
Ryd = 2.179872171e-11 # ergs Rydberg: energy needed to
# dissociate H atom from
# ground state
Jy = 1e-23 # ergs/s/cm^2/Hz Jansky
sigmaT = 6.652458734e-25 # cm^2 Thomson cross section
Mmoon = 7.348e25 # g Moon mass
Rmoon = 1.7374e8 # cm Moon radius
Mearth = 5.9742e27 # g Earth mass
Rearth = 6.3781e8 # cm Earth radius
Msun = 1.989e33 # g Solar mass
Lsun = 3.90e33 # erg/s Solar luminosity
Rsun = 6.96e10 # cm Solar radius
au = 1.496e13 # cm Distance from Earth to Sun
ly = 9.4607304725808e16 # cm light year
pc = 3.08567802e18 # cm parsec
kpc = 3.08567802e21 # cm kiloparsec
Mpc = 3.08567802e24 # cm megaparsec
yr = 3.155815e7 # s year
Gyr = 3.155815e16 # s gigayear
mu = 0.62 # unitless mean molecular weight of
# astrophysical gas
mile = 160934. # cm mile
a0 = hbar**2 / me / e**2 # cm Bohr radius
alpha = e**2 / (hbar*c) # unitless Fine structure constant
Ryd_Ang = hplanck * c * 1.0e8 / Ryd # Angstroms Rydberg in Angstroms
c_kms = 2.99792458e5 # km/s speed of light
sqrt_ln2 = 0.832554611158 # sqrt(ln(2))
wlya = 1215.6701 # Angstroms Wavelength of HI Lya transition
wlyb = 1025.72 # Angstroms Wavelength of HI Lyb transition
# atomic weights from http://www.nist.gov/pml/data/comp.cfm
Ar = dict(H=1.00794,
He=4.002602,
C=12.0107,
N=14.0067,
O=15.9994,
Mg=24.3050,
Al=26.9815386,
Si=28.0855,
P=30.973762,
S=32.065,
Ca=40.078,
Fe=55.845,
Ti=47.867,
Zn=65.38,
Cr=51.9961,
)
| nhmc/LAE | python_modules/barak/constants.py | Python | mit | 6,258 | [
"Gaussian"
] | 654eaea6140e78fb30cfbf19848a991419e72b8f70652dfa32f7706e8c4150cc |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGenomicalignments(RPackage):
"""Provides efficient containers for storing and manipulating short genomic
alignments (typically obtained by aligning short reads to a reference
genome). This includes read counting, computing the coverage, junction
detection, and working with the nucleotide content of the alignments."""
homepage = "https://bioconductor.org/packages/GenomicAlignments/"
git = "https://git.bioconductor.org/packages/GenomicAlignments.git"
version('1.14.2', commit='57b0b35d8b36069d4d94af86af051f0129b28eef')
version('1.12.2', commit='b5d6f19e4a89b6c1c3e9e58e5ea4eb13870874ef')
depends_on('r-biocgenerics@0.15.3:', type=('build', 'run'))
depends_on('r-s4vectors@0.13.13:', type=('build', 'run'))
depends_on('r-iranges@2.5.36:', type=('build', 'run'), when='@1.12.2')
depends_on('r-iranges@2.11.16:', type=('build', 'run'), when='@1.14.2')
depends_on('r-genomeinfodb@1.11.5:', type=('build', 'run'), when='@1.12.2')
depends_on('r-genomeinfodb@1.13.1:', type=('build', 'run'), when='@1.14.2')
depends_on('r-genomicranges@1.27.19:', type=('build', 'run'), when='@1.12.2')
depends_on('r-genomicranges@1.29.14:', type=('build', 'run'), when='@1.14.2')
depends_on('r-summarizedexperiment@1.5.3:', type=('build', 'run'))
depends_on('r-biostrings@2.37.1:', type=('build', 'run'))
depends_on('r-rsamtools@1.21.4:', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.12.2:')
| mfherbst/spack | var/spack/repos/builtin/packages/r-genomicalignments/package.py | Python | lgpl-2.1 | 2,795 | [
"Bioconductor"
] | 5367886d4b33160c687992b7329d4eef83b9d73e191e5c5209d2060296721dad |
import calendar
import json
import re
import uuid
import mock
from nose.tools import eq_, ok_
from django.core.cache import cache
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse
from airmozilla.main.models import Event
from airmozilla.base.tests.testbase import Response, DjangoTestCase
from airmozilla.comments.views import (
can_manage_comments,
get_latest_comment
)
from airmozilla.comments.models import (
Discussion,
Comment,
Unsubscription
)
from airmozilla.base.tests.test_mozillians import (
VOUCHED_FOR_USERS,
VOUCHED_FOR,
)
class TestComments(DjangoTestCase):
def _create_discussion(self, event, enabled=True, moderate_all=True,
notify_all=True):
return Discussion.objects.create(
event=event,
enabled=enabled,
moderate_all=moderate_all,
notify_all=notify_all
)
def test_can_manage_comments(self):
event = Event.objects.get(title='Test event')
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
richard = User.objects.create(username='richard',
email='richard@mozilla.com',
is_superuser=True)
discussion = self._create_discussion(event)
discussion.moderators.add(jay)
ok_(not can_manage_comments(bob, discussion))
ok_(can_manage_comments(jay, discussion))
ok_(can_manage_comments(richard, discussion))
def test_get_latest_comment(self):
event = Event.objects.get(title='Test event')
eq_(get_latest_comment(event), None)
# or by ID
eq_(get_latest_comment(event.pk), None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
latest = get_latest_comment(event)
eq_(latest, None)
latest = get_latest_comment(event, include_posted=True)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(latest, modified)
# again, or by event ID
latest_second_time = get_latest_comment(event.pk, include_posted=True)
eq_(latest, latest_second_time)
def test_basic_event_data(self):
event = Event.objects.get(title='Test event')
# render the event and there should be no comments
url = reverse('main:event', args=(event.slug,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' not in response.content)
# if not enabled you get that back in JSON
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(comments_url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], False)
# also, trying to post a comment when it's not enable
# should cause an error
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 400)
assert not self.fanout.mock_calls
# enable discussion
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@example.com')
discussion.moderators.add(jay)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' in response.content)
comments_url = reverse('comments:event_data', args=(event.pk,))
# add the {'since': 1} to bust the cache
response = self.client.get(comments_url, {'since': 1})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], True)
eq_(structure['discussion']['closed'], False)
ok_('No comments posted' in structure['html'])
# even though it's enabled, it should reject postings
# because we're not signed in
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 403)
# so, let's sign in and try again
User.objects.create_user('richard', password='secret')
# but it should be ok if self.user had the add_event permission
assert self.client.login(username='richard', password='secret')
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('No comments posted' not in structure['html'])
ok_('Bla bla' in structure['html'])
comment = Comment.objects.get(comment='Bla bla')
ok_(comment)
eq_(comment.status, Comment.STATUS_POSTED)
self.fanout.publish.assert_called_with(
'comments-{}'.format(event.id),
comment.id
)
# the moderator should now have received an email
email_sent = mail.outbox[-1]
ok_(event.title in email_sent.subject)
ok_('requires moderation' in email_sent.subject)
ok_(url in email_sent.body)
ok_(url + '#comment-%d' % comment.pk in email_sent.body)
def test_cached_event_data(self):
cache.clear()
event = Event.objects.get(title='Test event')
# enable discussion
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@example.com')
discussion.moderators.add(jay)
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], True)
eq_(structure['discussion']['closed'], False)
ok_('No comments posted' in structure['html'])
# Post a comment
Comment.objects.create(
event=event,
user=jay,
comment='Cool birds!',
status=Comment.STATUS_APPROVED,
)
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
# Same as before
ok_('No comments posted' in structure['html'])
# Add some noise
response = self.client.get(url, {'noise': 1})
eq_(response.status_code, 200)
structure = json.loads(response.content)
# Now it should be different
ok_('No comments posted' not in structure['html'])
ok_('Cool birds!' in structure['html'])
# Add another comment but keep the same noise
Comment.objects.create(
event=event,
user=jay,
comment='Flamingos Rock!',
status=Comment.STATUS_APPROVED,
)
# Same noise as on the last one
response = self.client.get(url, {'noise': 1})
eq_(response.status_code, 200)
structure = json.loads(response.content)
# Should remain the same
ok_('Cool birds!' in structure['html'])
ok_('Flamingos Rock!' not in structure['html'])
# Change the noise to double-check that the value of the noise matters
response = self.client.get(url, {'noise': 2})
eq_(response.status_code, 200)
structure = json.loads(response.content)
# Should be different
ok_('Cool birds!' in structure['html'])
ok_('Flamingos Rock!' in structure['html'])
def test_post_comment_no_moderation(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event, moderate_all=False)
User.objects.create_user('richard', password='secret')
assert self.client.login(username='richard', password='secret')
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
# structure = json.loads(response.content)
comment = Comment.objects.get(event=event)
eq_(comment.status, Comment.STATUS_APPROVED)
def test_moderation_immediately(self):
"""when you post a comment that needs moderation, the moderator
can click a link in the email notification that immediately
approves the comment without being signed in"""
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
identifier = uuid.uuid4().hex[:10]
cache.set('approve-%s' % identifier, comment.pk, 60)
cache.set('remove-%s' % identifier, comment.pk, 60)
approve_url = reverse(
'comments:approve_immediately',
args=(identifier, comment.pk)
)
remove_url = reverse(
'comments:remove_immediately',
args=(identifier, comment.pk)
)
response = self.client.get(approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_APPROVED)
response = self.client.get(remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_REMOVED)
# try with identifiers that aren't in the cache
bogus_identifier = uuid.uuid4().hex[:10]
bogus_approve_url = reverse(
'comments:approve_immediately',
args=(bogus_identifier, comment.pk)
)
bogus_remove_url = reverse(
'comments:remove_immediately',
args=(bogus_identifier, comment.pk)
)
response = self.client.get(bogus_approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' not in response.content)
ok_('Unable to Approve Comment' in response.content)
response = self.client.get(bogus_remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' not in response.content)
ok_('Unable to Remove Comment' in response.content)
def test_unsubscribe_on_reply_notifications(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'name': 'Jay',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('Bla bla' in structure['html'])
ok_('I think this' in structure['html'])
# now, we must approve this comment
new_comment = Comment.objects.get(
comment='I think this',
user=jay
)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
email_sent = mail.outbox[-1]
ok_('Reply' in email_sent.subject)
ok_(event.title in email_sent.subject)
eq_(email_sent.to, ['bob@mozilla.com'])
# expect there to be two unsubscribe links in there
url_unsubscribe = re.findall(
'/comments/unsubscribe/\w{10}/\d+/',
email_sent.body
)[0]
urls_unsubscribe_all = re.findall(
'/comments/unsubscribe/\w{10}/',
email_sent.body
)
for url in urls_unsubscribe_all:
if not url_unsubscribe.startswith(url):
url_unsubscribe_all = url
self.client.logout()
# now let's visit these
response = self.client.get(url_unsubscribe)
eq_(response.status_code, 200)
ok_('Are you sure' in response.content)
response = self.client.post(url_unsubscribe, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion=discussion
)
unsubscribed_url = reverse(
'comments:unsubscribed',
args=(discussion.pk,)
)
ok_(unsubscribed_url in response['location'])
response = self.client.get(unsubscribed_url)
eq_(response.status_code, 200)
ok_('Unsubscribed' in response.content)
ok_(event.title in response.content)
response = self.client.post(url_unsubscribe_all, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion__isnull=True
)
unsubscribed_url = reverse('comments:unsubscribed_all')
ok_(unsubscribed_url in response['location'])
def test_unsubscribed_reply_notifications_discussion(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
discussion=discussion
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
ok_(not mail.outbox)
def test_unsubscribed_reply_notifications_all(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
ok_(not mail.outbox)
def test_invalid_reply_to(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': '999999999',
})
eq_(response.status_code, 400)
@mock.patch('logging.error')
@mock.patch('requests.get')
def test_fetch_user_name(self, rget, rlogging):
cache.clear()
def mocked_get(url, **options):
if '/v2/users/99999' in url:
return Response(VOUCHED_FOR)
if 'peterbe' in url:
return Response(VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('comments:user_name')
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe = User.objects.create_user(
username='peterbe', password='secret'
)
assert self.client.login(username='peterbe', password='secret')
print("URL", url)
response = self.client.get(url)
print(response)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe.email = 'peterbe@mozilla.com'
peterbe.save()
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], 'Peter Bengtsson')
def test_modify_comment_without_permission(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
# and not being logged in you definitely can't post comments
response = self.client.post(url, {
'comment': "My opinion",
})
eq_(response.status_code, 403)
User.objects.create_user(username='jay', password='secret')
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 403)
# but you can flag
response = self.client.post(url, {
'flag': comment.pk,
})
eq_(response.status_code, 200)
# but not unflag
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 403)
def test_modify_comment_with_permission(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
jay = User.objects.create_user(username='jay', password='secret')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED,
flagged=1
)
url = reverse('comments:event_data', args=(event.pk,))
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_APPROVED))
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_POSTED))
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_REMOVED))
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(flagged=0))
def test_event_data_latest_400(self):
cache.clear()
event = Event.objects.get(title='Test event')
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 400)
discussion = self._create_discussion(event)
discussion.enabled = False
discussion.save()
response = self.client.get(url)
eq_(response.status_code, 400)
def test_event_data_latest(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
user=bob,
event=event,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
response = self.client.get(url, {'include_posted': True})
eq_(response.status_code, 200)
structure = json.loads(response.content)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(structure['latest_comment'], modified)
# ask it again and it should be the same
response_second = self.client.get(url, {'include_posted': True})
eq_(response_second.status_code, 200)
eq_(response.content, response_second.content)
| mozilla/airmozilla | airmozilla/comments/tests/test_views.py | Python | bsd-3-clause | 23,872 | [
"VisIt"
] | 738261c90eb95bbd5b95c8f3da992e12ab51361c973c1444f3f5156aebf36e78 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")  # line whose first non-space char is '['
FIND_STRING_CHARS = re.compile(r"['\"]")  # any single- or double-quote char
# Severity labels stored as element 0 of every log 4-tuple
# (severity, lineno, col, msg) collected by _PastaEditVisitor.
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
def full_name_node(name, ctx=ast.Load()):
  """Build the AST for a dotted name such as "tf.foo.bar".

  This is the inverse of `_PastaEditVisitor._get_full_name`: the first
  component becomes an `ast.Name` and each following component wraps the
  previous node in an `ast.Attribute`.

  Args:
    name: The dotted name to translate to a node, e.g. "a.b.c".
    ctx: Context applied to the outermost node only (inner nodes are
      always Load). Defaults to Load().

  Returns:
    An ast.Name (for a dot-free name) or nested ast.Attribute node.
  """
  parts = name.split(".")
  node = ast.Name(id=parts[0], ctx=ast.Load())
  for attr in parts[1:]:
    node = ast.Attribute(value=node, attr=attr, ctx=ast.Load())
  # Only the outermost node carries the caller-supplied context.
  node.ctx = ctx
  return node
# Pure data-holder base class: subclasses populate the fields listed in the
# docstring; _PastaEditVisitor reads them via getattr with {} defaults.
class APIChangeSpec(object):
  """This class defines the transformations that need to happen.

  This class must provide the following fields:

  * `function_keyword_renames`: maps function names to a map of old -> new
    argument names
  * `symbol_renames`: maps function names to new function names
  * `change_to_function`: a set of function names that have changed (for
    notifications)
  * `function_reorders`: maps functions whose argument order has changed to the
    list of arguments in the new order
  * `function_warnings`: maps full names of functions to warnings that will be
    printed out if the function is used. (e.g. tf.nn.convolution())
  * `function_transformers`: maps function names to custom handlers

  For an example, see `TFAPIChangeSpec`.
  """
class _PastaEditVisitor(ast.NodeVisitor):
  """AST Visitor that processes function calls.

  Updates function calls from old API version to new API version using a given
  change spec.
  """

  def __init__(self, api_change_spec):
    self._api_change_spec = api_change_spec
    self._log = []  # Holds 4-tuples: severity, line, col, msg.
    self._stack = []  # Allow easy access to parents.

  # Overridden to maintain a stack of nodes to allow for parent access
  def visit(self, node):
    self._stack.append(node)
    super(_PastaEditVisitor, self).visit(node)
    self._stack.pop()

  @property
  def errors(self):
    return [log for log in self._log if log[0] == ERROR]

  @property
  def warnings(self):
    return [log for log in self._log if log[0] == WARNING]

  @property
  def warnings_and_errors(self):
    return [log for log in self._log if log[0] in (WARNING, ERROR)]

  @property
  def info(self):
    return [log for log in self._log if log[0] == INFO]

  @property
  def log(self):
    return self._log

  def add_log(self, severity, lineno, col, msg):
    """Record one (severity, lineno, col, msg) entry and echo it to stdout."""
    self._log.append((severity, lineno, col, msg))
    print("%s line %d:%d: %s" % (severity, lineno, col, msg))

  def add_logs(self, logs):
    """Record a log and print it.

    The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
    be printed and recorded. It is part of the log available in the `self.log`
    property.

    Args:
      logs: The logs to add. Must be a list of tuples
        `(severity, lineno, col_offset, msg)`.
    """
    self._log.extend(logs)
    for log in logs:
      print("%s line %d:%d: %s" % log)

  def _get_applicable_entries(self, transformer_field, full_name, name):
    """Get all list entries indexed by name that apply to full_name or name."""
    # Transformers are indexed to full name, name, or no name
    # as a performance optimization.
    function_transformers = getattr(self._api_change_spec,
                                    transformer_field, {})
    glob_name = "*." + name if name else None
    transformers = []
    if full_name in function_transformers:
      transformers.append(function_transformers[full_name])
    if glob_name in function_transformers:
      transformers.append(function_transformers[glob_name])
    if "*" in function_transformers:
      transformers.append(function_transformers["*"])
    return transformers

  def _get_applicable_dict(self, transformer_field, full_name, name):
    """Get all dict entries indexed by name that apply to full_name or name."""
    # Transformers are indexed to full name, name, or no name
    # as a performance optimization.
    function_transformers = getattr(self._api_change_spec,
                                    transformer_field, {})
    glob_name = "*." + name if name else None
    # Precedence (least to most specific): "*" < "*.name" < full_name;
    # later update() calls overwrite earlier, less specific entries.
    transformers = function_transformers.get("*", {}).copy()
    transformers.update(function_transformers.get(glob_name, {}))
    transformers.update(function_transformers.get(full_name, {}))
    return transformers

  def _get_full_name(self, node):
    """Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".

    This is the inverse of `full_name_node`.

    Args:
      node: A Node of type Attribute.

    Returns:
      a '.'-delimited full-name or None if node was not Attribute or Name.
      i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
    """
    curr = node
    items = []
    # Walk the Attribute chain inward; bail out on anything that is not a
    # pure dotted-name expression (e.g. a call or subscript in the chain).
    while not isinstance(curr, ast.Name):
      if not isinstance(curr, ast.Attribute):
        return None
      items.append(curr.attr)
      curr = curr.value
    items.append(curr.id)
    return ".".join(reversed(items))

  def _maybe_add_warning(self, node, full_name):
    """Adds an error to be printed about full_name at node."""
    function_warnings = self._api_change_spec.function_warnings
    if full_name in function_warnings:
      warning_message = function_warnings[full_name]
      warning_message = warning_message.replace("<function name>", full_name)
      self.add_log(WARNING, node.lineno, node.col_offset,
                   "%s requires manual check. %s" % (full_name,
                                                     warning_message))
      return True
    else:
      return False

  def _maybe_add_call_warning(self, node, full_name, name):
    """Print a warning when specific functions are called with selected args.

    The function _print_warning_for_function matches the full name of the called
    function, e.g., tf.foo.bar(). This function matches the function name that
    is called, as long as the function is an attribute. For example,
    `tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.

    Args:
      node: ast.Call object
      full_name: The precomputed full name of the callable, if one exists, None
        otherwise.
      name: The precomputed name of the callable, if one exists, None otherwise.

    Returns:
      Whether an error was recorded.
    """
    # Only look for *.-warnings here, the other will be handled by the Attribute
    # visitor. Also, do not warn for bare functions, only if the call func is
    # an attribute.
    warned = False
    if isinstance(node.func, ast.Attribute):
      warned = self._maybe_add_warning(node, "*." + name)

    # All arg warnings are handled here, since only we have the args
    arg_warnings = self._get_applicable_dict("function_arg_warnings",
                                             full_name, name)
    used_args = [kw.arg for kw in node.keywords]
    # Warn when the argument appears either as a keyword or positionally
    # (index `arg` within node.args).
    for (kwarg, arg), warning in arg_warnings.items():
      if kwarg in used_args or len(node.args) > arg:
        warned = True
        warning_message = warning.replace("<function name>", full_name or name)
        self.add_log(WARNING, node.lineno, node.col_offset,
                     "%s called with %s argument requires manual check: %s." %
                     (full_name or name, kwarg, warning_message))
    return warned

  def _maybe_rename(self, parent, node, full_name):
    """Replace node (Attribute or Name) with a node representing full_name."""
    new_name = self._api_change_spec.symbol_renames.get(full_name, None)
    if new_name:
      self.add_log(INFO, node.lineno, node.col_offset,
                   "Renamed %r to %r" % (full_name, new_name))
      new_node = full_name_node(new_name, node.ctx)
      ast.copy_location(new_node, node)
      pasta.ast_utils.replace_child(parent, node, new_node)
      return True
    else:
      return False

  def _maybe_change_to_function_call(self, parent, node, full_name):
    """Wraps node (typically, an Attribute or Expr) in a Call."""
    if full_name in self._api_change_spec.change_to_function:
      if not isinstance(parent, ast.Call):
        # ast.Call's constructor is really picky about how many arguments it
        # wants, and also, it changed between Py2 and Py3.
        if six.PY2:
          new_node = ast.Call(node, [], [], None, None)
        else:
          new_node = ast.Call(node, [], [])
        pasta.ast_utils.replace_child(parent, node, new_node)
        ast.copy_location(new_node, node)
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Changed %r to a function call" % full_name)
        return True
    return False

  def _maybe_add_arg_names(self, node, full_name):
    """Make args into keyword args if function called full_name requires it."""
    function_reorders = self._api_change_spec.function_reorders
    if full_name in function_reorders:
      reordered = function_reorders[full_name]
      new_keywords = []
      for idx, arg in enumerate(node.args):
        if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
          continue  # Can't move Starred to keywords
        keyword_arg = reordered[idx]
        keyword = ast.keyword(arg=keyword_arg, value=arg)
        new_keywords.append(keyword)
      if new_keywords:
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Added keywords to args of function %r" % full_name)
        node.args = []
        # Converted positionals go first so existing keywords keep working.
        node.keywords = new_keywords + (node.keywords or [])
        return True
    return False

  def _maybe_modify_args(self, node, full_name, name):
    """Rename keyword args if the function called full_name requires it."""
    renamed_keywords = self._get_applicable_dict("function_keyword_renames",
                                                 full_name, name)
    if not renamed_keywords:
      return False

    modified = False
    new_keywords = []
    for keyword in node.keywords:
      argkey = keyword.arg
      if argkey in renamed_keywords:
        modified = True
        if renamed_keywords[argkey] is None:
          # A None mapping means the keyword argument is dropped entirely
          # (it is not appended to new_keywords below).
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Removed argument %s for function %s" % (
                           argkey, full_name or name))
        else:
          keyword.arg = renamed_keywords[argkey]
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Renamed keyword argument for %s from %s to %s" % (
                           full_name, argkey, renamed_keywords[argkey]))
          new_keywords.append(keyword)
      else:
        new_keywords.append(keyword)

    if modified:
      node.keywords = new_keywords
    return modified

  def visit_Call(self, node):  # pylint: disable=invalid-name
    """Handle visiting a call node in the AST.

    Args:
      node: Current Node
    """
    assert self._stack[-1] is node

    # Get the name for this call, so we can index stuff with it.
    full_name = self._get_full_name(node.func)
    if full_name:
      name = full_name.split(".")[-1]
    elif isinstance(node.func, ast.Name):
      name = node.func.id
    elif isinstance(node.func, ast.Attribute):
      name = node.func.attr
    else:
      name = None

    # Call standard transformers for this node.
    # Make sure warnings come first, since args or names triggering warnings
    # may be removed by the other transformations.
    self._maybe_add_call_warning(node, full_name, name)
    # Make all args into kwargs
    self._maybe_add_arg_names(node, full_name)
    # Argument name changes or deletions
    self._maybe_modify_args(node, full_name, name)

    # Call transformers. These have the ability to modify the node, and if they
    # do, will return the new node they created (or the same node if they just
    # changed it). They are given the parent, but we will take care of
    # integrating their changes into the parent if they return a new node.
    #
    # These are matched on the old name, since renaming is performed by the
    # Attribute visitor, which happens later.
    transformers = self._get_applicable_entries("function_transformers",
                                                full_name, name)
    parent = self._stack[-2]
    for transformer in transformers:
      logs = []
      new_node = transformer(parent, node, full_name, name, logs)
      self.add_logs(logs)
      if new_node:
        if new_node is not node:
          pasta.ast_utils.replace_child(parent, node, new_node)
          node = new_node
          # Keep the stack consistent so nested visits see the new node.
          self._stack[-1] = node

    self.generic_visit(node)

  def visit_Attribute(self, node):  # pylint: disable=invalid-name
    """Handle bare Attributes i.e. [tf.foo, tf.bar]."""
    assert self._stack[-1] is node
    full_name = self._get_full_name(node)
    if full_name:
      parent = self._stack[-2]

      # Make sure the warning comes first, otherwise the name may have changed
      self._maybe_add_warning(node, full_name)

      # Once we did a modification, node is invalid and not worth inspecting
      # further. Also, we only perform modifications for simple nodes, so
      # There'd be no point in descending further.
      if self._maybe_rename(parent, node, full_name):
        return
      if self._maybe_change_to_function_call(parent, node, full_name):
        return

    self.generic_visit(node)
class ASTCodeUpgrader(object):
  """Handles upgrading a set of Python files using a given API change spec."""

  def __init__(self, api_change_spec):
    if not isinstance(api_change_spec, APIChangeSpec):
      raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
                      type(api_change_spec))
    self._api_change_spec = api_change_spec

  def process_file(self, in_filename, out_filename):
    """Process the given python file for incompatible changes.

    Args:
      in_filename: filename to parse
      out_filename: output file to write to

    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    # Write to a temporary file, just in case we are doing an in-place modify.
    # pylint: disable=g-backslash-continuation
    with open(in_filename, "r") as in_file, \
        tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
      ret = self.process_opened_file(in_filename, in_file, out_filename,
                                     temp_file)
    # pylint: enable=g-backslash-continuation
    shutil.move(temp_file.name, out_filename)
    return ret

  def format_log(self, log, in_filename):
    """Render one (severity, lineno, col, msg) tuple as "file:line:col: ..."."""
    log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
    if in_filename:
      return in_filename + ":" + log_string
    else:
      return log_string

  def update_string_pasta(self, text, in_filename):
    """Updates a file using pasta.

    Returns a 4-tuple (processed_count, new_text, logs, errors); on a parse
    failure processed_count is 0 and new_text is empty.
    """
    try:
      t = pasta.parse(text)
    except (SyntaxError, ValueError, TypeError):
      log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
      return 0, "", log, []

    visitor = _PastaEditVisitor(self._api_change_spec)
    visitor.visit(t)

    logs = [self.format_log(log, None) for log in visitor.log]
    errors = [self.format_log(error, in_filename)
              for error in visitor.warnings_and_errors]
    return 1, pasta.dump(t), logs, errors

  def _format_log(self, log, in_filename, out_filename):
    """Wrap a list of formatted log lines in a per-file report section."""
    text = "-" * 80 + "\n"
    text += "Processing file %r\n outputting to %r\n" % (in_filename,
                                                         out_filename)
    text += "-" * 80 + "\n\n"
    text += "\n".join(log)
    text += "-" * 80 + "\n\n"
    return text

  def process_opened_file(self, in_filename, in_file, out_filename, out_file):
    """Process the given python file for incompatible changes.

    This function is split out to facilitate StringIO testing from
    tf_upgrade_test.py.

    Args:
      in_filename: filename to parse
      in_file: opened file (or StringIO)
      out_filename: output file to write to
      out_file: opened file (or StringIO)

    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    lines = in_file.readlines()
    processed_file, new_file_content, log, process_errors = (
        self.update_string_pasta("".join(lines), in_filename))

    if out_file and processed_file:
      out_file.write(new_file_content)

    return (processed_file,
            self._format_log(log, in_filename, out_filename),
            process_errors)

  def process_tree(self, root_directory, output_root_directory,
                   copy_other_files, in_place):
    """Processes upgrades on an entire tree of python files in place.

    Note that only Python files are processed. If you have custom code in
    other languages, you will need to manually upgrade those.

    Args:
      root_directory: Directory to walk and process.
      output_root_directory: Directory to use as base.
      copy_other_files: Copy files that are not touched by this converter.
      in_place: Allow the conversion of an entire directory in place.

    Returns:
      A tuple of files processed, the report string for all files, and a dict
      mapping filenames to errors encountered in that file.
    """
    if output_root_directory == root_directory:
      if in_place:
        return self.process_tree_inplace(root_directory)
      else:
        print("In order to copy a directory in place the `--inplace` input "
              "arg must be set to `True`.")
        sys.exit(1)

    # make sure output directory doesn't exist
    if output_root_directory and os.path.exists(output_root_directory):
      print("Output directory %r must not already exist." %
            (output_root_directory))
      sys.exit(1)

    # make sure output directory does not overlap with root_directory
    norm_root = os.path.split(os.path.normpath(root_directory))
    norm_output = os.path.split(os.path.normpath(output_root_directory))
    if norm_root == norm_output:
      print("Output directory %r same as input directory %r" %
            (root_directory, output_root_directory))
      sys.exit(1)

    # Collect list of files to process (we do this to correctly handle if the
    # user puts the output directory in some sub directory of the input dir)
    files_to_process = []
    files_to_copy = []
    for dir_name, _, file_list in os.walk(root_directory):
      py_files = [f for f in file_list if f.endswith(".py")]
      copy_files = [f for f in file_list if not f.endswith(".py")]
      for filename in py_files:
        fullpath = os.path.join(dir_name, filename)
        fullpath_output = os.path.join(output_root_directory,
                                       os.path.relpath(fullpath,
                                                       root_directory))
        files_to_process.append((fullpath, fullpath_output))
      if copy_other_files:
        for filename in copy_files:
          fullpath = os.path.join(dir_name, filename)
          fullpath_output = os.path.join(output_root_directory,
                                         os.path.relpath(
                                             fullpath, root_directory))
          files_to_copy.append((fullpath, fullpath_output))

    file_count = 0
    tree_errors = {}
    report = ""
    report += ("=" * 80) + "\n"
    report += "Input tree: %r\n" % root_directory
    report += ("=" * 80) + "\n"

    for input_path, output_path in files_to_process:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      file_count += 1
      _, l_report, l_errors = self.process_file(input_path, output_path)
      tree_errors[input_path] = l_errors
      report += l_report
    for input_path, output_path in files_to_copy:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      shutil.copy(input_path, output_path)
    return file_count, report, tree_errors

  def process_tree_inplace(self, root_directory):
    """Process a directory of python files in place."""
    files_to_process = []
    for dir_name, _, file_list in os.walk(root_directory):
      py_files = [os.path.join(dir_name,
                               f) for f in file_list if f.endswith(".py")]
      files_to_process += py_files

    file_count = 0
    tree_errors = {}
    report = ""
    report += ("=" * 80) + "\n"
    report += "Input tree: %r\n" % root_directory
    report += ("=" * 80) + "\n"

    for path in files_to_process:
      file_count += 1
      _, l_report, l_errors = self.process_file(path, path)
      tree_errors[path] = l_errors
      report += l_report

    return file_count, report, tree_errors
| apark263/tensorflow | tensorflow/tools/compatibility/ast_edits.py | Python | apache-2.0 | 22,130 | [
"VisIt"
] | 051969d0aae4b5a4c9b785d2469454df8ddb1206d282f564fc79bc13226d251a |
import sys
import os, os.path
import string
import traceback
import antlr
# Backfill the True/False builtins on very old interpreters (pre-2.2.1 had
# neither; pre-2.3 had no True keyword). Python 2-only: assigning to False
# is a SyntaxError on Python 3.
# NOTE(review): this is a lexicographic string comparison, which misorders
# versions like '2.10' vs '2.2' -- harmless for the 2.x versions targeted
# here, but not a general version check.
version = sys.version.split()[0]
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
import LinkExtractor
class LinkListener:
    """Callback interface for link-extraction events.

    Subclasses receive one callback per hyperlink and one per image
    reference encountered while scanning an HTML document; both default
    implementations are abstract.
    """

    def hrefReference(self, target, line):
        """Called for each href target found; `line` is the source line."""
        raise NotImplementedError

    def imageReference(self, imageFileName, line):
        """Called for each image reference found; `line` is the source line."""
        raise NotImplementedError
# Python 2-only code (`except Exception, e`, the `file()` builtin, and the
# True/False shim above); do not expect it to run under Python 3.
class LinkChecker(LinkListener):
    ### Record which files we have seen so that we don't get into an
    # infinite loop and for efficiency. The absolute path is stored here
    # to uniquely identify the files. That is, a file can be arrived
    # at from many different locations such as help.html from .
    # and ../help.html from a directory below.
    #
    # This table is shared by all instances of LinkChecker.
    #
    visited = {}

    ### A table of the images visited by any document; a cache of correctness
    imgVisited = {}

    recursionDepth = 0
    separator = "/"  # not OS sensitive in HTML
    localSeparator = None

    def __init__(self, document):
        self.document = document
        self.directory = "."
        LinkChecker.localSeparator = os.sep

    def checkLinkRules(self, fName, line):
        """Report case mismatches and absolute-path references for fName.

        Returns False when a rule is violated, True otherwise.
        """
        # Check case of path (check for UNIX compatibility on a PC)!
        # NOTE(review): `separator` is unqualified here and not defined in
        # this scope -- this looks like it should be LinkChecker.separator;
        # as written it raises NameError. Verify before relying on this path.
        offensive = LinkChecker.offensivePathMember(self.directory + separator + fName)
        if offensive:
            file_ = ''
            try:
                f = file(offensive)
                file_ = os.path.normcase(offensive)
                self.error("Case mismatch in reference " + fName + ":" +
                           os.sep + "\treal name is " +
                           os.path.basename(file_) + os.sep +
                           "\treal absolute path is " + file_, line);
                return False
            except Exception, e:
                self.error("internal error: cannot get canonical name for " +
                           offensive, line);
        if LinkChecker.pathIsAbsolute(fName):
            self.error("Reference to " + fName + " with absolute path", line);
            return False;
        return True

    def doCheck(self):
        """Lex this .html document; link/image callbacks fire as tokens go by."""
        if self.document[-5:] != ".html":
            return
        # prevent infinite recursion to this file
        if LinkChecker.isVisited(self.document):
            return
        LinkChecker.visit(self.document)
        LinkChecker.recursionDepth += 1
        f = file(self.document)
        lexer = LinkExtractor.Lexer(f)
        lexer.addLinkListener(self)
        # this will parse whole file since all tokens are skipped
        lexer.nextToken()
        LinkChecker.recursionDepth -= 1

    def error(self, err, line):
        """Write "document:line: error: msg" to stderr."""
        d = "<internal error>"
        try:
            # f = file(self.document)
            d = os.path.normcase(self.document)
        except Exception, e:
            sys.stderr.write("internal error: cannot find file that has error\n")
            sys.exit(0)
        sys.stderr.write(d + ":" + str(line) + ": error: " + err + '\n')

    # True for "/..." (Unix) or "X:..." (DOS drive) style paths.
    def pathIsAbsolute(path):
        return path[0] == '/' or path[1] == ':'
    pathIsAbsolute = staticmethod(pathIsAbsolute)

    # True when target is a plain file reference (no scheme, mailto or news).
    def fileProtocolURL(target):
        return target.find("://") == -1 and \
               not target[:7] == "mailto:" and \
               not target[:5] == "news:"
    fileProtocolURL = staticmethod(fileProtocolURL)

    def getParent(path):
        # NOTE(review): os.path.join applied to the tuple from
        # os.path.split(...)[:-1] is unusual; presumably intended as
        # os.path.dirname(path) -- verify.
        return os.path.join(os.path.split(path)[:-1])
    getParent = staticmethod(getParent)

    def hrefReference(self, target, line):
        """Validate an href target and recursively check the linked document."""
        sys.stdout.write(self.document + ":" + str(line) + ": href to " + target + '\n')
        # recursively check the target document unless non-file ref
        if LinkChecker.fileProtocolURL(target):
            # prune off any #name reference on end of file
            pound = target.find('#')
            path = target
            if pound != -1:
                path = target[:pound] # rip off #name on end, leave file
                if not len(path):
                    return # ref to name in this file
            # first check existence on disk
            f = self.directory + os.sep + path
            if not os.path.exists(f):
                self.error("Reference to missing file " + path, line)
                return
            # check the case
            self.checkLinkRules(path, line);
            try:
                # Link is ok, now follow the link
                # NOTE(review): LinkChecker has no `Lexer` attribute --
                # this looks like it should construct LinkChecker(...)
                # directly; the except below masks the AttributeError.
                chk = LinkChecker.Lexer(self.directory + os.sep + path)
                chk.doCheck()
            except Exception, e:
                self.error("Document does not exist: " + target, line)

    # True when this image path has already been validated and cached.
    def imageLinkIsOk(file_):
        # f = file(file_)
        f = os.path.normcase(file_)
        b = f in LinkChecker.imgVisited.keys()
        if b:
            return True
        return False
    imageLinkIsOk = staticmethod(imageLinkIsOk)

    def imageReference(self, imageFileName, line):
        """Validate an image reference; cache it once it passes the rules."""
        # first check if we have seen this exact file
        try:
            if LinkChecker.imageLinkIsOk(self.directory + os.sep + imageFileName):
                return
            f = self.directory + os.sep + imageFileName
            if not os.path.exists(f):
                self.error("Reference to missing file " + imageFileName, line);
                return;
            if self.checkLinkRules(imageFileName, line):
                LinkChecker.visitImage(self.directory + os.sep + imageFileName)
        except Exception, e:
            sys.stderr.write("internal error: " + str(e) + '\n')

    ###
    # Given a path to a file or dir, is the case of the reference
    # the same as the actual path on the disk? This is only
    # meaningful on a PC which is case-insensitive (not a real
    # file system).
    #
    # Returns null if there is nothing offensive and the file exists.
    # Returns offending file/dir if it does not exist or
    # it has there is a case mismatch for it. The last file is checked
    # first followed by the parent directory, recursively, all the way
    # to the absolute or relative path root in that String; i.e., we parse
    # from right to left.
    #
    # Because the File object won't actually go get the real filename
    # from the disk so we can compare, we must get a directory listing
    # of the directory and then look for the referenced file or dir.
    # For example, for "./images/logo.gif" we would check "./images" dir
    # listing for "logo.gif" with the appropriate case and then check
    # directory "." for a dir called images with the right case. When
    # no parent exists, we can stop looking for case problems.
    def offensivePathMember(fName):
        sys.stdout.write("caseMismatch(" + fName + ")\n");
        # have we reached the root? (stopping condition)
        if not fName or not LinkChecker.getParent(fName):
            return None
        parent = LinkChecker.getParent(fName)
        fName = os.path.basename(fName)
        # f = file(parent)
        # NOTE(review): os.path.split returns a (head, tail) pair, not a
        # directory listing (the Java original presumably used File.list());
        # presumably os.listdir(parent) was intended -- verify.
        parentFiles = os.path.split(parent)
        sys.stdout.write("checking dir " + parent + " for " + fName + '\n')
        # handle weird stuff like "c:/doc/../foo"; skip this parent dir
        if fName == "..":
            return LinkChecker.offensivePathMember(LinkChecker.getParent(parent))
        for i in range(len(parentFiles)):
            sys.stdout.write("is it " + parentFiles[i] + "?\n")
            if string.lower(parentFiles[i]) == fName:
                if not parentFiles[i] == fName:
                    sys.stdout.write("case mismatch " + fName + " in " +
                                     parent + '\n')
                    return parent + LinkChecker.separator + fName
                # found a match, verify parent is ok
                return LinkChecker.offensivePathMember(parent)
        sys.stdout.write("can't find " + fName + " in " + parent + '\n')
        return parent + LinkChecker.separator + fName
    offensivePathMember = staticmethod(offensivePathMember)

    # Mark a document as processed (shared, class-level cache).
    def visit(file_):
        # f = file(file_)
        f = os.path.normcase(file_)
        LinkChecker.visited[f] = True
    visit = staticmethod(visit)

    def isVisited(file_):
        # f = file(file_)
        f = os.path.normcase(file_)
        return f in LinkChecker.visited.keys()
    isVisited = staticmethod(isVisited)

    # Mark an image as validated (shared, class-level cache).
    def visitImage(file_):
        # f = file(file_)
        f = os.path.normcase(file_)
        sys.stdout.write("caching image " + f + '\n')
        LinkChecker.imgVisited[f] = True
    visitImage = staticmethod(visitImage)
# Command-line driver: checks the document named by sys.argv[1].
# Python 2-only (`except Exception, e` and the removed `apply` builtin).
class Main:
    def __init__(self):
        chk = LinkChecker(sys.argv[1])
        try:
            chk.doCheck()
        except Exception, e:
            sys.stderr.write("Exception: " + str(e) + '\n');
            # Print the full traceback of the original failure.
            apply(traceback.print_exception, sys.exc_info())
# Entry point: run the link checker on the file named by the first CLI arg.
if __name__ == "__main__":
    Main()
| rmartinho/boo | lib/antlr-2.7.5/examples/python/linkChecker/LinkChecker.py | Python | bsd-3-clause | 7,759 | [
"VisIt"
] | d9d710a9268dbba51d3f454a9be7babb2cb58bf014b6b7c4d572de7d50550d6e |
# Regression test: delta-SCF excitation of CO with GPAW.
# Computes the ground state, then the same excited state two ways
# (MolecularOrbital vs. AEOrbital) and checks energies/iteration counts
# against stored reference values.
from ase.data.molecules import molecule
from gpaw import GPAW
from gpaw import dscf
from gpaw.test import equal

# Ground state calculation
#------------------------------------------------------------------
calc_mol = GPAW(nbands=8, h=0.2, xc='PBE', spinpol=True,
                convergence={'energy': 100,
                             'density': 100,
                             'eigenstates': 1.0e-9,
                             'bands': -1})
CO = molecule('CO')
CO.center(vacuum=3)
CO.set_calculator(calc_mol)
E_gs = CO.get_potential_energy()
niter_gs = calc_mol.get_number_of_iterations()

'''Get the pseudowavefunctions and projector overlaps of the
state which is to be occupied. n=5,6 is the 2pix and 2piy orbitals'''
n = 5
# NOTE(review): this rebinds the name `molecule` (imported from
# ase.data.molecules above) to a plain list of atom indices; it is not
# used as a function again below, so this shadowing is harmless here.
molecule = [0,1]
# Band-n pseudo-wavefunction per k-point/spin, and its projector overlaps
# restricted to the two molecular atoms.
wf_u = [kpt.psit_nG[n] for kpt in calc_mol.wfs.kpt_u]
p_uai = [dict([(molecule[a], P_ni[n]) for a, P_ni in kpt.P_ani.items()])
         for kpt in calc_mol.wfs.kpt_u]

# Excited state calculations
#--------------------------------------------
# Variant 1: target orbital specified as a linear combination of atomic
# orbitals via MolecularOrbital weights.
calc_1 = GPAW(nbands=8, h=0.2, xc='PBE', spinpol=True,
              convergence={'energy': 100,
                           'density': 100,
                           'eigenstates': 1.0e-9,
                           'bands': -1})
CO.set_calculator(calc_1)
weights = {0: [0.,0.,0.,1.], 1: [0.,0.,0.,-1.]}
lumo = dscf.MolecularOrbital(calc_1, weights=weights)
dscf.dscf_calculation(calc_1, [[1.0, lumo, 1]], CO)
E_es1 = CO.get_potential_energy()
niter_es1 = calc_1.get_number_of_iterations()
calc_1.write('dscf_CO_es1.gpw', mode='all')

# Variant 2: same orbital specified from the ground-state wavefunctions
# extracted above (AEOrbital); should give the same excitation energy.
calc_2 = GPAW(nbands=8, h=0.2, xc='PBE', spinpol=True,
              convergence={'energy': 100,
                           'density': 100,
                           'eigenstates': 1.0e-9,
                           'bands': -1})
CO.set_calculator(calc_2)
lumo = dscf.AEOrbital(calc_2, wf_u, p_uai)
dscf.dscf_calculation(calc_2, [[1.0, lumo, 1]], CO)
E_es2 = CO.get_potential_energy()
niter_es2 = calc_2.get_number_of_iterations()
calc_2.write('dscf_CO_es2.gpw', mode='all')

# Physical sanity checks: ~5.8 eV excitation, and both variants agree.
equal(E_es1, E_gs+5.8, 0.1)
equal(E_es1, E_es2, 0.001)

# Regression reference values (energies in eV, iteration counts).
energy_tolerance = 0.001
niter_tolerance = 2
equal(E_gs, -14.9313, energy_tolerance)
equal(niter_gs, 20, niter_tolerance)
equal(E_es1, -9.1067, energy_tolerance)
equal(niter_es1, 22, niter_tolerance)
equal(E_es2, -9.10706, energy_tolerance)
equal(niter_es2, 23, niter_tolerance)
| qsnake/gpaw | gpaw/test/big/miscellaneous/dscf_CO.py | Python | gpl-3.0 | 2,347 | [
"ASE",
"GPAW"
] | 9b2d18ad7ac0ca8d7c37c8d0618b8d988b87fd623c6c065447c8eb9c368437bf |
# -*- coding: utf-8 -*-
#
# evaluate_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Example for the quantal_stp_synapse
-----------------------------------------
The ``quantal_stp_synapse`` is a stochastic version of the Tsodys-Markram model
for synaptic short term plasticity (STP).
This script compares the two variants of the Tsodyks/Markram synapse in NEST.
This synapse model implements synaptic short-term depression and short-term
facilitation according to the quantal release model described by Fuhrmann et
al. [1]_ and Loebel et al. [2]_.
Each presynaptic spike will stochastically activate a fraction of the
available release sites. This fraction is binomialy distributed and the
release probability per site is governed by the Fuhrmann et al. (2002) model.
The solution of the differential equations is taken from Maass and Markram
2002 [3]_.
The connection weight is interpreted as the maximal weight that can be
obtained if all n release sites are activated.
Parameters
~~~~~~~~~~~~~
The following parameters can be set in the status dictionary:
* U - Maximal fraction of available resources [0,1], default=0.5
* u - available fraction of resources [0,1], default=0.5
* p - probability that a vesicle is available, default = 1.0
* n - total number of release sites, default = 1
* a - number of available release sites, default = n
* tau_rec - time constant for depression in ms, default=800 ms
* tau_rec - time constant for facilitation in ms, default=0 (off)
References
~~~~~~~~~~~~~
.. [1] Fuhrmann G, Segev I, Markram H, and Tsodyks MV. (2002). Coding of
temporal information by activity-dependent synapses. Journal of
Neurophysiology, 8. https://doi.org/10.1152/jn.00258.2001
.. [2] Loebel, A., Silberberg, G., Helbig, D., Markram, H., Tsodyks,
M. V, & Richardson, M. J. E. (2009). Multiquantal release underlies
the distribution of synaptic efficacies in the neocortex. Frontiers
in Computational Neuroscience, 3:27. doi:10.3389/neuro.10.027.
.. [3] Maass W, and Markram H. (2002). Synapses as dynamic memory buffers.
Neural Networks, 15(2), 155-161.
http://dx.doi.org/10.1016/S0893-6080(01)00144-7
"""
import nest
import nest.voltage_trace
import numpy
import pylab

nest.ResetKernel()

################################################################################
# On average, the ``quantal_stp_synapse`` converges to the ``tsodyks2_synapse``,
# so we can compare the two by running multiple trials.
#
# First we define the number of trials as well as the number of release sites.

n_syn = 10.0  # number of synapses in a connection
n_trials = 100  # number of measurement trials

###############################################################################
# Next, we define parameter sets for facilitation

fac_params = {"U": 0.02, "u": 0.02, "tau_fac": 500.,
              "tau_rec": 200., "weight": 1.}

###############################################################################
# Then, we assign the parameter set to the synapse models

t1_params = fac_params  # for tsodyks2_synapse
t2_params = t1_params.copy()  # for quantal_stp_synapse

# NOTE(review): t1_params aliases fac_params (no copy), so the 'x' key
# added here also lands in fac_params.
t1_params['x'] = t1_params['U']
t2_params['n'] = n_syn

###############################################################################
# To make the responses comparable, we have to scale the weight by the
# number of synapses.

t2_params['weight'] = 1. / n_syn

###############################################################################
# Next, we change the defaults of the various models to our parameters.

nest.SetDefaults("tsodyks2_synapse", t1_params)
nest.SetDefaults("quantal_stp_synapse", t2_params)
nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3.})

###############################################################################
# We create three different neurons.
# Neuron one is the sender, the two other neurons receive the synapses.

neuron = nest.Create("iaf_psc_exp", 3)

###############################################################################
# The connection from neuron 1 to neuron 2 is a deterministic synapse.

nest.Connect([neuron[0]], [neuron[1]], syn_spec="tsodyks2_synapse")

###############################################################################
# The connection from neuron 1 to neuron 3 has a stochastic
# ``quantal_stp_synapse``.

nest.Connect([neuron[0]], [neuron[2]], syn_spec="quantal_stp_synapse")

###############################################################################
# The voltmeter will show us the synaptic responses in neurons 2 and 3.

voltmeter = nest.Create("voltmeter", 2)
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})

###############################################################################
# One dry run to bring all synapses into their rest state.
# The default initialization does not achieve this. In large network
# simulations this problem does not show, but in small simulations like
# this, we would see it.

nest.SetStatus([neuron[0]], "I_e", 376.0)
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 0.0)
nest.Simulate(1000.0)

###############################################################################
# Only now do we connect the ``voltmeter`` to the neurons.

nest.Connect([voltmeter[0]], [neuron[1]])
nest.Connect([voltmeter[1]], [neuron[2]])

###############################################################################
# This loop runs over the `n_trials` trials and performs a standard protocol
# of a high-rate response, followed by a pause and then a recovery response.

for t in range(n_trials):
    nest.SetStatus([neuron[0]], "I_e", 376.0)
    nest.Simulate(500.0)
    nest.SetStatus([neuron[0]], "I_e", 0.0)
    nest.Simulate(1000.0)

###############################################################################
# Flush the last voltmeter events from the queue by simulating one time-step.

nest.Simulate(.1)

###############################################################################
# Extract the reference trace.

vm = numpy.array(nest.GetStatus([voltmeter[1]], 'events')[0]['V_m'])
vm_reference = numpy.array(nest.GetStatus([voltmeter[0]], 'events')[0]['V_m'])

# 1500 samples per trial: 500 ms + 1000 ms of simulated time, presumably
# recorded at a 1 ms voltmeter interval — TODO confirm recording interval.
vm.shape = (n_trials, 1500)
vm_reference.shape = (n_trials, 1500)

###############################################################################
# Now compute the mean of all trials and plot against trials and references.

vm_mean = numpy.array([numpy.mean(vm[:, i]) for (i, j) in enumerate(vm[0, :])])
vm_ref_mean = numpy.array([numpy.mean(vm_reference[:, i])
                           for (i, j) in enumerate(vm_reference[0, :])])
pylab.plot(vm_mean)
pylab.plot(vm_ref_mean)

###############################################################################
# Finally, print the mean-squared error between the trial-average and the
# reference trace. The value should be `< 10^-9`.

print(numpy.mean((vm_ref_mean - vm_mean) ** 2))
| hakonsbm/nest-simulator | pynest/examples/evaluate_quantal_stp_synapse.py | Python | gpl-2.0 | 7,597 | [
"NEURON"
] | fed9f303357aef89c11fb8cc27ea56aedadb426cfc0cefc8e3e43273f395a35e |
# coding=utf-8
from __future__ import absolute_import
from flask import url_for
from flask_login import current_user
import pytest
from firefly.app import create_app
from firefly.ext import db
from firefly.models.user import User
@pytest.fixture
def app(request):
    """Build the Flask test application; drop its Mongo database on teardown."""
    application = create_app('tests/settings.py')
    db_name = application.config['MONGODB_SETTINGS']['db']

    def cleanup():
        # Remove everything the test wrote so runs stay independent.
        db.connection.drop_database(db_name)

    request.addfinalizer(cleanup)
    return application
@pytest.fixture
def client_class(request, client):
    """
    Class-scoped helper fixture: attaches the Flask test `client` and a
    `_login` helper method to the requesting test class.
    """
    def login(cls):
        # Ensure a known user exists with password 'foobar' before logging in.
        user = User.objects.filter(email='foo@bar.com').first()
        if user is None:
            user = User.create_user('foo', 'foo@bar.com', 'foobar')
        else:
            user.set_password('foobar')
            user.save()
        form = {
            'email': 'foo@bar.com',
            'password': 'foobar',
        }
        rv = client.post(
            url_for('home.login'), data=form,
            follow_redirects=True
        )
        # NOTE(review): `is_authenticated` became a property (not a method)
        # in later flask-login releases — confirm the pinned version.
        assert current_user.is_authenticated()
        # Logged-in pages expose the logout link.
        assert url_for('security.logout') in rv.data
    if request.cls is not None:
        request.cls.client = client
        request.cls._login = login
| thcode/firefly | conftest.py | Python | mit | 1,201 | [
"Firefly"
] | 49941a3523530953ac453e930434106ea3d34de2d11138ab5b5c6a4dc4eeecf7 |
# Prefer the C-accelerated ElementTree when it is available.
try:
    import xml.etree.cElementTree as et
except ImportError:  # was a bare `except:`; only an import failure is expected
    import xml.etree.ElementTree as et

try:
    from galaxy.model import Job
    job_states = Job.states
except ImportError:
    # Not in Galaxy, map Galaxy job states to Pulsar ones.
    from galaxy.util import enum
    job_states = enum(RUNNING='running', OK='complete', QUEUED='queued')

from ..job import BaseJobExec

__all__ = ('Torque',)

from logging import getLogger
log = getLogger(__name__)
# Map long-form destination parameter names to the corresponding qsub
# command-line flags; used by Torque.job_script_kwargs to build the
# '#PBS' directives in the job script header.
argmap = {'destination': '-q',
          'Execution_Time': '-a',
          'Account_Name': '-A',
          'Checkpoint': '-c',
          'Error_Path': '-e',
          'Group_List': '-g',
          'Hold_Types': '-h',
          'Join_Paths': '-j',
          'Keep_Files': '-k',
          'Resource_List': '-l',
          'Mail_Points': '-m',
          'Mail_Users': '-M',
          'Job_Name': '-N',
          'Output_Path': '-o',
          'Priority': '-p',
          'Rerunable': '-r',
          'Shell_Path_List': '-S',
          'job_array_request': '-t',
          'User_List': '-u',
          'Variable_List': '-v'}
class Torque(BaseJobExec):
    """CLI interface to the TORQUE/PBS batch system (qsub/qstat/qdel)."""

    def __init__(self, **params):
        # Keep a private copy of the destination parameters.
        self.params = {}
        for k, v in params.items():
            self.params[k] = v

    def job_script_kwargs(self, ofile, efile, job_name):
        """Render destination params into '#PBS' header lines for the job script."""
        pbsargs = {'-o': ofile,
                   '-e': efile,
                   '-N': job_name}
        for k, v in self.params.items():
            if k == 'plugin':
                continue
            try:
                if not k.startswith('-'):
                    # Translate long parameter names to qsub flags.
                    k = argmap[k]
                pbsargs[k] = v
            except KeyError:
                # Was a bare `except:`; only the argmap lookup can fail here.
                log.warning('Unrecognized long argument passed to Torque CLI plugin: %s' % k)
        template_pbsargs = ''
        for k, v in pbsargs.items():
            template_pbsargs += '#PBS %s %s\n' % (k, v)
        return dict(headers=template_pbsargs)

    def submit(self, script_file):
        """Command line to submit a job script."""
        return 'qsub %s' % script_file

    def delete(self, job_id):
        """Command line to cancel a job."""
        return 'qdel %s' % job_id

    def get_status(self, job_ids=None):
        """Command line to query all jobs (XML output)."""
        return 'qstat -x'

    def get_single_status(self, job_id):
        """Command line to query a single job (full, line-oriented output)."""
        return 'qstat -f %s' % job_id

    def parse_status(self, status, job_ids):
        """Parse `qstat -x` XML into a {job_id: job state} dict.

        Returns None when no valid XML <Data> blob is found in the output.
        """
        # in case there's noise in the output, find the big blob 'o xml
        tree = None
        rval = {}
        for line in status.strip().splitlines():
            try:
                tree = et.fromstring(line.strip())
                assert tree.tag == 'Data'
                break
            except Exception:
                tree = None
        if tree is None:
            log.warning('No valid qstat XML return from `qstat -x`, got the following: %s' % status)
            return None
        for job in tree.findall('Job'):
            job_id = job.find('Job_Id').text  # renamed from `id` (shadowed builtin)
            if job_id in job_ids:
                state = job.find('job_state').text
                # map PBS job states to Galaxy job states.
                rval[job_id] = self._get_job_state(state)
        return rval

    def parse_single_status(self, status, job_id):
        """Parse `qstat -f` output and return the job's state."""
        for line in status.splitlines():
            parts = line.split(' = ')
            # `qstat -f` indents attribute lines, so strip before comparing;
            # the previous exact comparison never matched indented output and
            # always fell through to the OK default.
            if parts[0].strip() == 'job_state':
                return self._get_job_state(parts[1].strip())
        # no state found, job has exited
        return job_states.OK

    def _get_job_state(self, state):
        """Map a one-letter PBS state code to a Galaxy job state.

        Raises KeyError (with a descriptive message) for unknown codes.
        """
        try:
            # Index (don't .get) so an unknown code raises KeyError instead
            # of silently returning None — the except branch was dead before.
            return {
                'E': job_states.RUNNING,
                'R': job_states.RUNNING,
                'Q': job_states.QUEUED,
                'C': job_states.OK
            }[state]
        except KeyError:
            raise KeyError("Failed to map torque status code [%s] to job state." % state)
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/jobs/runners/util/cli/job/torque.py | Python | gpl-3.0 | 3,726 | [
"Galaxy"
] | 5d3476b438d37e60a21e60a2ad61a9708da32c00cbbf87405dd4f3f3c2e23200 |
#adam-use# I THINK: you run redsequence.py before plot_rederr.py. This code identifies red-sequence galaxies!
# usage: python redsequence [options]
# Identifies and fits the red sequence using apparent magnitude and one color.
# Option of identifying star column and only using objects larger.
import pylab

# Matplotlib/pylab configuration for PostScript output via xpdf distiller.
params_pylab = {'backend' : 'ps',
                'text.usetex' : False,
                'ps.usedistiller' : 'xpdf',
                'ps.distiller.res' : 6000}
pylab.rcParams.update(params_pylab)
fig_size = [5,5]
# Font sizes and figure geometry for the diagnostic plots.
# NOTE(review): 'scatter.s' and 'scatter.marker' are not standard
# matplotlib rcParams keys — confirm the matplotlib version in use
# accepts them.
params_pylab = {'axes.labelsize' : 14,
                'text.fontsize' : 14,
                'legend.fontsize' : 12,
                'xtick.labelsize' : 10,
                'ytick.labelsize' : 10,
                'scatter.s' : 0.1,
                'scatter.marker': 'o',
                'figure.figsize' : fig_size}
pylab.rcParams.update(params_pylab)
def sortit(x, y):
    """cmp-style comparator: orders pairs descending by first element.

    Ties are treated as "greater" (returns 1), matching the original.
    """
    return -1 if x[0] > y[0] else 1
def sortit_rev(x, y):
    """cmp-style comparator: orders pairs ascending by first element.

    Ties are treated as "less" (returns -1), matching the original.
    """
    return 1 if x[0] > y[0] else -1
def fit_starcolumn(size, savepng):
    """
    Histogram the object sizes to locate the stellar locus ("star column")
    and return (left, right) size bounds around its peak.  Also writes a
    diagnostic plot of the histogram and bounds to `savepng`.

    NOTE(review): indentation was lost in the archived copy of this file;
    the block structure below is reconstructed from context — verify
    against the upstream source before relying on exact control flow.
    """
    import pylab, scipy
    # Overlapping histogram passes with shifted bin edges give a finer
    # effective resolution on the size distribution.
    boxes = []
    coords = []
    for increment in [0,0.03]:# ,0.075,0.1]: #1,0.125,0.15,0.175]:
        #print size
        a,b,varp = pylab.hist(size,bins=scipy.arange(0+increment,2+increment,0.06))
        #print a, b
        boxes += list(a)
        coords += list(b[:-1] + scipy.ones(len(b[:-1]))*(0.03))
    tot = scipy.array(boxes).sum()
    print tot
    # NOTE: `all`, `sum`, `max`, `min` below shadow Python builtins.
    all = zip(coords,boxes)
    all.sort(sortit_rev)  # ascending in size (Python 2 cmp-style sort)
    print all
    sum = 0
    max = 0
    min = 1000000
    foundCenter = False
    from copy import copy
    print all, 'all'
    # Walk the bins: find the peak (center) once 5% of the counts have
    # been passed, then the minimum after it; the first significant rise
    # after that minimum marks the right edge of the star column.
    for x,y in all:
        print x, y, sum, tot
        sum += y
        if float(sum)/tot > 0.05:
            if y > max and not foundCenter:
                max = copy(y)
                max_x = copy(x)
                print 'max', max
            if y/max < 0.98 and not foundCenter:
                center = copy(max_x)
                print center, 'center'
                foundCenter = True
        if foundCenter:
            print 'min', min, y
            if min > y:
                min = copy(y)
                min_x = copy(x)
                print y, min
            if y/float(min) > 1.05:
                right = copy(min_x)
                break
    # Mirror the right-hand offset to get the left edge.
    left = center - 1.*abs(right-center)
    print center,right, 'center, right'
    print len(boxes), len(coords)
    pylab.clf()
    pylab.scatter(coords,boxes)
    pylab.xlim(0,2.5)
    pylab.xlabel('SIZE (arcsec)')
    pylab.axvline(x=center,ymin=-10,ymax=10)
    pylab.axvline(x=left,ymin=-10,ymax=10)
    pylab.axvline(x=right,ymin=-10,ymax=10)
    pylab.savefig(savepng)
    pylab.clf()
    return left, right
def fit(colors, c1, c2, m, savepng):
    '''
    Essentially fine-resolution binning: histogram the galaxy `colors`
    (c1 - c2) with shifted bin edges, locate the red-sequence peak, and
    return (left, right) color bounds around it.  Writes a diagnostic
    histogram plot to `savepng`.

    NOTE(review): indentation was lost in the archived copy of this file;
    the block structure below is reconstructed from context — verify
    against the upstream source before relying on exact control flow.
    '''
    import pylab, scipy
    # Overlapping histogram passes with shifted bin edges.
    boxes = []
    coords = []
    for increment in [0,0.025,0.05,0.075,0.1,0.125,0.15,0.175]:
        a,b,varp = pylab.hist(colors,bins=scipy.arange(-4+increment,4+increment,0.2))
        #print a, b
        boxes += list(a)
        coords += list(b[:-1] + scipy.ones(len(b[:-1]))*(0.1))
    print len(colors), colors, 'len'
    tot = scipy.array(boxes).sum()
    print tot
    solutions = []
    # Only the red-to-blue ('reverse') scan is active; 'forward' disabled.
    for version in ['reverse']: #:,'forward']:
        left = -99
        center = -99
        # NOTE: `all`, `sum`, `min` below shadow Python builtins.
        all = zip(coords,boxes)
        if version == 'reverse':
            all.sort(sortit)
        if version == 'forward':
            all.sort(sortit_rev)
        print all
        pylab.clf()
        pylab.scatter(coords,boxes)
        #pylab.show()
        print 'plotted'
        sum = 0
        max_y = 0
        min = 1000000
        foundCenter = False
        from copy import copy
        print all, 'all'
        rev = zip(all[:][1],all[:][0])
        a = zip(boxes, coords)
        a.sort()
        peak = a[-1][1]  # color coordinate of the tallest bin
        foundCenter = False
        # Walk the bins: find the peak (center), then the minimum after it;
        # the first significant rise past the minimum marks the left bound.
        for x,y in all:
            print x, y, sum, tot
            print max_y, min, foundCenter, peak
            sum += y
            #print all[-1][0], all[0][0]
            if sum > 0:
                if float(tot)/sum > 0.05 and y > 100: #True: # (all[-1][0] < all[0][0] and x < peak ) or (all[-1][0] > all[0][0] and x > peak ): #
                    if y > max_y and not foundCenter:
                        max_y = copy(y)
                        max_x = copy(x)
                        print 'max', max_y
                    print y/max_y, (max_y-y)
                    if y/max_y < 0.98 and (max_y-y) > 15 and not foundCenter:
                        center = copy(max_x)
                        print center, 'center', max_y
                        foundCenter = True
                    #center = peak
                    if foundCenter:
                        print 'min', min, y
                        if min > y:
                            min = copy(y)
                            min_x = copy(x)
                            print y, min, x
                        if y/float(min) > 1.04:
                            left = copy(min_x)
                            print peak, left, center, 'FOUND ONE'
                            break
        if left != -99:
            # Symmetrize: right bound mirrors the left offset (with floors).
            if left > center:
                left = center - max(0.05,abs(center - left))
            right = center + max(0.4,1.*abs(left-center))
            print center, left, right, peak
            print right - peak, peak - left
            if True: #right - peak > 0 and peak - left > 0:
                solutions.append([center,left,right])
    ''' pick out the narrower solution '''
    if len(solutions) > 1:
        if solutions[0][0] - solutions[0][1] < solutions[1][0] - solutions[1][1]:
            solution = solutions[0]
        else: solution = solutions[1]
    else: solution = solutions[0]
    center, left, right = solution
    print center, left, right
    print len(boxes), len(coords)
    #print boxes, coords
    pylab.clf()
    pylab.scatter(coords,boxes)
    pylab.xlabel(c1 + ' - ' + c2)
    pylab.axvline(x=center,ymin=-10,ymax=10)
    pylab.axvline(x=left,ymin=-10,ymax=10)
    pylab.axvline(x=right,ymin=-10,ymax=10)
    pylab.savefig(savepng)
    return left, right
def run():
    """
    Command-line driver: parse options, load the photometry catalog,
    optionally remove the stellar locus, select galaxies near the cluster
    center, fit the red sequence in color-magnitude space, and write
    diagnostic plots plus the fit parameters to the output location.

    NOTE(review): indentation was lost in the archived copy of this file;
    the block structure below is reconstructed from context — verify
    against the upstream source before relying on exact control flow.
    """
    from optparse import OptionParser
    usage = "usage: python redsequence [options] \n\nIdentifies and fits the red sequence using apparent magnitude and one color.\nOption of identifying star column and only using objects larger.\n"
    parser = OptionParser(usage)
    parser.add_option("-c", "--cluster",
                      help="name of cluster (i.e. MACS0717+37)")
    parser.add_option("-d", "--detectband",
                      help="detection band (i.e. W-J-V)",default='W-J-V')
    parser.add_option("--c1",
                      help="name of first filter in 'galaxy color' (i.e. MAG_APER1-SUBARU-COADD-1-W-J-V)",default='MAG_APER1-SUBARU-COADD-1-W-J-V')
    parser.add_option("--c2",
                      help="name of second filter in 'galaxy color' (i.e. MAG_APER1-SUBARU-COADD-1-W-C-RC)",default='MAG_APER1-SUBARU-COADD-1-W-C-RC')
    parser.add_option("-m",'--m',
                      help="name of filter to be used as 'galaxy magnitude' (default is '--c2')",default=None)
    parser.add_option("-s", "--starcolumn",
                      help="add to filter out star column",action="store_true",default=False)
    parser.add_option('--lm',
                      help="limiting magnitude applied to 'galaxy magnitude'",default=False)
    parser.add_option('-r',"--center_radius",
                      help="maximum galaxy radius from cluster center (in arcsec) (default=440)",default=660.)
    parser.add_option("-l","--location",
                      help="write output directory",default=None)
    parser.add_option("-w","--web",
                      help="instead write to web (Pat's space)",action="store_true",default=False)
    parser.add_option("-z", "--z",
                      help="see what the photometric redshifts are of redsequence galaxies (requires redshift catalog, obviously)",action='store_true',default=False)
    parser.add_option("--cat",
                      help="name of alternate input catalog (if you don't want to use the default photometry catalog)",default=None)
    parser.add_option("--existingcolor",
                      help="use existing colors of red sequence fit",action="store_true",default=False)
    parser.add_option("-e","--existing",
                      help="use existing red sequence fit",action="store_true",default=False)
    (options, args) = parser.parse_args()
    # The 'galaxy magnitude' defaults to the red filter of the color.
    if options.m is None:
        options.m = options.c2
    if options.location is not None and options.web:
        print 'Either specify location or web but not both at once'
        raise Exception
    if options.location is None and options.web is False:
        options.location = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + options.cluster + '/PHOTOMETRY_' + options.detectband + '_iso/'
    elif options.web:
        options.location = '/nfs/slac/g/ki/ki04/pkelly/photoz/' + options.cluster + '/CWWSB_capak.list/'
    if options.location[-1] != '/':
        options.location = options.location + '/'
    print options.location
    import os
    # Optionally reuse an existing red-sequence fit (slope/intercepts and
    # band selection) instead of refitting.
    if options.existingcolor or options.existing:
        dir = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + options.cluster + '/LENSING_' + options.detectband + '_' + options.detectband + '_aper/good/'
        dict = {}
        print 'file', dir + 'redseqfit_2.orig'
        redseqfit = open(dir + 'redseqfit_2.orig','r').readlines()
        slope = float(redseqfit[1].split('=')[1].split('*')[0])
        intercept = float(redseqfit[1][:-1].split('+')[1])
        upper_intercept = float(redseqfit[3][:-1].split('+')[1])
        lower_intercept = float(redseqfit[4][:-1].split('+')[1])
        polycoeffs = [slope, intercept]
        # Invert the 1.2-sigma band written by a previous run to recover std.
        std = (upper_intercept - intercept) / 1.2
        info = open(dir + 'redseq_all.params','r').readlines()
        print info, dir + 'redseq_all.params'
        for l in info:
            if len(l.split(':')) > 1:
                key, value = l[:-1].split(': ')
                dict[key] = value
        print dict
        #options.center_radius = dict['radcut']

        def prefix(filt):
            # NOTE(review): `is` compares identity, not equality; this only
            # works via small-string interning — should be `==`.
            if filt is 'g' or filt is 'r' or filt is 'u':
                return 'MAG_APER1-MEGAPRIME-COADD-1-' + filt
            else:
                return 'MAG_APER1-SUBARU-COADD-1-' + filt

        dict['slope'] = slope
        dict['intercept'] = intercept
        dict['lower_intercept'] = lower_intercept
        dict['upper_intercept'] = upper_intercept
        if options.existing:
            options.m = prefix(dict['xmag'])
            options.c1 = prefix(dict['greenmag'])
            options.c2 = prefix(dict['redmag'])
            options.lm = dict['magcut2']
            print 'finished'
        elif options.existingcolor:
            options.c1 = prefix(dict['greenmag'])
            options.c2 = prefix(dict['redmag'])
    cluster = options.cluster
    c1 = options.c1
    c2 = options.c2
    m = options.m
    # Load the photometric-redshift catalog when -z is requested.
    if options.z:
        import astropy, astropy.io.fits as pyfits
        cat = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.APER1.1.CWWSB_capak.list.all.bpz.tab'
        p = pyfits.open(cat)
        photoz = p['STDTAB'].data
        zero_IDs = len(photoz[photoz.field('SeqNr')==0])
        if zero_IDs > 0:
            print 'Wrong photoz catalog?', cat
            print str(zero_IDs) + ' many SeqNr=0'
            raise Exception
        print cat
    if options.cat is None: #not hasattr(options,'cat'):
        input_mags = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.slr.alter.cat'
    else: input_mags = options.cat
    import astropy, astropy.io.fits as pyfits, os, sys, pylab, do_multiple_photoz, commands, re, math, scipy
    from copy import copy
    print 'input magnitude catalog:', input_mags, options.cat, hasattr(options,'cat')
    filterlist = do_multiple_photoz.get_filters(input_mags,'OBJECTS')
    #print filterlist
    print input_mags
    w = pyfits.open(input_mags)
    mags = w['OBJECTS'].data
    #print mags.field('Xpos')
    # Drop objects with sentinel (< -90) magnitudes in any used band,
    # keeping the photo-z catalog aligned row-for-row.
    mask = mags.field(c1) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    mask = mags.field(c2) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    mask = mags.field(m) > -90
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    mask = mags.field('Flag') == 0
    if options.z: photoz_star = photoz[mask]
    mags_star = mags[mask]
    #mask = mags_star.field(c2) < 23
    ''' get cluster redshift '''
    command = 'grep ' + cluster + ' ' + '/nfs/slac/g/ki/ki05/anja/SUBARU/' + '/clusters.redshifts '
    print command
    cluster_info = commands.getoutput(command)
    cluster_redshift = float(re.split('\s+',cluster_info)[1])
    print cluster_redshift
    if options.lm:
        mag_cut = float(options.lm)
    else:
        ''' compute faint magnitude cutoff '''
        # Distance-modulus style scaling of the cutoff with redshift.
        if m[-6:] == 'W-C-RC' or m[-1] == 'r':
            mag_cut = 21.5 + 2.5*math.log10((cluster_redshift/0.19)**2.)
        if m[-5:] == 'W-J-V' or m[-5:] == 'W-J-B' or m[-1] == 'g':
            mag_cut = 22. + 2.5*math.log10((cluster_redshift/0.19)**2.)
    if not options.center_radius:
        ''' compute radial size of cut '''
        # NOTE(review): `z` is undefined here — this branch would raise
        # NameError if reached (the option default 660. is truthy, so it
        # normally is not).
        options.center_radius = 400 / (z/0.4)
        options.center_radius = 400
    print mag_cut, options.lm
    if True: #not options.existing:
        ''' identify star column (optional) '''
        if options.starcolumn:
            savepng = '/nfs/slac/g/ki/ki04/pkelly/photoz/' + cluster + '/seeing.png'
            left, right = fit_starcolumn(mags_star[mask].field('FLUX_RADIUS')*0.2,savepng)
            savepng = options.location + 'column.png'
            pylab.axvline(x=left,ymin=-10,ymax=100)
            pylab.axvline(x=right,ymin=-10,ymax=100)
            pylab.scatter(mags.field('FLUX_RADIUS')*0.2,mags.field(m),s=0.25)
            pylab.xlim(0,2.5)
            pylab.xlabel('SIZE (arcsec)')
            pylab.ylabel(m)
            pylab.savefig(savepng)
            pylab.clf()
            # Keep only objects larger than the star column.
            mask = mags.field('FLUX_RADIUS')*0.2 > right
            if options.z: photoz = photoz[mask]
            mags = mags[mask]
    ''' select galaxies near center of field '''
    #options.center_radius=240
    # Radial cut around pixel (5000, 5000), 0.2 arcsec/pixel.
    mask = ((mags.field('Xpos') - 5000.*scipy.ones(len(mags)))**2. + (mags.field('Ypos') - 5000.*scipy.ones(len(mags)))**2.)**0.5 * 0.2 < float(options.center_radius)
    if options.z: photoz = photoz[mask]
    mags = mags[mask]
    print len(mags)
    if options.z: print len(photoz)
    from copy import copy
    mags_mask = copy(mags)
    x = copy(mags.field(m))
    y = copy(mags.field(c1)-mags.field(c2))
    print mags.field(c1), mags.field(c2), c1, c2
    mask = x < mag_cut
    print mag_cut
    #print x, y
    savedir= options.location
    os.system('mkdir -p ' + savedir)
    savepng = options.location + 'redselection.png'
    print options.center_radius, len(y[mask])
    # Locate the red-sequence color band from the color histogram.
    left, right = fit(y[mask],c1,c2,m,savepng)
    if options.z:
        mask = photoz.field('NFILT') > 3
        reg_mags = mags_mask[mask]
        reg_photoz = photoz[mask]
        mask = photoz.field('BPZ_ODDS') > 0.95
        reg_mags = mags_mask[mask]
        reg_photoz = photoz[mask]
        print len(reg_photoz)
        print 'making reg'
        reg = open('all.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(reg_mags.field('Xpos'))):
            reg.write('circle('+str(reg_mags.field('Xpos')[i]) + ',' + str(reg_mags.field('Ypos')[i]) + ',' + str(5) + ') # color=red width=2 text={' + str(reg_photoz.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
        print 'finished reg'
    # Apply the magnitude cut to the working arrays.
    mask = x < mag_cut
    if options.z:
        photoz2 = photoz[mask]
    mags_mask = mags_mask[mask]
    x2 = x[mask]
    y2 = y[mask]
    #print sorted(x2)
    print savepng
    print left, right
    if not options.existing:
        # Restrict to the fitted color band before the linear fit.
        mask = y2 > left
        if options.z:
            photoz2 = photoz2[mask]
        mags_mask = mags_mask[mask]
        x2 = x2[mask]
        y2 = y2[mask]
        mask = y2 < right
        if options.z:
            photoz2 = photoz2[mask]
        mags_mask = mags_mask[mask]
        x2 = x2[mask]
        y2 = y2[mask]
    # First-pass linear fit of color vs. magnitude.
    if not options.existing: polycoeffs = scipy.polyfit(x2,y2,1)
    print polycoeffs
    yfit = scipy.polyval(polycoeffs, x2)
    print x2, yfit
    if not options.existing: std = scipy.std(abs(yfit - y2))
    print std
    # Sigma-clip at 2.5 std and refit.
    mask = abs(yfit - y2) < std*2.5
    if options.z: photoz3 = photoz2[mask]
    x3 = x2[mask]
    y3 = y2[mask]
    if not options.existing: polycoeffs = scipy.polyfit(x3,y3,1)
    print polycoeffs
    # NOTE(review): here yfit is evaluated on sorted(x2) while y2 is
    # unsorted, so the residual mask below pairs mismatched points —
    # looks suspect, confirm intent against upstream.
    yfit = scipy.polyval(polycoeffs, sorted(x2))
    print x2, yfit
    if not options.existing: std = scipy.std(abs(yfit - y2))
    print std
    std_fac = 1.2
    # Final red-sequence membership band: within 1.2 std of the fit.
    mask = abs(yfit - y2) < std*std_fac
    if options.z:
        photoz2 = photoz2[mask]
        mags_mask = mags_mask[mask]
        print photoz2.field('SeqNr')
        print photoz2.field('BPZ_Z_B')
        # Write the SeqNr list of red-sequence members.
        fred = '/nfs/slac/g/ki/ki05/anja/SUBARU/' + cluster + '/PHOTOMETRY_' + options.detectband + '_aper/' + cluster + '.redseq'
        f = open(fred,'w')
        for id in photoz2.field('SeqNr'):
            f.write(str(id) + '\n')
        f.close()
        reg = open('regseq.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags_mask.field('Xpos'))):
            reg.write('circle('+str(mags_mask.field('Xpos')[i]) + ',' + str(mags_mask.field('Ypos')[i]) + ',' + str(5) + ') # color=green width=2 text={' + str(photoz2.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    pylab.clf()
    savepng = options.location + 'redhistogram.png'
    savepdf = options.location + 'redhistogram.pdf'
    if options.z:
        # Histogram of member photo-zs around the cluster redshift.
        lower_lim = cluster_redshift - 0.3
        if lower_lim < 0: lower_lim = 0.0001
        print photoz2.field('BPZ_Z_B')
        a,b,varp = pylab.hist(photoz2.field('BPZ_Z_B'),bins=scipy.arange(lower_lim,cluster_redshift+0.3,0.01),color='red')
        pylab.axvline(x=cluster_redshift,ymin=0,ymax=100,color='blue',linewidth=3)
        pylab.xlabel('Redshift')
        pylab.ylabel('Galaxies')
        pylab.savefig(savepng)
        pylab.savefig(savepdf)
        reg = open('reg.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags_mask.field('Xpos'))):
            reg.write('circle('+str(mags_mask.field('Xpos')[i]) + ',' + str(mags_mask.field('Ypos')[i]) + ',' + str(5) + ') # color=blue width=2 text={' + str(photoz2.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    pylab.clf()
    # Color-magnitude diagram with the fit and the membership band.
    pylab.plot(sorted(x2),yfit,'b-')
    pylab.plot(sorted(x2),yfit+scipy.ones(len(yfit))*std*std_fac,'b-')
    pylab.plot(sorted(x2),yfit-scipy.ones(len(yfit))*std*std_fac,'b-')
    pylab.scatter(x,y,color='red',s=0.5)
    pylab.axhline(y=left,xmin=-10,xmax=100)
    pylab.axvline(x=mag_cut,ymin=-10,ymax=10)
    pylab.axhline(y=right,xmin=-10,xmax=100)
    pylab.xlabel(m)
    pylab.ylabel(c1 + ' - ' + c2)
    if options.z:
        # Overplot objects whose photo-z matches the cluster redshift.
        mask = abs(photoz.field('BPZ_Z_B') - cluster_redshift) < 0.04
        mags = mags[mask]
        photoz = photoz[mask]
        mask = photoz.field('NFILT') > 4
        mags = mags[mask]
        photoz = photoz[mask]
        print 'priormag'
        print photoz.field('priormag')
        print 'nfilt'
        print photoz.field('NFILT')
        import pylab
        x = mags.field(m)
        y = mags.field(c1)-mags.field(c2)
        pylab.scatter(x,y,s=0.5)
        reg = open('reg.reg','w')
        reg.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n')
        for i in range(len(mags.field('Xpos'))):
            reg.write('circle('+str(mags.field('Xpos')[i]) + ',' + str(mags.field('Ypos')[i]) + ',' + str(5) + ') # color=red width=2 text={' + str(photoz.field('BPZ_Z_B')[i]) + '}\n')
        reg.close()
    pylab.xlim(sorted(x)[0],sorted(x)[-2])
    span = (sorted(y)[-2]-sorted(y)[2])/2
    if span > 1: span=1
    median = scipy.median(scipy.array(y))
    pylab.ylim(median -2, median + 2)
    savepng = options.location + 'cmd.png'
    pylab.savefig(savepng)
    pylab.clf()
    # Sky-position plot of the surviving objects.
    pylab.scatter(mags.field('Xpos'),mags.field('Ypos'), s=0.02)
    pylab.xlim([0,10000])
    pylab.ylim([0,10000])
    pylab.xlabel('X Pixel')
    pylab.ylabel('Y Pixel')
    savepng = options.location + '/positions.png'
    print savepng
    pylab.savefig(savepng)
    # Human-readable summary of the fit and cuts.
    s = "\nBest fit: y = "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]) + '\n'
    s += "\nCut: y < "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]+std_fac*std) + '\n'
    s += "Cut: y > "+str(polycoeffs[0])+"*x +"+str(polycoeffs[1]-std_fac*std ) + '\n'
    s += "x < "+str(mag_cut) + '\n'
    s += 'x = ' + m + '\n'
    s += 'y = ' + c1 + ' - ' + c2 + '\n'
    print s
    f = open(options.location + '/redseqfit','w')
    f.write(s)
    f.close()
    from datetime import datetime
    t2 = datetime.now()
    print options.location
    # Summary HTML page referencing all the diagnostic plots.
    f = open(options.location + '/redsequence.html','w')
    f.write('<html><tr><td>' + t2.strftime("%Y-%m-%d %H:%M:%S") + '</td></tr><tr><td><h2>Photometric Redshifts of the Red Sequence</h2></td></tr><tr><td><img src="redhistogram.png"></img></td></tr><tr><td><img src="seeing.png"></img></td></tr><<tr><td><img src="column.png"></img></td></tr><tr><td><img src="redselection.png"></img></td></tr><tr><td><img src="cmd.png"></img></td></tr><tr><td><img src="positions.png"></img></td></tr><tr><td>' + s.replace('\n','<br>') + '</td></tr> </html>')
    print 'Wrote output to:', options.location
    print 'Best fit parameters in:', options.location + '/redseqfit'
# Script entry point.
if __name__ == '__main__':
    run()
| deapplegate/wtgpipeline | redsequence.py | Python | mit | 24,118 | [
"Galaxy"
] | 09ab1400a77e5ca045e83558ae914e0fe3aedec3fa3068b3e14f1d17c4c56475 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasgallerybase import HasGalleryBase
#-------------------------------------------------------------------------
# "People with media object reference "
#-------------------------------------------------------------------------
class HavePhotos(HasGalleryBase):
    """Rule matching people who have a given number of media references."""

    name = _('People with <count> media')
    description = _("Matches people with a certain number of items in the gallery")

    def __init__(self, arg, use_regex=False):
        # Filters saved before Gramps 3.1 ("HasPhotos") carried no
        # arguments; map that legacy zero-argument form onto the
        # equivalent "more than 0 items" rule.
        effective_arg = arg if len(arg) else ["0", 'greater than']
        HasGalleryBase.__init__(self, effective_arg, use_regex)
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/person/_hasgallery.py | Python | gpl-2.0 | 2,046 | [
"Brian"
] | 9704091e48f5298a37f724a736bdc7c36b3b63b6fc774af730adaae4b8de1336 |
"""
hull_cost_model.py
Matthew Woodruff (mjw5407@arl.psu.edu)
The Pennsylvania State University
Applied Research Laboratory
2013
Compute the cost and lead time for the hull based on physical
description
"""
import copy
import argparse
import math
import json
import StringIO
from collections import namedtuple
class NoHeaderError(Exception):
    """Raised when a table lacks the required 'header' row."""
def associate(table):
    """
    Convert a list-of-lists table to a dict-of-dicts.

    The input must contain a row whose first element is "header"; the
    remaining elements of that row become the inner-dict keys.  Every
    other row becomes one entry keyed by its first element, mapping
    header names to the row's remaining values.

    Raises NoHeaderError when no "header" row is present.
    """
    # stage 1: key each row by its first element (dict of lists).
    stage1 = {row[0]: row[1:] for row in table}
    header = stage1.get("header", None)
    if header is None:
        msg = "Need a header to make an associative table.\n"\
              "First row was {0}".format(table[0])
        raise NoHeaderError(msg)
    # stage 2: zip each remaining row against the header (dict of dicts).
    # Uses .items() instead of the Python-2-only .iterkeys(), so this
    # works under both Python 2 and 3.
    return {key: dict(zip(header, values))
            for key, values in stage1.items()
            if key != "header"}
class CostModelError(Exception):
    """Raised when the cost model is given incomplete input tables."""
class HullCostAndTime(object):
"""
the CostModel contains the logic for each kind of hull production
activity
"""
    def __init__(self, tables):
        """
        tables: dict of tables (each table is a list of lists)

        Because of shared setups, the model is stateful and a new
        instance should be used for each evaluation.
        """
        # check for appropriate keys
        expected = ["model", "shared_setups"]
        for table in expected:  # raise error if missing data
            if tables.get(table, None) is None:
                msg = "Hull Cost Model requires a table named {0}"
                raise CostModelError(msg.format(table))
        self.tables = tables  # for now
        self.model = associate(tables["model"])
        self.operations = self.model.keys()
        # NOTE(review): this mutates the caller's "materials" table in
        # place, turning each data row's first cell into a tuple so it
        # can serve as a hashable (material, thickness) dict key.
        for row in tables["materials"][1:]:
            row[0] = tuple(row[0])
        self.materials = associate(tables["materials"])
        self.material_types = associate(tables["material_type"])
        self.opnames = dict(tables["operations"])
        # Tracks setups already charged, shared across operations.
        self.setups_counted = []
def material_areas(self, plates):
"""
Compute total area for each (material, thickness)
"""
areas = {}
for plate in plates:
key = plate["key"]
material_type = self.material_types[plate["material"]]
area = areas.get(key, 0)
area += plate["area"]
areas[key] = area
return areas
def material_sheets(self, areas):
"""
Compute number of sheets for each material.
We're not solving the cutting stock problem, and we're
not concerned with panels that are too big to cut out
of a single sheet.
Instead, we compute total area for each type of steel, add
ten percent for scrap, then divide by the area of a sheet
to get the total number of sheets needed.
"""
sheets = {}
for key, area in areas.iteritems():
area *= 1.1 # ten percent scrap
sheetlength = self.materials[key]["length"]
sheetwidth = self.materials[key]["width"]
number = area / (sheetlength * sheetwidth)
sheets[key] = math.ceil(number) # round up to full sheet
return sheets
def nsteelsheets(self, plates):
"""
Total number of steel sheet sides = 2 * number of sheets
(Info needed for blasting.)
"""
areas = self.material_areas(plates)
sheets = self.material_sheets(areas)
nsheets = sum([sheets[key] for key in sheets.iterkeys() if
self.material_types[key[0]]["type"] == "Steel"])
return 2.0 * nsheets
def cuttime(self, plates, technology):
"""
Cutting time is computed the same way for both Plasma
and Laser.
Total cut time depends on thickness, perimeter,
and material of panels.
Assuming you have to cut every edge and you can't use the
edges of the sheets that the plates are cut from.
"""
totaltime = 0
for plate in plates:
key = plate["key"]
material = self.materials[key]
if material["cut"] == technology:
perimeter = 2.0 * plate["perimeter"] # 2 sides
speed = material["cutspeed"]
totaltime += perimeter / speed
return totaltime
def plasmacuttime(self, plates):
return self.cuttime(plates, "Plasma")
def lasercuttime(self, plates):
return self.cuttime(plates, "Laser")
def waterjet(self, plates):
return self.cuttime(plates, "Waterjet")
def perimeter(self, plates, kind):
"""
Used to compute edge prep time. Steel and aluminum are
separate.
"""
perimeter = 0
for plate in plates:
if self.material_types[plate["material"]]["type"] == kind:
perimeter += 2.0 * plate["perimeter"] # 2 sides
return perimeter
def steelperimeter(self, plates):
return self.perimeter(plates, "Steel")
def alperimeter(self, plates):
return self.perimeter(plates, "Aluminum")
def npanels(self, plates):
"""
Total number of panels (all plates) is needed for
build and tack operation
"""
return len(plates)
def weldtime(self, plates, technology):
"""
Welding time depends on perimeter of plates.
We're apparently not concerned that we might need to weld
incompatible plates.
"""
totaltime = 0
for plate in plates:
key = plate["key"]
material = self.materials[key]
if material["weld"] == technology:
perimeter = 2.0 * plate["perimeter"] # 2 sides
speed = material["weldspeed"]
totaltime += (perimeter / (2.0*speed))
return totaltime
def migtime(self, plates):
return self.weldtime(plates, "MIG")
def tigtime(self, plates):
return self.weldtime(plates, "TIG")
def pwmtime(self, plates):
"""
Return 1 for one post-weld machining
"""
return 1
def hullarea(self, plates):
"""
Hull surface area: needed for painting, includes both sides,
units are square meters.
"""
in2 = 2.0 * sum([plate["area"] for plate in plates])
factor = 0.00064516 # to convert to square meters
return in2 * factor
def shipprep(self, plates):
"""
Return 1 for one shipping prep
"""
return 1
def howmuch(self, op, plates):
"""
for given op and plates, how much work needs to be
done? Units vary by operation.
"""
qtys = {
"blast" : self.nsteelsheets,
"plasma" : self.plasmacuttime,
"laser" : self.lasercuttime,
"steelprep" : self.steelperimeter,
"alprep" : self.alperimeter,
"buildntack" : self.npanels,
"mig" : self.migtime,
"tig" : self.tigtime,
"pwm" : self.pwmtime,
"paint" : self.hullarea,
"shipprep" : self.shipprep,
"waterjet" : self.waterjet
}
try:
qty = qtys[op] # KeyError if invalid op
except KeyError:
msg = "{0}: Model has no formula for "\
"work quantity".format(op)
raise CostModelError(msg)
return qty(plates)
def howfast(self, op):
"""
Look up rate in the table. Some rates are 1, meaning that
the quantity of work is already expressed in minutes.
"""
return self.model[op]["workrate"]
def setup(self, op):
"""
Some operations share setups, so if they are both present,
only count one of the setups.
Assume that this method is only called if there is in fact
a setup for the operation.
"""
for group in self.tables["shared_setups"]:
if op in group:
for otherop in group:
if otherop in self.setups_counted:
self.setups_counted.append(op)
return 0
self.setups_counted.append(op)
return self.model[op]["setup"]
def ohqty(self, op, size, laborneeded):
"""
A special exception for painting: overhead is charged by
surface area rather than time.
"""
if op in ["paint"]:
return size
else:
return laborneeded
def prepare_plates(self, plates):
"""
Check that material and dimensional data are present in the
model for all plates
Annotate each plate with its key
Convert units from mm to inches
"""
plates = copy.copy(plates)
counter = 0
for plate in plates:
fields = ["material", "thickness", "area", "perimeter"]
for field in fields:
data = plate.get(field, None)
if data is None:
msg = "No {0} specified for plate {1}"
raise CostModelError(msg.format(field, counter))
material = (plate["material"], plate["thickness"])
plate["key"] = material
material_data = self.materials.get(material, None)
if material_data is None:
msg = "No material data for ({0}, {1}) (plate {2})"
raise CostModelError(
msg.format(material[0], material[1], counter))
# Excel model uses inches
plate["area"] = plate["area"] / 25.4 ** 2 # mm2 to in2
plate["perimeter"] = plate["perimeter"] / 25.4 # mm to in
counter += 1
return plates
def build_cost_and_lead_time(self, plates):
"""
plates: list of summary data about plates
(material, thickness, perimeter, area)
Note that we charge by the hour but work by the minute,
so we convert laborneeded to hours
"""
costs = []
times = []
for op in self.operations:
size = self.howmuch(op, plates)
workrate = self.howfast(op)
if size > 0:
setup = self.setup(op)
else:
setup = 0
laborneeded = (setup + size / workrate) / 60.0
labqty = self.model[op]["labqty"]
labrate = self.model[op]["labrate"]
ohqty = self.ohqty(op, size, laborneeded)
ohrate = self.model[op]["ohrate"]
cost = laborneeded * labqty * labrate \
+ ohqty * ohrate
time = laborneeded
times.append(time)
costs.append(cost)
header = ["operation", "cost", "time"]
operations = [self.opnames[op]
for op in self.operations]
table = [dict(zip(header, record))
for record in zip(operations, costs, times)]
return {"table": table,
"cost": sum(costs), "time": sum(times)}
def material_cost_and_lead_time(self, plates):
cost = 0
time = 0
sheets = self.material_sheets(self.material_areas(plates))
for key in sheets.iterkeys():
matl = self.materials[key]
thickness = key[1]
volume = thickness * matl["length"] * matl["width"]
volume *= sheets[key]
mass = volume * self.material_types[key[0]]["density"]
matlcost = mass * matl["cost"]
cost += matlcost
time = max(time, matl["leadtime"])
return (cost, time)
def mass(plates, materials):
    """
    Compute the mass of each plate based on material data.

    Annotates every plate dict with a "mass" entry (side effect) and
    returns the sum of all plate masses.
    """
    lookup = dict((record[0], record[1:]) for record in materials)
    total = 0
    for plate in plates:
        name = plate.get("material", "MIL_DTL_12560")
        # last field of the material record is the density (per cubic inch);
        # unknown materials fall back to a zero-density placeholder
        density = lookup.get(name, [-1, "x", "x", 0.0])[-1]
        volume = plate.get("thickness", 1.5) * plate.get("area", 0)
        plate["mass"] = volume * density
        total += plate["mass"]
    return total
def get_args():
    """
    Parse command-line arguments: a required hull-plates JSON file and
    an optional cost-model data JSON file (both opened for reading).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("hullplates",
                     help="json list of plates in the hull",
                     type=argparse.FileType("r"))
    cli.add_argument("-d", "--model-data",
                     help="json cost model data",
                     default="hull_cost_data.json",
                     type=argparse.FileType("r"))
    return cli.parse_args()
def evaluate(plates, tables):
    """
    Run the full hull cost model over `plates` using `tables`.

    Returns a report dict holding the annotated plates, the
    per-operation build table, and cost/time/mass totals.
    """
    model = HullCostAndTime(tables)
    plates = model.prepare_plates(plates)
    build = model.build_cost_and_lead_time(plates)
    matl_cost, matl_weeks = model.material_cost_and_lead_time(plates)
    matl_time = matl_weeks * 7  # convert weeks to days
    build_cost = build["cost"]
    build_time = build["time"] / 8.0  # convert hours to days
    total_mass = mass(plates, tables["material_type"])
    return {
        "plates": plates,
        "Build": build["table"],
        "Build Cost": build_cost,
        "Build Time": build_time,
        "Material Cost": matl_cost,
        "Material Lead Time": matl_time,
        "Total Cost": build_cost + matl_cost,
        "Total Time": build_time + matl_time,
        "Total Mass": total_mass * 0.453592,  # convert to kg
    }
if __name__ == "__main__":
    args = get_args()
    plates = json.load(args.hullplates)
    tables = json.load(args.model_data)
    # evaluate() returns a single report dict; the old code tried to
    # unpack it into three names, which raised ValueError at runtime.
    report = evaluate(plates, tables)
    print(report)
# vim:ts=4:sw=4:expandtab:fdm=indent:wrap lbr:ai:colorcolumn=70
| pombredanne/metamorphosys-desktop | metamorphosys/META/analysis_tools/iFAB/psu_python_library/hull_cost_model.py | Python | mit | 14,518 | [
"BLAST"
] | 12688fa9b7f38765cd7ea7b392f87b73eb33166e6a2385ff2ca844dcf507beb0 |
"""
`quadrature_tables` are organized as follows::
quadrature_tables = {
'<geometry1>' : {
order1 : QuadraturePoints(args1),
order2 : QuadraturePoints(args2),
...
},
'<geometry2>' : {
order1 : QuadraturePoints(args1),
order2 : QuadraturePoints(args2),
...
},
...
}
**Note** The order for quadratures on tensor product domains (`'2_4'`,
`'3_8'` geometries) in case of composite Gauss quadratures (products of
1D quadratures) holds for each component separately, so the actual
polynomial order may be much higher (up to `order * dimension`).
Naming conventions in problem description files::
`<family>_<order>_<dimension>`
Integral 'family' is just an arbitrary name given by user.
Low order quadrature coordinates and weights copied from The Finite Element
Method Displayed by Gouri Dhatt and Gilbert Touzat, Wiley-Interscience
Production, 1984.
The line integral (geometry '1_2') coordinates and weights are from Abramowitz,
M. and Stegun, I.A., Handbook of Mathematical Functions, Dover Publications,
New York, 1972. The triangle (geometry '2_3') coordinates and weights are from
Dunavant, D.A., High Degree Efficient Symmetrical Gaussian Quadrature Rules for
the Triangle, Int. J. Num. Meth. Eng., 21 (1985) pp 1129-1148 - only rules with
points inside the reference triangle are used. The actual values were copied
from PHAML (http://math.nist.gov/phaml/), see also Mitchell, W.F., PHAML User's
Guide, NISTIR 7374, 2006.
Quadrature rules for the quadrilateral (geometry '2_4') and hexahedron
(geometry '3_8') of order higher than 5 are computed as the tensor product of
the line (geometry '1_2') rules.
Quadrature rules for the triangle (geometry '2_3') and tetrahedron (geometry
'3_4') of order higher than 19 and 6, respectively follow A. Grundmann and
H.M. Moeller, Invariant integration formulas for the n-simplex by combinatorial
methods, SIAM J. Numer. Anal. 15 (1978), 282--290. The generating function was
adapted from pytools/hedge codes (http://mathema.tician.de/software/hedge) by
Andreas Kloeckner.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, assert_, Struct
from sfepy.discrete.simplex_cubature import get_simplex_cubature
import six
simplex_geometries = ['1_2', '2_3', '3_4']
tp_geometries = ['2_4', '3_8']
_msg1 = 'WARNING: quadrature order %s is not available for geometry %s!'
_msg2 = 'WARNING: using %d instead!'
def get_actual_order(geometry, order):
    """
    Return the actual integration order for given geometry.

    Parameters
    ----------
    geometry : str
        The geometry key describing the integration domain,
        see the keys of `quadrature_tables`.
    order : int
        The requested integration order.

    Returns
    -------
    order : int
        If `order` is in quadrature tables it is this
        value. Otherwise it is the closest higher order. If no
        higher order is available, a warning is printed and the
        highest available order is used.
    """
    table = quadrature_tables[geometry]
    if order not in table:
        # searchsorted() requires a sorted sequence - sort explicitly
        # instead of relying on the dict preserving ascending
        # insertion order.
        orders = sorted(table.keys())
        ii = nm.searchsorted(orders, order)
        if ii >= len(orders):
            # no higher order available - warn and use the maximum
            omax = max(orders)
            output(_msg1 % (order, geometry))
            output(_msg2 % omax)
            order = omax
        else:
            # closest tabulated order above the requested one
            order = orders[ii]
    return order
class QuadraturePoints(Struct):
    """
    Representation of a set of quadrature points.

    Parameters
    ----------
    data : array_like
        The array of shape `(n_point, dim + 1)` of quadrature point
        coordinates (first `dim` columns) and weights (the last column).
    coors : array_like, optional
        Optionally, instead of using `data`, the coordinates and weights can
        be provided separately - `data` are then ignored.
    weights : array_like, optional
        Optionally, instead of using `data`, the coordinates and weights can
        be provided separately - `data` are then ignored.
    bounds : (float, float), optional
        The coordinates and weights should correspond to a reference
        element in `[0, 1]` x `dim`. Provide the correct bounds if this is
        not the case.
    tp_fix : float, optional
        The value that is used to multiply the tensor product element
        volume (= 1.0) to get the correct volume.
    weight_fix : float, optional
        The value that is used to multiply the weights to get the correct
        values.
    symmetric : bool
        If True, the integral is 1D and the given coordinates and weights are
        symmetric w.r.t. the centre of bounds; only the non-negative
        coordinates are given.
    """

    @staticmethod
    def from_table(geometry, order):
        """
        Create a new :class:`QuadraturePoints` instance, given reference
        element geometry name and polynomial order. For tensor product
        geometries, the polynomial order is the 1D (line) order.
        """
        table = quadrature_tables[geometry]
        if geometry in simplex_geometries:
            if order > max_orders[geometry]:
                # Requested order exceeds the tabulated rules - generate
                # a simplex cubature instead (see module docstring).
                oo = order // 2
                dim = int(geometry[0])
                # Simplex volume correction: 1/2 for triangles,
                # 1/6 for tetrahedra.
                tp_fix = 0.5 if dim == 2 else 1.0 / 6.0
                coors, weights, exact = get_simplex_cubature(oo, dim)
                qp = QuadraturePoints(None, coors=coors, weights=weights,
                                      bounds=(-1.0, 1.0), tp_fix=tp_fix)
                assert_(exact >= order)
            else:
                order = get_actual_order(geometry, order)
                qp = table[order]
                qp.order = order
        else:
            # Tensor product geometry: total order is dim * (1D order).
            order1d = order
            dim = int(geometry[0])
            order = dim * order1d
            if order <= max_orders[geometry]:
                order = get_actual_order(geometry, order)
                qp = table[order]
                qp.order = order
            else:
                # Build the rule as a tensor product of 1D (line) rules.
                oo = get_actual_order('1_2', order1d)
                qp1d = quadrature_tables['1_2'][oo]
                weights = nm.outer(qp1d.weights, qp1d.weights)
                nc = qp1d.coors.shape[0]
                if dim == 3:
                    weights = nm.outer(qp1d.weights, weights)
                    iz, iy, ix = nm.mgrid[0:nc, 0:nc, 0:nc]
                    coors = nm.c_[qp1d.coors[ix.ravel()],
                                  qp1d.coors[iy.ravel()],
                                  qp1d.coors[iz.ravel()]].copy()
                else:
                    iy, ix = nm.mgrid[0:nc, 0:nc]
                    coors = nm.c_[qp1d.coors[ix.ravel()],
                                  qp1d.coors[iy.ravel()]].copy()
                weights = weights.ravel()
                qp = QuadraturePoints(None, coors=coors, weights=weights)
                qp.order = dim * oo
        return qp

    def __init__(self, data, coors=None, weights=None, bounds=None, tp_fix=1.0,
                 weight_fix=1.0, symmetric=False):
        if coors is None:
            # Split `data` into coordinates (all but the last column)
            # and weights (the last column).
            data = nm.array(data, dtype=nm.float64, ndmin=2)
            self.coors = data[:,:-1].copy()
            self.weights = data[:,-1].copy()
        elif weights is not None:
            self.coors = nm.array(coors, dtype=nm.float64, ndmin=2)
            self.weights = nm.array(weights, dtype=nm.float64)
        else:
            raise ValueError('both "coors" and "weights" have to be provided!')
        self.weights *= weight_fix
        self.n_point, self.dim = self.coors.shape
        # The internal reference element is always [0, 1] per dimension.
        self.bounds = (0, 1)
        bbox = nm.array([self.bounds] * self.dim, dtype=nm.float64)
        self.volume = nm.prod(bbox.sum(axis=1)) * tp_fix
        if symmetric:
            # Slice limit for the mirroring below: when the first
            # coordinate is zero, exclude it so it is not duplicated.
            # NOTE(review): this reads `data` directly, so symmetric=True
            # presumably requires the `data` argument - confirm.
            isym = 0 if data[0, 0] == 0 else None
        if bounds is not None:
            # Transform from given bounds to self.bounds.
            bbox = nm.array([bounds] * self.dim, dtype=nm.float64)
            volume = nm.prod(nm.diff(bbox, axis=1)) * tp_fix
            a, b = bounds
            c, d = self.bounds
            # Affine map [a, b] -> [c, d]: x |-> c1 * x + c2.
            c1 = (d - c) / (b - a)
            c2 = ((b * c) - (a * d)) / (b - a)
            self.coors = c1 * self.coors + c2
            # Rescale weights by the ratio of element volumes.
            self.weights *= self.volume / volume
        if symmetric:
            if self.coors.shape[1] != 1:
                msg = 'symmetric mode is allowed for 1D integrals only!'
                raise ValueError(msg)
            # Mirror coordinates and weights about the interval centre.
            origin = 0.5 * (self.bounds[0] + self.bounds[1])
            self.coors = nm.r_[2 * origin - self.coors[:isym:-1], self.coors]
            self.weights = nm.r_[self.weights[:isym:-1], self.weights]
_QP = QuadraturePoints
quadrature_tables = {
'0_1' : {
1 : _QP([[0.0, 1.0]])
},
'1_2' : {
1 : _QP([[0.000000000000000e+00, 2.0]],
bounds=(-1.0, 1.0), symmetric=True),
3 : _QP([[0.577350269189626e+00, 1.0]],
bounds=(-1.0, 1.0), symmetric=True),
5 : _QP([[0.000000000000000e+00, 0.888888888888889e+00],
[0.774596669241483e+00, 0.555555555555556e+00]],
bounds=(-1.0, 1.0), symmetric=True),
7 : _QP([[0.339981043584856e+00, 0.652145154862546e+00],
[0.861136311594053e+00, 0.347854845137454e+00]],
bounds=(-1.0, 1.0), symmetric=True),
9 : _QP([[0.000000000000000e+00, 0.568888888888889e+00],
[0.538469310105683e+00, 0.478628670499366e+00],
[0.906179845938664e+00, 0.236926885056189e+00]],
bounds=(-1.0, 1.0), symmetric=True),
11 : _QP([[0.238619186083197e+00, 0.467913934572691e+00],
[0.661209386466265e+00, 0.360761573048139e+00],
[0.932469514203152e+00, 0.171324492379170e+00]],
bounds=(-1.0, 1.0), symmetric=True),
13 : _QP([[0.000000000000000e+00, 0.417959183673469e+00],
[0.405845151377397e+00, 0.381830050505119e+00],
[0.741531185599394e+00, 0.279705391489277e+00],
[0.949107912342759e+00, 0.129484966168870e+00]],
bounds=(-1.0, 1.0), symmetric=True),
15 : _QP([[0.183434642495650e+00, 0.362683783378362e+00],
[0.525532409916329e+00, 0.313706645877887e+00],
[0.796666477413627e+00, 0.222381034453374e+00],
[0.960289856497536e+00, 0.101228536290376e+00]],
bounds=(-1.0, 1.0), symmetric=True),
17 : _QP([[0.000000000000000e+00, 0.330239355001260e+00],
[0.324253423403809e+00, 0.312347077040003e+00],
[0.613371432700590e+00, 0.260610696402935e+00],
[0.836031107326636e+00, 0.180648160694857e+00],
[0.968160239507626e+00, 0.081274388361574e+00]],
bounds=(-1.0, 1.0), symmetric=True),
19 : _QP([[0.148874338981631e+00, 0.295524224714753e+00],
[0.433395394129247e+00, 0.269266719309996e+00],
[0.679409568299024e+00, 0.219086362515982e+00],
[0.865063366688985e+00, 0.149451349150581e+00],
[0.973906528517172e+00, 0.066671344308688e+00]],
bounds=(-1.0, 1.0), symmetric=True),
23 : _QP([[0.125233408511469e+00, 0.249147045813403e+00],
[0.367831498998180e+00, 0.233492536538355e+00],
[0.587317954286617e+00, 0.203167426723066e+00],
[0.769902674194305e+00, 0.160078328543346e+00],
[0.904117256370475e+00, 0.106939325995318e+00],
[0.981560634246719e+00, 0.047175336386512e+00]],
bounds=(-1.0, 1.0), symmetric=True),
31 : _QP([[0.095012509837637440185e+00, 0.189450610455068496285e+00],
[0.281603550779258913230e+00, 0.182603415044923588867e+00],
[0.458016777657227386342e+00, 0.169156519395002538189e+00],
[0.617876244402643748447e+00, 0.149595988816576732081e+00],
[0.755404408355003033895e+00, 0.124628971255533872052e+00],
[0.865631202387831743880e+00, 0.095158511682492784810e+00],
[0.944575023073232576078e+00, 0.062253523938647892863e+00],
[0.989400934991649932596e+00, 0.027152459411754094852e+00]],
bounds=(-1.0, 1.0), symmetric=True),
39 : _QP([[0.076526521133497333755e+00, 0.152753387130725850698e+00],
[0.227785851141645078080e+00, 0.149172986472603746788e+00],
[0.373706088715419560673e+00, 0.142096109318382051329e+00],
[0.510867001950827098004e+00, 0.131688638449176626898e+00],
[0.636053680726515025453e+00, 0.118194531961518417312e+00],
[0.746331906460150792614e+00, 0.101930119817240435037e+00],
[0.839116971822218823395e+00, 0.083276741576704748725e+00],
[0.912234428251325905868e+00, 0.062672048334109063570e+00],
[0.963971927277913791268e+00, 0.040601429800386941331e+00],
[0.993128599185094924786e+00, 0.017614007139152118312e+00]],
bounds=(-1.0, 1.0), symmetric=True),
47 : _QP([[0.064056892862605626085e+00, 0.127938195346752156974e+00],
[0.191118867473616309159e+00, 0.125837456346828296121e+00],
[0.315042679696163374387e+00, 0.121670472927803391204e+00],
[0.433793507626045138487e+00, 0.115505668053725601353e+00],
[0.545421471388839535658e+00, 0.107444270115965634783e+00],
[0.648093651936975569252e+00, 0.097618652104113888270e+00],
[0.740124191578554364244e+00, 0.086190161531953275917e+00],
[0.820001985973902921954e+00, 0.073346481411080305734e+00],
[0.886415527004401034213e+00, 0.059298584915436780746e+00],
[0.938274552002732758524e+00, 0.044277438817419806169e+00],
[0.974728555971309498198e+00, 0.028531388628933663181e+00],
[0.995187219997021360180e+00, 0.012341229799987199547e+00]],
bounds=(-1.0, 1.0), symmetric=True),
},
'2_3' : {
1 : _QP([[1.0/3.0, 1.0/3.0, 0.5]],
tp_fix=0.5),
2 : _QP([[1.0/6.0, 1.0/6.0, 1.0/6.0],
[2.0/3.0, 1.0/6.0, 1.0/6.0],
[1.0/6.0, 2.0/3.0, 1.0/6.0]],
tp_fix=0.5),
3 : _QP([[1.0/3.0, 1.0/3.0,-27.0/96.0],
[1.0/5.0, 1.0/5.0, 25.0/96.0],
[3.0/5.0, 1.0/5.0, 25.0/96.0],
[1.0/5.0, 3.0/5.0, 25.0/96.0]],
tp_fix=0.5),
4 : _QP([[0.445948490915965e+00, 0.445948490915965e+00, 0.223381589678011e+00],
[0.108103018168070e+00, 0.445948490915965e+00, 0.223381589678011e+00],
[0.445948490915965e+00, 0.108103018168070e+00, 0.223381589678011e+00],
[0.091576213509771e+00, 0.091576213509771e+00, 0.109951743655322e+00],
[0.816847572980459e+00, 0.091576213509771e+00, 0.109951743655322e+00],
[0.091576213509771e+00, 0.816847572980459e+00, 0.109951743655322e+00]],
tp_fix=0.5, weight_fix=0.5),
5 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.225000000000000e+00],
[0.470142064105115e+00, 0.470142064105115e+00, 0.132394152788506e+00],
[0.059715871789770e+00, 0.470142064105115e+00, 0.132394152788506e+00],
[0.470142064105115e+00, 0.059715871789770e+00, 0.132394152788506e+00],
[0.101286507323456e+00, 0.101286507323456e+00, 0.125939180544827e+00],
[0.797426985353087e+00, 0.101286507323456e+00, 0.125939180544827e+00],
[0.101286507323456e+00, 0.797426985353087e+00, 0.125939180544827e+00]],
tp_fix=0.5, weight_fix=0.5),
6 : _QP([[0.249286745170910e+00, 0.249286745170910e+00, 0.116786275726379e+00],
[0.501426509658179e+00, 0.249286745170910e+00, 0.116786275726379e+00],
[0.249286745170910e+00, 0.501426509658179e+00, 0.116786275726379e+00],
[0.063089014491502e+00, 0.063089014491502e+00, 0.050844906370207e+00],
[0.873821971016996e+00, 0.063089014491502e+00, 0.050844906370207e+00],
[0.063089014491502e+00, 0.873821971016996e+00, 0.050844906370207e+00],
[0.310352451033784e+00, 0.636502499121399e+00, 0.082851075618374e+00],
[0.636502499121399e+00, 0.310352451033784e+00, 0.082851075618374e+00],
[0.053145049844817e+00, 0.636502499121399e+00, 0.082851075618374e+00],
[0.636502499121399e+00, 0.053145049844817e+00, 0.082851075618374e+00],
[0.310352451033784e+00, 0.053145049844817e+00, 0.082851075618374e+00],
[0.053145049844817e+00, 0.310352451033784e+00, 0.082851075618374e+00]],
tp_fix=0.5, weight_fix=0.5),
7 : _QP([[0.333333333333333e+00, 0.333333333333333e+00,-0.149570044467682e+00],
[0.260345966079040e+00, 0.260345966079040e+00, 0.175615257433208e+00],
[0.479308067841920e+00, 0.260345966079040e+00, 0.175615257433208e+00],
[0.260345966079040e+00, 0.479308067841920e+00, 0.175615257433208e+00],
[0.065130102902216e+00, 0.065130102902216e+00, 0.053347235608838e+00],
[0.869739794195568e+00, 0.065130102902216e+00, 0.053347235608838e+00],
[0.065130102902216e+00, 0.869739794195568e+00, 0.053347235608838e+00],
[0.312865496004874e+00, 0.638444188569810e+00, 0.077113760890257e+00],
[0.638444188569810e+00, 0.312865496004874e+00, 0.077113760890257e+00],
[0.048690315425316e+00, 0.638444188569810e+00, 0.077113760890257e+00],
[0.638444188569810e+00, 0.048690315425316e+00, 0.077113760890257e+00],
[0.312865496004874e+00, 0.048690315425316e+00, 0.077113760890257e+00],
[0.048690315425316e+00, 0.312865496004874e+00, 0.077113760890257e+00]],
tp_fix=0.5, weight_fix=0.5),
8 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.144315607677787e+00],
[0.459292588292723e+00, 0.459292588292723e+00, 0.095091634267285e+00],
[0.081414823414554e+00, 0.459292588292723e+00, 0.095091634267285e+00],
[0.459292588292723e+00, 0.081414823414554e+00, 0.095091634267285e+00],
[0.170569307751760e+00, 0.170569307751760e+00, 0.103217370534718e+00],
[0.658861384496480e+00, 0.170569307751760e+00, 0.103217370534718e+00],
[0.170569307751760e+00, 0.658861384496480e+00, 0.103217370534718e+00],
[0.050547228317031e+00, 0.050547228317031e+00, 0.032458497623198e+00],
[0.898905543365938e+00, 0.050547228317031e+00, 0.032458497623198e+00],
[0.050547228317031e+00, 0.898905543365938e+00, 0.032458497623198e+00],
[0.263112829634638e+00, 0.728492392955404e+00, 0.027230314174435e+00],
[0.728492392955404e+00, 0.263112829634638e+00, 0.027230314174435e+00],
[0.008394777409958e+00, 0.728492392955404e+00, 0.027230314174435e+00],
[0.728492392955404e+00, 0.008394777409958e+00, 0.027230314174435e+00],
[0.263112829634638e+00, 0.008394777409958e+00, 0.027230314174435e+00],
[0.008394777409958e+00, 0.263112829634638e+00, 0.027230314174435e+00]],
tp_fix=0.5, weight_fix=0.5),
9 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.097135796282799e+00],
[0.489682519198738e+00, 0.489682519198738e+00, 0.031334700227139e+00],
[0.020634961602525e+00, 0.489682519198738e+00, 0.031334700227139e+00],
[0.489682519198738e+00, 0.020634961602525e+00, 0.031334700227139e+00],
[0.437089591492937e+00, 0.437089591492937e+00, 0.077827541004774e+00],
[0.125820817014127e+00, 0.437089591492937e+00, 0.077827541004774e+00],
[0.437089591492937e+00, 0.125820817014127e+00, 0.077827541004774e+00],
[0.188203535619033e+00, 0.188203535619033e+00, 0.079647738927210e+00],
[0.623592928761935e+00, 0.188203535619033e+00, 0.079647738927210e+00],
[0.188203535619033e+00, 0.623592928761935e+00, 0.079647738927210e+00],
[0.044729513394453e+00, 0.044729513394453e+00, 0.025577675658698e+00],
[0.910540973211095e+00, 0.044729513394453e+00, 0.025577675658698e+00],
[0.044729513394453e+00, 0.910540973211095e+00, 0.025577675658698e+00],
[0.221962989160766e+00, 0.741198598784498e+00, 0.043283539377289e+00],
[0.741198598784498e+00, 0.221962989160766e+00, 0.043283539377289e+00],
[0.036838412054736e+00, 0.741198598784498e+00, 0.043283539377289e+00],
[0.741198598784498e+00, 0.036838412054736e+00, 0.043283539377289e+00],
[0.221962989160766e+00, 0.036838412054736e+00, 0.043283539377289e+00],
[0.036838412054736e+00, 0.221962989160766e+00, 0.043283539377289e+00]],
tp_fix=0.5, weight_fix=0.5),
10 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.908179903827540e-01],
[0.485577633383657e+00, 0.485577633383657e+00, 0.367259577564670e-01],
[0.288447332326850e-01, 0.485577633383657e+00, 0.367259577564670e-01],
[0.485577633383657e+00, 0.288447332326850e-01, 0.367259577564670e-01],
[0.109481575485037e+00, 0.109481575485037e+00, 0.453210594355280e-01],
[0.781036849029926e+00, 0.109481575485037e+00, 0.453210594355280e-01],
[0.109481575485037e+00, 0.781036849029926e+00, 0.453210594355280e-01],
[0.307939838764121e+00, 0.550352941820999e+00, 0.727579168454200e-01],
[0.550352941820999e+00, 0.307939838764121e+00, 0.727579168454200e-01],
[0.141707219414880e+00, 0.550352941820999e+00, 0.727579168454200e-01],
[0.550352941820999e+00, 0.141707219414880e+00, 0.727579168454200e-01],
[0.307939838764121e+00, 0.141707219414880e+00, 0.727579168454200e-01],
[0.141707219414880e+00, 0.307939838764121e+00, 0.727579168454200e-01],
[0.246672560639903e+00, 0.728323904597411e+00, 0.283272425310570e-01],
[0.728323904597411e+00, 0.246672560639903e+00, 0.283272425310570e-01],
[0.250035347626860e-01, 0.728323904597411e+00, 0.283272425310570e-01],
[0.728323904597411e+00, 0.250035347626860e-01, 0.283272425310570e-01],
[0.246672560639903e+00, 0.250035347626860e-01, 0.283272425310570e-01],
[0.250035347626860e-01, 0.246672560639903e+00, 0.283272425310570e-01],
[0.668032510122000e-01, 0.923655933587500e+00, 0.942166696373300e-02],
[0.923655933587500e+00, 0.668032510122000e-01, 0.942166696373300e-02],
[0.954081540029900e-02, 0.923655933587500e+00, 0.942166696373300e-02],
[0.923655933587500e+00, 0.954081540029900e-02, 0.942166696373300e-02],
[0.668032510122000e-01, 0.954081540029900e-02, 0.942166696373300e-02],
[0.954081540029900e-02, 0.668032510122000e-01, 0.942166696373300e-02]],
tp_fix=0.5, weight_fix=0.5),
12 : _QP([[0.488217389773805e+00, 0.488217389773805e+00, 0.257310664404550e-01],
[0.235652204523900e-01, 0.488217389773805e+00, 0.257310664404550e-01],
[0.488217389773805e+00, 0.235652204523900e-01, 0.257310664404550e-01],
[0.439724392294460e+00, 0.439724392294460e+00, 0.436925445380380e-01],
[0.120551215411079e+00, 0.439724392294460e+00, 0.436925445380380e-01],
[0.439724392294460e+00, 0.120551215411079e+00, 0.436925445380380e-01],
[0.271210385012116e+00, 0.271210385012116e+00, 0.628582242178850e-01],
[0.457579229975768e+00, 0.271210385012116e+00, 0.628582242178850e-01],
[0.271210385012116e+00, 0.457579229975768e+00, 0.628582242178850e-01],
[0.127576145541586e+00, 0.127576145541586e+00, 0.347961129307090e-01],
[0.744847708916828e+00, 0.127576145541586e+00, 0.347961129307090e-01],
[0.127576145541586e+00, 0.744847708916828e+00, 0.347961129307090e-01],
[0.213173504532100e-01, 0.213173504532100e-01, 0.616626105155900e-02],
[0.957365299093579e+00, 0.213173504532100e-01, 0.616626105155900e-02],
[0.213173504532100e-01, 0.957365299093579e+00, 0.616626105155900e-02],
[0.275713269685514e+00, 0.608943235779788e+00, 0.403715577663810e-01],
[0.608943235779788e+00, 0.275713269685514e+00, 0.403715577663810e-01],
[0.115343494534698e+00, 0.608943235779788e+00, 0.403715577663810e-01],
[0.608943235779788e+00, 0.115343494534698e+00, 0.403715577663810e-01],
[0.275713269685514e+00, 0.115343494534698e+00, 0.403715577663810e-01],
[0.115343494534698e+00, 0.275713269685514e+00, 0.403715577663810e-01],
[0.281325580989940e+00, 0.695836086787803e+00, 0.223567732023030e-01],
[0.695836086787803e+00, 0.281325580989940e+00, 0.223567732023030e-01],
[0.228383322222570e-01, 0.695836086787803e+00, 0.223567732023030e-01],
[0.695836086787803e+00, 0.228383322222570e-01, 0.223567732023030e-01],
[0.281325580989940e+00, 0.228383322222570e-01, 0.223567732023030e-01],
[0.228383322222570e-01, 0.281325580989940e+00, 0.223567732023030e-01],
[0.116251915907597e+00, 0.858014033544073e+00, 0.173162311086590e-01],
[0.858014033544073e+00, 0.116251915907597e+00, 0.173162311086590e-01],
[0.257340505483300e-01, 0.858014033544073e+00, 0.173162311086590e-01],
[0.858014033544073e+00, 0.257340505483300e-01, 0.173162311086590e-01],
[0.116251915907597e+00, 0.257340505483300e-01, 0.173162311086590e-01],
[0.257340505483300e-01, 0.116251915907597e+00, 0.173162311086590e-01]],
tp_fix=0.5, weight_fix=0.5),
13 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.525209234008020e-01],
[0.495048184939705e+00, 0.495048184939705e+00, 0.112801452093300e-01],
[0.990363012059100e-02, 0.495048184939705e+00, 0.112801452093300e-01],
[0.495048184939705e+00, 0.990363012059100e-02, 0.112801452093300e-01],
[0.468716635109574e+00, 0.468716635109574e+00, 0.314235183624540e-01],
[0.625667297808520e-01, 0.468716635109574e+00, 0.314235183624540e-01],
[0.468716635109574e+00, 0.625667297808520e-01, 0.314235183624540e-01],
[0.414521336801277e+00, 0.414521336801277e+00, 0.470725025041940e-01],
[0.170957326397447e+00, 0.414521336801277e+00, 0.470725025041940e-01],
[0.414521336801277e+00, 0.170957326397447e+00, 0.470725025041940e-01],
[0.229399572042831e+00, 0.229399572042831e+00, 0.473635865363550e-01],
[0.541200855914337e+00, 0.229399572042831e+00, 0.473635865363550e-01],
[0.229399572042831e+00, 0.541200855914337e+00, 0.473635865363550e-01],
[0.114424495196330e+00, 0.114424495196330e+00, 0.311675290457940e-01],
[0.771151009607340e+00, 0.114424495196330e+00, 0.311675290457940e-01],
[0.114424495196330e+00, 0.771151009607340e+00, 0.311675290457940e-01],
[0.248113913634590e-01, 0.248113913634590e-01, 0.797577146507400e-02],
[0.950377217273082e+00, 0.248113913634590e-01, 0.797577146507400e-02],
[0.248113913634590e-01, 0.950377217273082e+00, 0.797577146507400e-02],
[0.268794997058761e+00, 0.636351174561660e+00, 0.368484027287320e-01],
[0.636351174561660e+00, 0.268794997058761e+00, 0.368484027287320e-01],
[0.948538283795790e-01, 0.636351174561660e+00, 0.368484027287320e-01],
[0.636351174561660e+00, 0.948538283795790e-01, 0.368484027287320e-01],
[0.268794997058761e+00, 0.948538283795790e-01, 0.368484027287320e-01],
[0.948538283795790e-01, 0.268794997058761e+00, 0.368484027287320e-01],
[0.291730066734288e+00, 0.690169159986905e+00, 0.174014633038220e-01],
[0.690169159986905e+00, 0.291730066734288e+00, 0.174014633038220e-01],
[0.181007732788070e-01, 0.690169159986905e+00, 0.174014633038220e-01],
[0.690169159986905e+00, 0.181007732788070e-01, 0.174014633038220e-01],
[0.291730066734288e+00, 0.181007732788070e-01, 0.174014633038220e-01],
[0.181007732788070e-01, 0.291730066734288e+00, 0.174014633038220e-01],
[0.126357385491669e+00, 0.851409537834241e+00, 0.155217868390450e-01],
[0.851409537834241e+00, 0.126357385491669e+00, 0.155217868390450e-01],
[0.222330766740900e-01, 0.851409537834241e+00, 0.155217868390450e-01],
[0.851409537834241e+00, 0.222330766740900e-01, 0.155217868390450e-01],
[0.126357385491669e+00, 0.222330766740900e-01, 0.155217868390450e-01],
[0.222330766740900e-01, 0.126357385491669e+00, 0.155217868390450e-01]],
tp_fix=0.5, weight_fix=0.5),
14 : _QP([[0.488963910362179e+00, 0.488963910362179e+00, 0.218835813694290e-01],
[0.220721792756430e-01, 0.488963910362179e+00, 0.218835813694290e-01],
[0.488963910362179e+00, 0.220721792756430e-01, 0.218835813694290e-01],
[0.417644719340454e+00, 0.417644719340454e+00, 0.327883535441250e-01],
[0.164710561319092e+00, 0.417644719340454e+00, 0.327883535441250e-01],
[0.417644719340454e+00, 0.164710561319092e+00, 0.327883535441250e-01],
[0.273477528308839e+00, 0.273477528308839e+00, 0.517741045072920e-01],
[0.453044943382323e+00, 0.273477528308839e+00, 0.517741045072920e-01],
[0.273477528308839e+00, 0.453044943382323e+00, 0.517741045072920e-01],
[0.177205532412543e+00, 0.177205532412543e+00, 0.421625887369930e-01],
[0.645588935174913e+00, 0.177205532412543e+00, 0.421625887369930e-01],
[0.177205532412543e+00, 0.645588935174913e+00, 0.421625887369930e-01],
[0.617998830908730e-01, 0.617998830908730e-01, 0.144336996697770e-01],
[0.876400233818255e+00, 0.617998830908730e-01, 0.144336996697770e-01],
[0.617998830908730e-01, 0.876400233818255e+00, 0.144336996697770e-01],
[0.193909612487010e-01, 0.193909612487010e-01, 0.492340360240000e-02],
[0.961218077502598e+00, 0.193909612487010e-01, 0.492340360240000e-02],
[0.193909612487010e-01, 0.961218077502598e+00, 0.492340360240000e-02],
[0.172266687821356e+00, 0.770608554774996e+00, 0.246657532125640e-01],
[0.770608554774996e+00, 0.172266687821356e+00, 0.246657532125640e-01],
[0.571247574036480e-01, 0.770608554774996e+00, 0.246657532125640e-01],
[0.770608554774996e+00, 0.571247574036480e-01, 0.246657532125640e-01],
[0.172266687821356e+00, 0.571247574036480e-01, 0.246657532125640e-01],
[0.571247574036480e-01, 0.172266687821356e+00, 0.246657532125640e-01],
[0.336861459796345e+00, 0.570222290846683e+00, 0.385715107870610e-01],
[0.570222290846683e+00, 0.336861459796345e+00, 0.385715107870610e-01],
[0.929162493569720e-01, 0.570222290846683e+00, 0.385715107870610e-01],
[0.570222290846683e+00, 0.929162493569720e-01, 0.385715107870610e-01],
[0.336861459796345e+00, 0.929162493569720e-01, 0.385715107870610e-01],
[0.929162493569720e-01, 0.336861459796345e+00, 0.385715107870610e-01],
[0.298372882136258e+00, 0.686980167808088e+00, 0.144363081135340e-01],
[0.686980167808088e+00, 0.298372882136258e+00, 0.144363081135340e-01],
[0.146469500556540e-01, 0.686980167808088e+00, 0.144363081135340e-01],
[0.686980167808088e+00, 0.146469500556540e-01, 0.144363081135340e-01],
[0.298372882136258e+00, 0.146469500556540e-01, 0.144363081135340e-01],
[0.146469500556540e-01, 0.298372882136258e+00, 0.144363081135340e-01],
[0.118974497696957e+00, 0.879757171370171e+00, 0.501022883850100e-02],
[0.879757171370171e+00, 0.118974497696957e+00, 0.501022883850100e-02],
[0.126833093287200e-02, 0.879757171370171e+00, 0.501022883850100e-02],
[0.879757171370171e+00, 0.126833093287200e-02, 0.501022883850100e-02],
[0.118974497696957e+00, 0.126833093287200e-02, 0.501022883850100e-02],
[0.126833093287200e-02, 0.118974497696957e+00, 0.501022883850100e-02]],
tp_fix=0.5, weight_fix=0.5),
17 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.334371992908030e-01],
[0.497170540556774e+00, 0.497170540556774e+00, 0.509341544050700e-02],
[0.565891888645200e-02, 0.497170540556774e+00, 0.509341544050700e-02],
[0.497170540556774e+00, 0.565891888645200e-02, 0.509341544050700e-02],
[0.482176322624625e+00, 0.482176322624625e+00, 0.146708645276380e-01],
[0.356473547507510e-01, 0.482176322624625e+00, 0.146708645276380e-01],
[0.482176322624625e+00, 0.356473547507510e-01, 0.146708645276380e-01],
[0.450239969020782e+00, 0.450239969020782e+00, 0.243508783536720e-01],
[0.995200619584370e-01, 0.450239969020782e+00, 0.243508783536720e-01],
[0.450239969020782e+00, 0.995200619584370e-01, 0.243508783536720e-01],
[0.400266239377397e+00, 0.400266239377397e+00, 0.311075508689690e-01],
[0.199467521245206e+00, 0.400266239377397e+00, 0.311075508689690e-01],
[0.400266239377397e+00, 0.199467521245206e+00, 0.311075508689690e-01],
[0.252141267970953e+00, 0.252141267970953e+00, 0.312571112186200e-01],
[0.495717464058095e+00, 0.252141267970953e+00, 0.312571112186200e-01],
[0.252141267970953e+00, 0.495717464058095e+00, 0.312571112186200e-01],
[0.162047004658461e+00, 0.162047004658461e+00, 0.248156543396650e-01],
[0.675905990683077e+00, 0.162047004658461e+00, 0.248156543396650e-01],
[0.162047004658461e+00, 0.675905990683077e+00, 0.248156543396650e-01],
[0.758758822607460e-01, 0.758758822607460e-01, 0.140560730705570e-01],
[0.848248235478508e+00, 0.758758822607460e-01, 0.140560730705570e-01],
[0.758758822607460e-01, 0.848248235478508e+00, 0.140560730705570e-01],
[0.156547269678220e-01, 0.156547269678220e-01, 0.319467617377900e-02],
[0.968690546064356e+00, 0.156547269678220e-01, 0.319467617377900e-02],
[0.156547269678220e-01, 0.968690546064356e+00, 0.319467617377900e-02],
[0.334319867363658e+00, 0.655493203809423e+00, 0.811965531899300e-02],
[0.655493203809423e+00, 0.334319867363658e+00, 0.811965531899300e-02],
[0.101869288269190e-01, 0.655493203809423e+00, 0.811965531899300e-02],
[0.655493203809423e+00, 0.101869288269190e-01, 0.811965531899300e-02],
[0.334319867363658e+00, 0.101869288269190e-01, 0.811965531899300e-02],
[0.101869288269190e-01, 0.334319867363658e+00, 0.811965531899300e-02],
[0.292221537796944e+00, 0.572337590532020e+00, 0.268057422831630e-01],
[0.572337590532020e+00, 0.292221537796944e+00, 0.268057422831630e-01],
[0.135440871671036e+00, 0.572337590532020e+00, 0.268057422831630e-01],
[0.572337590532020e+00, 0.135440871671036e+00, 0.268057422831630e-01],
[0.292221537796944e+00, 0.135440871671036e+00, 0.268057422831630e-01],
[0.135440871671036e+00, 0.292221537796944e+00, 0.268057422831630e-01],
[0.319574885423190e+00, 0.626001190286228e+00, 0.184599932108220e-01],
[0.626001190286228e+00, 0.319574885423190e+00, 0.184599932108220e-01],
[0.544239242905830e-01, 0.626001190286228e+00, 0.184599932108220e-01],
[0.626001190286228e+00, 0.544239242905830e-01, 0.184599932108220e-01],
[0.319574885423190e+00, 0.544239242905830e-01, 0.184599932108220e-01],
[0.544239242905830e-01, 0.319574885423190e+00, 0.184599932108220e-01],
[0.190704224192292e+00, 0.796427214974071e+00, 0.847686853432800e-02],
[0.796427214974071e+00, 0.190704224192292e+00, 0.847686853432800e-02],
[0.128685608336370e-01, 0.796427214974071e+00, 0.847686853432800e-02],
[0.796427214974071e+00, 0.128685608336370e-01, 0.847686853432800e-02],
[0.190704224192292e+00, 0.128685608336370e-01, 0.847686853432800e-02],
[0.128685608336370e-01, 0.190704224192292e+00, 0.847686853432800e-02],
[0.180483211648746e+00, 0.752351005937729e+00, 0.182927967700250e-01],
[0.752351005937729e+00, 0.180483211648746e+00, 0.182927967700250e-01],
[0.671657824135240e-01, 0.752351005937729e+00, 0.182927967700250e-01],
[0.752351005937729e+00, 0.671657824135240e-01, 0.182927967700250e-01],
[0.180483211648746e+00, 0.671657824135240e-01, 0.182927967700250e-01],
[0.671657824135240e-01, 0.180483211648746e+00, 0.182927967700250e-01],
[0.807113136795640e-01, 0.904625504095608e+00, 0.666563200416500e-02],
[0.904625504095608e+00, 0.807113136795640e-01, 0.666563200416500e-02],
[0.146631822248280e-01, 0.904625504095608e+00, 0.666563200416500e-02],
[0.904625504095608e+00, 0.146631822248280e-01, 0.666563200416500e-02],
[0.807113136795640e-01, 0.146631822248280e-01, 0.666563200416500e-02],
[0.146631822248280e-01, 0.807113136795640e-01, 0.666563200416500e-02]],
tp_fix=0.5, weight_fix=0.5),
19 : _QP([[0.333333333333333e+00, 0.333333333333333e+00, 0.329063313889190e-01],
[0.489609987073006e+00, 0.489609987073006e+00, 0.103307318912720e-01],
[0.207800258539870e-01, 0.489609987073006e+00, 0.103307318912720e-01],
[0.489609987073006e+00, 0.207800258539870e-01, 0.103307318912720e-01],
[0.454536892697893e+00, 0.454536892697893e+00, 0.223872472630160e-01],
[0.909262146042150e-01, 0.454536892697893e+00, 0.223872472630160e-01],
[0.454536892697893e+00, 0.909262146042150e-01, 0.223872472630160e-01],
[0.401416680649431e+00, 0.401416680649431e+00, 0.302661258694680e-01],
[0.197166638701138e+00, 0.401416680649431e+00, 0.302661258694680e-01],
[0.401416680649431e+00, 0.197166638701138e+00, 0.302661258694680e-01],
[0.255551654403098e+00, 0.255551654403098e+00, 0.304909678021980e-01],
[0.488896691193805e+00, 0.255551654403098e+00, 0.304909678021980e-01],
[0.255551654403098e+00, 0.488896691193805e+00, 0.304909678021980e-01],
[0.177077942152130e+00, 0.177077942152130e+00, 0.241592127416410e-01],
[0.645844115695741e+00, 0.177077942152130e+00, 0.241592127416410e-01],
[0.177077942152130e+00, 0.645844115695741e+00, 0.241592127416410e-01],
[0.110061053227952e+00, 0.110061053227952e+00, 0.160508035868010e-01],
[0.779877893544096e+00, 0.110061053227952e+00, 0.160508035868010e-01],
[0.110061053227952e+00, 0.779877893544096e+00, 0.160508035868010e-01],
[0.555286242518400e-01, 0.555286242518400e-01, 0.808458026178400e-02],
[0.888942751496321e+00, 0.555286242518400e-01, 0.808458026178400e-02],
[0.555286242518400e-01, 0.888942751496321e+00, 0.808458026178400e-02],
[0.126218637772290e-01, 0.126218637772290e-01, 0.207936202748500e-02],
[0.974756272445543e+00, 0.126218637772290e-01, 0.207936202748500e-02],
[0.126218637772290e-01, 0.974756272445543e+00, 0.207936202748500e-02],
[0.395754787356943e+00, 0.600633794794645e+00, 0.388487690498100e-02],
[0.600633794794645e+00, 0.395754787356943e+00, 0.388487690498100e-02],
[0.361141784841200e-02, 0.600633794794645e+00, 0.388487690498100e-02],
[0.600633794794645e+00, 0.361141784841200e-02, 0.388487690498100e-02],
[0.395754787356943e+00, 0.361141784841200e-02, 0.388487690498100e-02],
[0.361141784841200e-02, 0.395754787356943e+00, 0.388487690498100e-02],
[0.307929983880436e+00, 0.557603261588784e+00, 0.255741606120220e-01],
[0.557603261588784e+00, 0.307929983880436e+00, 0.255741606120220e-01],
[0.134466754530780e+00, 0.557603261588784e+00, 0.255741606120220e-01],
[0.557603261588784e+00, 0.134466754530780e+00, 0.255741606120220e-01],
[0.307929983880436e+00, 0.134466754530780e+00, 0.255741606120220e-01],
[0.134466754530780e+00, 0.307929983880436e+00, 0.255741606120220e-01],
[0.264566948406520e+00, 0.720987025817365e+00, 0.888090357333800e-02],
[0.720987025817365e+00, 0.264566948406520e+00, 0.888090357333800e-02],
[0.144460257761150e-01, 0.720987025817365e+00, 0.888090357333800e-02],
[0.720987025817365e+00, 0.144460257761150e-01, 0.888090357333800e-02],
[0.264566948406520e+00, 0.144460257761150e-01, 0.888090357333800e-02],
[0.144460257761150e-01, 0.264566948406520e+00, 0.888090357333800e-02],
[0.358539352205951e+00, 0.594527068955871e+00, 0.161245467617310e-01],
[0.594527068955871e+00, 0.358539352205951e+00, 0.161245467617310e-01],
[0.469335788381780e-01, 0.594527068955871e+00, 0.161245467617310e-01],
[0.594527068955871e+00, 0.469335788381780e-01, 0.161245467617310e-01],
[0.358539352205951e+00, 0.469335788381780e-01, 0.161245467617310e-01],
[0.469335788381780e-01, 0.358539352205951e+00, 0.161245467617310e-01],
[0.157807405968595e+00, 0.839331473680839e+00, 0.249194181749100e-02],
[0.839331473680839e+00, 0.157807405968595e+00, 0.249194181749100e-02],
[0.286112035056700e-02, 0.839331473680839e+00, 0.249194181749100e-02],
[0.839331473680839e+00, 0.286112035056700e-02, 0.249194181749100e-02],
[0.157807405968595e+00, 0.286112035056700e-02, 0.249194181749100e-02],
[0.286112035056700e-02, 0.157807405968595e+00, 0.249194181749100e-02],
[0.750505969759110e-01, 0.701087978926173e+00, 0.182428401189510e-01],
[0.701087978926173e+00, 0.750505969759110e-01, 0.182428401189510e-01],
[0.223861424097916e+00, 0.701087978926173e+00, 0.182428401189510e-01],
[0.701087978926173e+00, 0.223861424097916e+00, 0.182428401189510e-01],
[0.750505969759110e-01, 0.223861424097916e+00, 0.182428401189510e-01],
[0.223861424097916e+00, 0.750505969759110e-01, 0.182428401189510e-01],
[0.142421601113383e+00, 0.822931324069857e+00, 0.102585637361990e-01],
[0.822931324069857e+00, 0.142421601113383e+00, 0.102585637361990e-01],
[0.346470748167600e-01, 0.822931324069857e+00, 0.102585637361990e-01],
[0.822931324069857e+00, 0.346470748167600e-01, 0.102585637361990e-01],
[0.142421601113383e+00, 0.346470748167600e-01, 0.102585637361990e-01],
[0.346470748167600e-01, 0.142421601113383e+00, 0.102585637361990e-01],
[0.654946280829380e-01, 0.924344252620784e+00, 0.379992885530200e-02],
[0.924344252620784e+00, 0.654946280829380e-01, 0.379992885530200e-02],
[0.101611192962780e-01, 0.924344252620784e+00, 0.379992885530200e-02],
[0.924344252620784e+00, 0.101611192962780e-01, 0.379992885530200e-02],
[0.654946280829380e-01, 0.101611192962780e-01, 0.379992885530200e-02],
[0.101611192962780e-01, 0.654946280829380e-01, 0.379992885530200e-02]],
tp_fix=0.5, weight_fix=0.5),
},
'2_4' : {
2 : _QP([[ nm.sqrt(2.0/3.0), 0.0 , 4.0/3.0],
[-1/nm.sqrt(6) , 1/nm.sqrt(2), 4.0/3.0],
[-1/nm.sqrt(6) ,-1/nm.sqrt(2), 4.0/3.0]], bounds=(-1.0, 1.0)),
3 : _QP([[-1/nm.sqrt(3),-1/nm.sqrt(3), 1.0],
[ 1/nm.sqrt(3),-1/nm.sqrt(3), 1.0],
[ 1/nm.sqrt(3), 1/nm.sqrt(3), 1.0],
[-1/nm.sqrt(3), 1/nm.sqrt(3), 1.0]], bounds=(-1.0, 1.0)),
5 : _QP([[ nm.sqrt(7.0/15.0), 0.0 , 0.816326530612245],
[-nm.sqrt(7.0/15.0), 0.0 , 0.816326530612245],
[ 0.0 , nm.sqrt(7.0/15.0), 0.816326530612245],
[ 0.0 ,-nm.sqrt(7.0/15.0), 0.816326530612245],
[ 0.881917103688197, 0.881917103688197, 0.183673469387755],
[ 0.881917103688197,-0.881917103688197, 0.183673469387755],
[-0.881917103688197, 0.881917103688197, 0.183673469387755],
[-0.881917103688197,-0.881917103688197, 0.183673469387755]], bounds=(-1.0, 1.0)),
},
'3_4' : {
1 : _QP([[ 1.0/4.0, 1.0/4.0, 1.0/4.0, 1.0/6.0]], tp_fix=1.0/6.0),
2 : _QP([[ (5-nm.sqrt(5))/20 , (5-nm.sqrt(5))/20 , (5-nm.sqrt(5))/20 , 1.0/24.0],
[ (5-nm.sqrt(5))/20 , (5-nm.sqrt(5))/20 , (5+3*nm.sqrt(5))/20, 1.0/24.0],
[ (5-nm.sqrt(5))/20 , (5+3*nm.sqrt(5))/20, (5-nm.sqrt(5))/20 , 1.0/24.0],
[ (5+3*nm.sqrt(5))/20, (5-nm.sqrt(5))/20 , (5-nm.sqrt(5))/20 , 1.0/24.0]], tp_fix=1.0/6.0),
3 : _QP([[ 1.0/4.0, 1.0/4.0, 1.0/4.0,-2.0/15.0],
[ 1.0/6.0, 1.0/6.0, 1.0/6.0, 3.0/40.0],
[ 1.0/6.0, 1.0/6.0, 1.0/2.0, 3.0/40.0],
[ 1.0/6.0, 1.0/2.0, 1.0/6.0, 3.0/40.0],
[ 1.0/2.0, 1.0/6.0, 1.0/6.0, 3.0/40.0]], tp_fix=1.0/6.0),
4 : _QP([[-0.5000000000000000, -0.5000000000000000, -0.5000000000000000, -0.1052444444444440],
[-0.8571428571428570, -0.8571428571428570, -0.8571428571428570, 0.0609777777777780],
[-0.8571428571428570, -0.8571428571428570, 0.5714285714285710, 0.0609777777777780],
[-0.8571428571428570, 0.5714285714285710, -0.8571428571428570, 0.0609777777777780],
[ 0.5714285714285710, -0.8571428571428570, -0.8571428571428570, 0.0609777777777780],
[-0.2011928476664020, -0.2011928476664020, -0.7988071523335980, 0.1991111111111110],
[-0.2011928476664020, -0.7988071523335980, -0.2011928476664020, 0.1991111111111110],
[-0.7988071523335980, -0.2011928476664020, -0.2011928476664020, 0.1991111111111110],
[-0.2011928476664020, -0.7988071523335980, -0.7988071523335980, 0.1991111111111110],
[-0.7988071523335980, -0.2011928476664020, -0.7988071523335980, 0.1991111111111110],
[-0.7988071523335980, -0.7988071523335980, -0.2011928476664020, 0.1991111111111110]],
bounds=(-1.0, 1.0), tp_fix=1.0/6.0),
6 : _QP([[-0.5707942574816960, -0.5707942574816960, -0.5707942574816960, 0.0532303336775570],
[-0.2876172275549120, -0.5707942574816960, -0.5707942574816960, 0.0532303336775570],
[-0.5707942574816960, -0.2876172275549120, -0.5707942574816960, 0.0532303336775570],
[-0.5707942574816960, -0.5707942574816960, -0.2876172275549120, 0.0532303336775570],
[-0.9186520829307770, -0.9186520829307770, -0.9186520829307770, 0.0134362814070940],
[0.7559562487923320, -0.9186520829307770, -0.9186520829307770, 0.0134362814070940],
[-0.9186520829307770, 0.7559562487923320, -0.9186520829307770, 0.0134362814070940],
[-0.9186520829307770, -0.9186520829307770, 0.7559562487923320, 0.0134362814070940],
[-0.3553242197154490, -0.3553242197154490, -0.3553242197154490, 0.0738095753915400],
[-0.9340273408536530, -0.3553242197154490, -0.3553242197154490, 0.0738095753915400],
[-0.3553242197154490, -0.9340273408536530, -0.3553242197154490, 0.0738095753915400],
[-0.3553242197154490, -0.3553242197154490, -0.9340273408536530, 0.0738095753915400],
[-0.8726779962499650, -0.8726779962499650, -0.4606553370833680, 0.0642857142857140],
[-0.8726779962499650, -0.4606553370833680, -0.8726779962499650, 0.0642857142857140],
[-0.8726779962499650, -0.8726779962499650, 0.2060113295832980, 0.0642857142857140],
[-0.8726779962499650, 0.2060113295832980, -0.8726779962499650, 0.0642857142857140],
[-0.8726779962499650, -0.4606553370833680, 0.2060113295832980, 0.0642857142857140],
[-0.8726779962499650, 0.2060113295832980, -0.4606553370833680, 0.0642857142857140],
[-0.4606553370833680, -0.8726779962499650, -0.8726779962499650, 0.0642857142857140],
[-0.4606553370833680, -0.8726779962499650, 0.2060113295832980, 0.0642857142857140],
[-0.4606553370833680, 0.2060113295832980, -0.8726779962499650, 0.0642857142857140],
[0.2060113295832980, -0.8726779962499650, -0.4606553370833680, 0.0642857142857140],
[0.2060113295832980, -0.8726779962499650, -0.8726779962499650, 0.0642857142857140],
[0.2060113295832980, -0.4606553370833680, -0.8726779962499650, 0.0642857142857140]],
bounds=(-1.0, 1.0), tp_fix=1.0/6.0),
},
'3_8' : {
2 : _QP([[ 0.0 , nm.sqrt(2.0/3.0),-1/nm.sqrt(3), 2.0],
[ 0.0 ,-nm.sqrt(2.0/3.0),-1/nm.sqrt(3), 2.0],
[ nm.sqrt(2.0/3.0), 0.0 , 1/nm.sqrt(3), 2.0],
[-nm.sqrt(2.0/3.0), 0.0 , 1/nm.sqrt(3), 2.0]], bounds=(-1.0, 1.0)),
3 : _QP([[-1.0, 0.0, 0.0, 4.0/3.0],
[ 1.0, 0.0, 0.0, 4.0/3.0],
[ 0.0,-1.0, 0.0, 4.0/3.0],
[ 0.0, 1.0, 0.0, 4.0/3.0],
[ 0.0, 0.0,-1.0, 4.0/3.0],
[ 0.0, 0.0, 1.0, 4.0/3.0]], bounds=(-1.0, 1.0)),
5 : _QP([[-nm.sqrt(19.0/30.0), 0.0 , 0.0 , 320.0/361.0],
[ nm.sqrt(19.0/30.0), 0.0 , 0.0 , 320.0/361.0],
[ 0.0 ,-nm.sqrt(19.0/30.0), 0.0 , 320.0/361.0],
[ 0.0 , nm.sqrt(19.0/30.0), 0.0 , 320.0/361.0],
[ 0.0 , 0.0 ,-nm.sqrt(19.0/30.0), 320.0/361.0],
[ 0.0 , 0.0 , nm.sqrt(19.0/30.0), 320.0/361.0],
[ nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), 121.0/361.0],
[ nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), 121.0/361.0],
[ nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), 121.0/361.0],
[ nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), 121.0/361.0],
[-nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), 121.0/361.0],
[-nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), 121.0/361.0],
[-nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), nm.sqrt(19.0/33.0), 121.0/361.0],
[-nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0),-nm.sqrt(19.0/33.0), 121.0/361.0]], bounds=(-1.0, 1.0)),
},
}
# The _QP helper class is only needed while building the tables above; remove
# it from the module namespace.
del _QP

def _get_max_orders():
    """Return a dict mapping each geometry key of ``quadrature_tables`` to the
    highest quadrature order available for that geometry.
    """
    # max() over a dict iterates its keys directly - no need to materialize
    # the key list first.
    return {key: max(table) for key, table in quadrature_tables.items()}

max_orders = _get_max_orders()
| sfepy/sfepy | sfepy/discrete/quadratures.py | Python | bsd-3-clause | 53,551 | [
"Gaussian"
] | 78cc3e653ef7f1904075982a5317075fb5fb026f766549ea01c9e0d9010245c6 |
# Copyright 2014 Douglas RAILLARD
#
# This file is part of BrownBat.
#
# BrownBat is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BrownBat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BrownBat. If not, see <http://www.gnu.org/licenses/>.
"""
.. moduleauthor:: Douglas RAILLARD <douglas.raillard.github@gmail.com>
C language source code generation module.
This module provides C language specific classes.
"""
import collections
import numbers
import textwrap
import re
import builtins
import inspect
import os
import copy
import brownbat.core as core
class Configuration:
    """Container for the tunable settings of this module.

    Instances can be installed as the *config* attribute of :class:`Node`
    (class-wide or per instance) to alter how source code is generated.
    """
    def __init__(self, enable_debug_comments):
        """
        :param enable_debug_comments: when true, generated sources carry an
            automatic debugging side comment built from the type of each
            represented object and the Python line that created it.
        """
        self.enable_debug_comments = enable_debug_comments
# Module-wide default configuration, shared by every Node that is not given an
# explicit Configuration object: debug comments are disabled by default.
default_config = Configuration(
    enable_debug_comments = False
)
class Node(core.NodeBase):
    """This class is at the root of the inheritance hierarchy of this module.
    It handles some features which are common to all of the classes representing C source code.
    """
    # Default configuration shared by all instances; __init__ overrides it on
    # a per-instance basis when an explicit *config* is given.
    config = default_config

    # We must check if the comment is None to avoid infinite recursion
    # because Com tries to build a TokenList (via TokenListContainer) with comment=None, which in turn
    # tries to build a comment with None and so on
    comment = core.EnsureNode('comment', lambda x: Com(x) if x is not None else core.PHANTOM_NODE)

    # We must check if the comment is None to avoid infinite recursion
    # because SingleLineCom tries to build a TokenList with comment=None, which in turn
    # tries to build a comment with None and so on
    side_comment = core.EnsureNode('side_comment', lambda x: SingleLineCom(x) if x is not None else core.PHANTOM_NODE)

    def __init__(self, comment=None, side_comment=None, parent=None, config=None):
        """
        :param comment: is the multiline comment node associated to this node. If it is not already a :class:`~brownbat.core.NodeABC`,
                        a :class:`.Com` will be built with what you give to it.
        :param side_comment: is a single line comment associated with this node. It will be an instance of :class:`.SingleLineCom`
                             if it is not already a :class:`~brownbat.core.NodeABC`.
                             Be aware that this side comment must be displayed by the class, and sometimes it will not be printed.
        :param parent: is the parent of the node if this is a :class:`NodeView`.
        :param config: is the configuration object of this instance. It defaults to using the *config* class attribute,
                       so changing the class attribute *config* will impact all the instances that has not overriden
                       it by providing a configuration object explicitly.
        """
        if config is not None:
            self.config = config

        # /!\ Be careful here: as this class is the base class of all classes
        # in this file, any constructor call here will turn into infinite
        # recursion. Fortunately, side_comment is irrelevant for Backtrace
        if self.config.enable_debug_comments and not isinstance(self, (Backtrace, SingleLineCom)):
            self.instanciation_backtrace = Backtrace()
            # NOTE(review): 'str + Backtrace' presumably builds a token-list
            # node via the node's __radd__, and .extend() below relies on the
            # resulting container API -- confirm against brownbat.core.
            side_comment_backtrace = self.__class__.__name__+' created at '+self.instanciation_backtrace
            # Also display backtrace of the parent object if this one is just a NodeView
            if isinstance(self, NodeView):
                side_comment_backtrace.extend(" (view of "+self.parent.__class__.__name__+" created at "+self.parent.instanciation_backtrace+")")
            side_comment = SingleLineCom(side_comment_backtrace)

        super().__init__(comment=comment, side_comment=side_comment, parent=parent)
class NodeView(core.NodeViewBase, Node):
    """This class is the C implementation of :class:`~brownbat.core.NodeViewBase` class.

    A view does not own its comments: unless set explicitly on the view, both
    *comment* and *side_comment* are delegated to the parent node.
    """
    side_comment = core.DelegatedAttribute(
        'side_comment', 'parent',
        descriptor = Node.side_comment,
        default_value_list = (None,)
    )
    """Side comment which defaults to using the *parent.side_comment* attribute
    when not set explicitly.
    """

    comment = core.DelegatedAttribute(
        'comment', 'parent',
        descriptor = Node.comment,
        default_value_list = (None,)
    )
    """Comment which defaults to using the *parent.comment* attribute
    when not set explicitly.
    """
class NodeContainer(core.NodeContainerBase, Node):
    """C implementation of the :class:`~brownbat.core.NodeContainerBase` class.

    Concatenating with ``+`` yields a :class:`.TokenListContainer` holding both
    operands, whichever side of the operator this container sits on.
    """
    def __add__(self, other):
        # A TokenListContainer is the most agnostic container available.
        return TokenListContainer((self, other))

    def __radd__(self, other):
        # A TokenListContainer is the most agnostic container available.
        return TokenListContainer((other, self))
class TokenListContainer(NodeContainer):
    """A :class:`NodeContainer` whose factory is :class:`TokenList`: values
    that are not already :class:`~brownbat.core.NodeABC` instances are wrapped
    in a :class:`TokenList` automatically.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(node_classinfo=TokenList, *args, **kwargs)
class TokenList(core.TokenListBase, Node):
    """C implementation of :class:`~brownbat.core.TokenListBase`, combining the
    token list behavior with the common C node features of :class:`Node`.
    """
class DelegatedTokenList(core.DelegatedTokenListBase, Node):
    """C implementation of :class:`~brownbat.core.DelegatedTokenListBase`,
    combining it with the common C node features of :class:`Node`.
    """
class IndentedTokenList(core.IndentedTokenListBase, TokenList):
    """C implementation of :class:`~brownbat.core.IndentedTokenListBase`,
    an indentation-aware variant of :class:`TokenList`.
    """
class IndentedDelegatedTokenList(core.IndentedDelegatedTokenListBase, DelegatedTokenList):
    """C implementation of :class:`~brownbat.core.IndentedDelegatedTokenListBase`,
    an indentation-aware variant of :class:`DelegatedTokenList`.
    """
class Backtrace(core.BacktraceBase, TokenList):
    """C implementation of :class:`~brownbat.core.BacktraceBase`.

    When rendered as a free-standing statement, the backtrace is wrapped in a
    :class:`.SingleLineCom` so it shows up as a comment in the output.
    """
    def freestanding_str(self, idt=None):
        # Wrap the backtrace tokens in a one-line comment and let it render itself.
        wrapper = SingleLineCom(('Object built at ', self))
        return wrapper.freestanding_str(idt)
class _Expr:
    """Mixin providing C expression behavior: free-standing rendering as a
    ``;``-terminated statement, and builder methods/operator overloads that
    combine expressions into new :class:`Expr` instances.

    Note that the overloaded operators are repurposed as code builders (they
    do not evaluate anything); see the individual method docstrings.
    """
    # Template used when the expression is printed as a full statement.
    __format_string = '{expr};{side_comment}'

    def freestanding_str(self, idt=None):
        """Render the expression as a statement on its own line:
        newline, indentation, expression, ``;`` and the side comment.
        """
        idt = core.Indentation.ensure_idt(idt)
        side_comment = self.side_comment
        snippet = '\n'+str(idt)+self.__format_string.format(
            expr = self.inline_str(idt),
            side_comment = side_comment.inline_str(idt)
        )
        return snippet

    def assign(self, value):
        """Build the assignment expression ``self = value``."""
        return Expr((self," = ",value))

    def cast(self, new_type):
        """Build ``((new_type)(self))``: cast this expression to *new_type*."""
        return Expr(('((',new_type,')(',self,'))'))

    def rcast(self, casted):
        """Build ``((self)(casted))``: use this expression as the type that
        casts *casted*.
        """
        return Expr(('((',self,')(',casted,'))'))

    def deref(self):
        """Build the dereference expression ``(*(self))``."""
        return Expr(('(*(',self,'))'))

    def paren(self):
        """Build this expression wrapped in parentheses: ``(self)``."""
        return Expr(('(',self,')'))

    def address(self):
        """Build the address-of expression ``(&(self))``."""
        return Expr(('(&(',self,'))'))

    def __rshift__(self, member):
        """Right shift allows to access structure/union/enum members: 'a'>>'b' will give 'a.b'."""
        return Expr((self,".",member))

    def __rrshift__(self, basename):
        """See right shift"""
        return Expr((basename,".",self))

    def __invert__(self):
        """ ~expr will give (&(expr))"""
        return self.address()

    def __pos__(self):
        """ +expr will give (*(expr))"""
        return self.deref()

    def __neg__(self):
        """-expr will give (expr)"""
        return self.paren()

    def __pow__(self, type):
        """expr1**expr2 will give ((expr1)(expr2))"""
        return self.rcast(type)

    def __rpow__(self, type):
        """'int'**expr will give ((int)(expr))"""
        return self.cast(type)
class Expr(_Expr, IndentedTokenList):
    """A C expression node.

    Inline rendering comes from :class:`.IndentedTokenList`; the
    :class:`._Expr` mixin extends *freestanding_str* so the expression is
    terminated with ``;`` when printed as a statement.
    """
class DelegatedExpr(_Expr, IndentedDelegatedTokenList):
    """A C expression stored by delegation.

    Unlike :class:`.Expr`, the actual expression lives in an attribute (via
    :class:`.IndentedDelegatedTokenList`), so classes can transparently expose
    an expression through composition.
    """
class StmtContainer(NodeContainer, core.NonIterable):
    """A :class:`.NodeContainer` whose default factory is :class:`Expr`.

    This lets the user append plain values (e.g. strings): expressions are
    built out of them automatically.
    """
    def __init__(self, node_list=None, node_classinfo=None, node_factory=None, *args, **kwargs):
        if node_classinfo is None:
            # Accept any node type; only force the factory to Expr when
            # node_classinfo is empty, because an empty node_factory is filled
            # with the first element of node_classinfo by NodeContainer.
            classinfo_list = [core.NodeABC]
            if node_factory is None:
                node_factory = Expr
        else:
            classinfo_list = core.listify(node_classinfo)
        super().__init__(node_list=node_list, node_classinfo=classinfo_list, node_factory=node_factory, *args, **kwargs)
class BlockStmt(StmtContainer):
    """A :class:`.StmtContainer` rendered as a C block.

    *inline_str* wraps the contained statements in ``{`` ... ``}`` and indents
    them one level deeper.
    """
    def inline_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        # Subclasses usually display the side comment in their own format, so
        # only print it here for plain BlockStmt instances.
        trailing = self.side_comment.inline_str(idt) if type(self) is BlockStmt else ''
        out = '\n' + str(idt) + '{' + trailing
        idt.indent()
        body = super().inline_str(idt)
        idt.dedent()
        return out + body + '\n' + str(idt) + '}'
class OrderedTypeContainer(StmtContainer):
    """This class is a container that automatically reorder
    compound type definitions to satisfy the dependencies.
    It inserts the reordered type definitions at the beginning,
    and also include a forward declaration for each type, to allow
    pointer cross-referencing.
    """
    def inline_str(self, idt=None):
        """Render the container, with compound type definitions topologically
        sorted by dependency and prefixed with the needed forward declarations.
        """
        # Only touch a copy, so rendering leaves this container unmodified.
        self_copy = copy.copy(self)
        # Build a dictionary mapping the type names to the type objects
        type_dict = collections.OrderedDict()
        for item in self:
            if isinstance(item, (Struct, Union)):
                type_dict[item.name.inline_str().strip()] = item
        types_to_sort_list = list()
        # Build a dependency graph of unions and structures
        dependency_dict = collections.defaultdict(list)
        # Build a dependency graph of unions and structures that takes pointers into account
        weak_dependency_dict = collections.defaultdict(list)
        # Translation table used to remove characters from a type name ('*', parens)
        # to analyse weak (pointer) dependencies
        transtable = str.maketrans({char:None for char in '*()'})
        for item in type_dict.values():
            # NOTE(review): assumes iterating a Struct/Union yields its members,
            # each having a .type node -- confirm against the member classes.
            for member in item:
                # Determine dependencies with the type name, to
                # allow hardcoded types to be taken into account
                member_type_name = member.type.inline_str().strip()
                # Remove the leading part to correctly match the real type name
                # WARNING: if a 'struct foo' and 'enum foo' are both declared, it will break and
                # register incorrect dependencies, but it would be insane to do such a thing anyway.
                for prefix in ('struct', 'enum', 'union'):
                    if member_type_name.startswith(prefix):
                        member_type_name = member_type_name[len(prefix):].lstrip()
                        break
                # Try to find a type with the exact name
                try:
                    type_ = type_dict[member_type_name]
                    # Only try to access [item] key after making sure the type exists,
                    # to avoid triggering the creation of an empty list, and having
                    # a key in dependency_dict with no dependencies
                    dependency_dict[item].append(type_)
                    # Build a list of types that will be reordered
                    types_to_sort_list.append(item)
                # If the type name is not found, try to add it as a weak dependency
                except KeyError:
                    # Try to find something that looks like a pointer to a known type
                    stripped_member_type_name = member_type_name.translate(transtable).strip()
                    try:
                        # Only try to access [item] key after making sure the type exists,
                        # to avoid triggering the creation of an empty list, and having
                        # a key in weak_dependency_dict with no dependencies
                        type_ = type_dict[stripped_member_type_name]
                        weak_dependency_dict[item].append(type_)
                        # Build a list of types that will be reordered
                        types_to_sort_list.append(item)
                    # If nothing was found, give up
                    except KeyError:
                        pass
        # Do a topological sort of the dependency graph of the types
        # (depth-first search with temporary/permanent marks).
        sorted_node_list = list()
        temporary_marked = set()
        permanently_marked = set()
        forward_decl_type_set = set()
        def visit(node):
            nonlocal temporary_marked
            nonlocal permanently_marked
            nonlocal sorted_node_list
            if node in temporary_marked:
                # A temporary mark seen again means a cycle among strong
                # dependencies: the definitions cannot be ordered.
                raise ValueError('The dependency graph of compound types is not a DAG, cannot sort the type definitions')
            elif node not in permanently_marked:
                temporary_marked.add(node)
                for dep_node in dependency_dict[node]:
                    visit(dep_node)
                # Weak (pointer) dependencies may form cycles: try to honor
                # them, and fall back to a forward declaration when they fail.
                for dep_node in weak_dependency_dict[node]:
                    try:
                        # Backup all data structures in case the DFS fails
                        sorted_node_list_backup = copy.copy(sorted_node_list)
                        temporary_marked_backup = copy.copy(temporary_marked)
                        permanently_marked_backup = copy.copy(permanently_marked)
                        forward_decl_type_set_backup = copy.copy(forward_decl_type_set)
                        visit(dep_node)
                    except ValueError:
                        forward_decl_type_set.add(dep_node)
                        # Restore bookkeeping data
                        # NOTE(review): forward_decl_type_set_backup is saved but
                        # never restored, so forward declarations added inside the
                        # failed DFS branch are kept -- presumably harmless since
                        # extra forward declarations are benign, but confirm.
                        sorted_node_list = sorted_node_list_backup
                        temporary_marked = temporary_marked_backup
                        permanently_marked = permanently_marked_backup
                permanently_marked.add(node)
                temporary_marked.discard(node)
                sorted_node_list.append(node)
        # Only consider types that have dependencies
        # types_to_sort_list may have duplicates
        for node in types_to_sort_list:
            visit(node)
        # Build a list of nodes that do not contain the reordered type definitions
        # We must be careful, as the 'not in' operator for lists tests for equality
        sorted_node_id_list = [id(node) for node in sorted_node_list]
        remaining_node_list = [item for item in self if id(item) not in sorted_node_id_list]
        # Build a list of forward declaration to add before type definitions
        forward_decl_list = [item.forward_decl() for item in forward_decl_type_set]
        # Insert the reordered type definitions at the beginning
        self_copy[:] = forward_decl_list+sorted_node_list+[NewLine()]+remaining_node_list
        # Print using the StmtContainer.inline_str() method, bound to the copy.
        return super(OrderedTypeContainer, self_copy).inline_str(idt)
class ConditionnalStmtBase(BlockStmt):
    """Base class for C statements guarded by a condition (*if*, *while*, ...).

    Subclasses supply the printing template through the name-mangled
    ``_ConditionnalStmtBase__format_string`` class attribute, which
    :meth:`inline_str` reads below as ``self.__format_string``.
    """
    cond = core.EnsureNode('cond', TokenList)
    """The condition of the conditional statement."""
    def __init__(self, cond=None, *args, **kwargs):
        """
        :param cond: the condition of the conditional statement.
        """
        self.cond = cond
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        # self.__format_string compiles to
        # self._ConditionnalStmtBase__format_string thanks to name mangling,
        # so each subclass provides its own template.
        return self.__format_string.format(
            cond = self.cond.inline_str(idt),
            stmt = super().inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
class If(ConditionnalStmtBase):
    """This class represents the C *if* statement."""
    # Printing template consumed by ConditionnalStmtBase.inline_str()
    _ConditionnalStmtBase__format_string = "if({cond}){side_comment}{stmt}"
class Else(ConditionnalStmtBase):
    """This class represents the C *else* statement.
    :param cond: ignored
    """
    # Printing template consumed by ConditionnalStmtBase.inline_str()
    _ConditionnalStmtBase__format_string = "else{side_comment}{stmt}"
    def __init__(self, *args, **kwargs):
        # An else clause has no condition, so force cond to None.
        # NOTE(review): a positional argument in *args would also bind to the
        # parent's cond parameter and clash with cond=None — this assumes
        # callers pass the remaining arguments by keyword; verify against callers.
        super().__init__(cond=None, *args, **kwargs)
class ElseIf(ConditionnalStmtBase):
    """This class represents the C *else if* statement."""
    # Printing template consumed by ConditionnalStmtBase.inline_str()
    _ConditionnalStmtBase__format_string = "else if({cond}){side_comment}{stmt}"
class While(ConditionnalStmtBase):
    """This class represents the C *while* statement."""
    # Printing template consumed by ConditionnalStmtBase.inline_str()
    _ConditionnalStmtBase__format_string = "while({cond}){side_comment}{stmt}"
class For(BlockStmt):
    """This class represents the C *for* statement."""
    init = core.EnsureNode('init', TokenList)
    """This is the initalization expression (``a`` in ``for(a;b;c){}``)."""
    cond = core.EnsureNode('cond', TokenList)
    """This is the stop condition (``b`` in ``for(a;b;c){}``)."""
    action = core.EnsureNode('action', TokenList)
    """This is the expression evaluated each time(``c`` in ``for(a;b;c){}``)."""
    __format_string = "for({init}; {cond}; {action}){side_comment}{stmt}"
    def __init__(self, init=None, cond=None, action=None, *args, **kwargs):
        """
        :param init: initialization expression; a :class:`Var` is accepted and
                     replaced by its definition.
        :param cond: stop condition.
        :param action: expression evaluated after each iteration.
        """
        # If we got a variable, we take its definition because it is a really
        # common use case
        if isinstance(init, Var):
            self.init = init.defi()
        else:
            self.init = init
        self.cond = cond
        self.action = action
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        return self.__format_string.format(
            cond = self.cond.inline_str(idt),
            init = self.init.inline_str(idt),
            action = self.action.inline_str(idt),
            # BlockStmt printing of the loop body
            stmt = super().inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
class DoWhile(BlockStmt):
    """This class represents the C *do while* statement."""
    cond = core.EnsureNode('cond', TokenList)
    """This is the stop condition."""
    __format_string = "do{side_comment}{stmt}{idt_nl}while({cond});"
    def __init__(self, cond=None, *args, **kwargs):
        """
        :param cond: the stop condition of the loop.
        """
        self.cond = cond
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        # idt is needed here (unlike the other conditionals) because the
        # template itself uses {idt_nl} before the trailing "while"
        idt = core.Indentation.ensure_idt(idt)
        return self.__format_string.format(
            stmt = super().inline_str(idt),
            cond = self.cond.inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
import collections.abc
class Switch(Node, core.NonIterable, collections.abc.MutableMapping):
    """This class represents the C *switch* statement.
    This class can be used as a dictionary (:class:`collections.abc.MutableMapping`)
    with the keys as the case values, and the values as the code to execute when the
    tested expression matches the key.

    .. note:: the ABCs moved to :mod:`collections.abc` in Python 3.3 and the
              aliases in :mod:`collections` were removed in Python 3.10, hence
              the use of ``collections.abc`` here.
    """
    expr = core.EnsureNode('expr', TokenList)
    """This is the expression to switch on."""
    __format_string = "switch({expr}){side_comment}{idt_nl}{{{stmt}{idt_nl}}}"
    __case_format_string = "{idt_nl}case ({case}):{side_comment}{stmt}{auto_break}\n"
    __default_format_string = "{idt_nl}default:{side_comment}{stmt}{auto_break}\n"
    def __init__(self, expr=None, case_map=None, auto_break=True, *args, **kwargs):
        """
        :param expr: the expression to switch on.
        :type expr: :class:`.TokenList`
        :param case_map: a mapping with keys used as cases and values as
        the code to execute (a :class:`.StmtContainer`).
        :param auto_break: a boolean indicating if a *break* statement should be
        automatically inserted at the end of the code of the
        cases.
        .. note:: The *case_map* keys are not touched, so you may use them later, they
        will not be turned into :class:`.TokenList`.
        """
        self.expr = expr
        # An OrderedDict keeps the cases in insertion order when printing
        processed_case_map = collections.OrderedDict()
        if isinstance(case_map, collections.abc.Mapping):
            for key, value in case_map.items():
                processed_case_map[key] = StmtContainer(value)
        elif case_map is None: pass
        else:
            raise ValueError("You have to give a mapping or None for the case_map")
        self.case_map = processed_case_map
        self.auto_break = auto_break
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        body = ""
        idt.indent()
        for case, stmt in self.case_map.items():
            case = TokenList.ensure_node(case)
            case_string = case.inline_str(idt)
            # A literal "default" key selects the default: label template
            if case_string == "default":
                format_string = self.__default_format_string
            else:
                format_string = self.__case_format_string
            idt.indent()
            stmt_snippet = stmt.inline_str(idt)
            if self.auto_break:
                auto_break = '\n'+str(idt)+"break;"
            else:
                auto_break = ""
            idt.dedent()
            body += format_string.format(
                idt_nl = '\n'+str(idt),
                case = case_string,
                side_comment = case.side_comment.inline_str(idt),
                stmt = stmt_snippet,
                auto_break = auto_break
            )
        idt.dedent()
        snippet = self.__format_string.format(
            idt_nl = '\n'+str(idt),
            expr = self.expr.inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            stmt = body
        )
        return snippet
    def __copy__(self):
        # Shallow-copy the instance but give it its own case_map and expr
        cls = type(self)
        new_obj = cls.__new__(cls)
        new_obj.__dict__.update(self.__dict__)
        new_obj.case_map = copy.copy(self.case_map)
        new_obj.expr = copy.copy(self.expr)
        return new_obj
    def __getitem__(self, key):
        return self.case_map[key]
    def __setitem__(self, key, value):
        # Wrap the value so plain statements/lists are accepted as case bodies
        self.case_map[key] = StmtContainer(value)
    def __delitem__(self, key):
        del self.case_map[key]
    def __len__(self):
        return len(self.case_map)
    def __iter__(self):
        return iter(self.case_map)
class Var(DelegatedExpr):
    """This class represents a C variable.

    A :class:`Var` can be built from a declaration string (``"int a[3] = {0}"``),
    from another :class:`Var`, from a :class:`core.TokenListABC` used as the
    name, or from the explicit keyword parameters. Explicit parameters take
    precedence over what was parsed from *decl*.
    """
    storage_list = core.EnsureNode('storage_list', TokenListContainer)
    type = core.EnsureNode('type',
        node_factory=lambda type: TokenList(type) if type is not None else None,
        node_classinfo=TokenList
    )
    name = core.EnsureNode('name', node_factory=TokenList)
    _array_size = core.EnsureNode('_array_size',
        node_factory=lambda array_size: TokenList(array_size) if array_size is not None else None,
        node_classinfo=TokenList
    )
    initializer = core.EnsureNode('initializer',
        node_factory=lambda initializer: TokenList(initializer) if initializer is not None else None,
        node_classinfo=TokenList
    )
    @property
    def array_size(self):
        # If the array size is not specified, try to use the one from the type
        if self._array_size is None and hasattr(self.type, 'array_size'):
            return self.type.array_size
        else:
            return self._array_size
    @array_size.setter
    def array_size(self, value):
        self._array_size = value
    # Raw strings (r"...") are used for every fragment containing a
    # backslash: non-raw strings with sequences like \s or \[ raise
    # DeprecationWarning (and SyntaxWarning since Python 3.12) for
    # invalid escape sequences.
    c_identifier_regex_str = "[a-zA-Z_]+[a-zA-Z0-9_]*"
    var_defi_name_array_initializer_regex_str = r"(?:(?P<name>"+c_identifier_regex_str+r")\s*)(?:\[\s*(?P<array_size>.*?)\s*\])?(?:\s*=\s*(?P<initializer>.*?)\s*)?"
    var_defi_storage_list_regex_str = "(?P<storage_list>.*?)"
    var_def_type_regex_str = r"(?P<type>(?:(?P<_is_a_compound>union|struct|enum)\s*(?(_is_a_compound)(?:(?:\{.*?\})|(?:"+c_identifier_regex_str+r"))|"+c_identifier_regex_str+r"))?(?(_is_a_compound)|"+c_identifier_regex_str+r")(?:\s*\*+)?)"
    # Matches a declaration or definition of a C variable with the following groups:
    # * name of the variable
    # * optionally, array_size. None if the declaration is not an array
    # * optionally, initializer of the variable. None if not specified
    var_no_type_defi_regex_str = r"^\s*"+var_defi_name_array_initializer_regex_str+r"\s*$"
    var_no_type_defi_regex = re.compile(var_no_type_defi_regex_str)
    # Matches a declaration or definition of a C variable with the following groups:
    # * type of the variable
    # * name of the variable
    # * optionally, array_size. None if the declaration is not an array
    # * optionally, initializer of the variable. None if not specified
    var_defi_regex_str = r"^\s*"+var_def_type_regex_str+r"?\s*"+var_defi_name_array_initializer_regex_str+r"\s*$"
    var_defi_regex = re.compile(var_defi_regex_str)
    # Matches a declaration or definition of a C variable with the following groups:
    # * storage_list of the variable
    # * type of the variable
    # * name of the variable
    # * optionally, array_size. None if the declaration is not an array
    # * optionally, initializer of the variable. None if not specified
    var_storage_list_defi_regex = re.compile(r"^\s*"+var_defi_storage_list_regex_str+r"\s+"+var_def_type_regex_str+r"\s*"+var_defi_name_array_initializer_regex_str+r"\s*$")
    def __init__(self, decl=None, storage_list=None, type=None, name=None, initializer=None, array_size=None, *args, **kwargs):
        """
        :param decl: a declaration string, another :class:`Var`, or a
                     :class:`core.TokenListABC` used as the name.
        :param storage_list: storage class specifiers (e.g. ``static``);
                             a string is split on whitespace.
        :param type: the C type of the variable.
        :param name: the variable name.
        :param initializer: the initializer expression.
        :param array_size: the array size, or None if not an array.
        :raises ValueError: if *decl* cannot be parsed or is of an
                            unsupported type.
        """
        if decl is not None:
            # Parse the declaration
            if isinstance(decl, str):
                # Try to match without a storage list and without a type
                match = self.var_no_type_defi_regex.match(decl)
                if match is None:
                    # If the previous regex failed to match, try the one with type support
                    match = self.var_defi_regex.match(decl)
                if match is None:
                    # If the previous regex failed to match, try the one with type and storage list support
                    match = self.var_storage_list_defi_regex.match(decl)
                if match is None:
                    raise ValueError("Cannot parse variable declaration/definition")
                # Try to get the storage list if there is one
                try:
                    decl_storage_list = match.group('storage_list').split()
                except IndexError:
                    decl_storage_list = None
                # Try to get the type if there is one
                try:
                    decl_type = match.group('type')
                except IndexError:
                    decl_type = None
                # Remove multiple spaces before the star in pointer declarations
                # for example: "char      *" => "char *"
                if decl_type is not None:
                    try:
                        first_star_index = decl_type.index('*')
                        decl_type = decl_type[:first_star_index].strip()+' '+decl_type[first_star_index:].strip()
                    # No star was found
                    except ValueError:
                        pass
                decl_name = match.group('name')
                decl_array_size = match.group('array_size')
                decl_initializer = match.group('initializer')
            # Make a shallow copy of the other Var
            elif isinstance(decl, Var):
                decl_storage_list = decl.storage_list
                decl_type = decl.type
                decl_name = decl.name
                decl_array_size = decl.array_size
                decl_initializer = decl.initializer
            # Use a TokenList as the name of the variable, nothing else
            elif isinstance(decl, core.TokenListABC):
                decl_storage_list = None
                decl_type = None
                decl_name = decl
                decl_array_size = None
                decl_initializer = None
            else:
                raise ValueError("Cannot create a Var from "+str(builtins.type(decl)))
        # If decl was None
        else:
            decl_storage_list = None
            decl_type = None
            decl_name = None
            decl_array_size = None
            decl_initializer = None
        # User gave a declaration and specified some other parameters
        # The explicitly specified parameters have higher priority
        storage_list = decl_storage_list if storage_list is None else (
            storage_list.split() if isinstance(storage_list, str) else storage_list
        )
        type = decl_type if type is None else type
        name = decl_name if name is None else name
        array_size = decl_array_size if array_size is None else array_size
        initializer = decl_initializer if initializer is None else initializer
        self.storage_list = storage_list
        self.type = type
        self.name = name
        self.array_size = array_size
        self.initializer = initializer
        # Store the name in the token_list member to allow Expr magic
        super().__init__(tokenlist_attr_name='name', *args, **kwargs)
    def freestanding_str(self, idt=None):
        # A free-standing variable is printed as its definition
        return self.defi().freestanding_str(idt)
    def decl(self):
        """Return a declaration view of this variable."""
        return VarDecl(self)
    def defi(self):
        """Return a definition view of this variable."""
        return VarDefi(self)
    def extern_decl(self):
        """Return an ``extern`` declaration view of this variable."""
        return VarExternDecl(self)
    def __getitem__(self, key):
        # Array subscript: var[key] builds the C expression "var[key]"
        return Expr((self,"[",key,"]"))
class VarDecl(NodeView, core.NonIterable):
    """View of a :class:`Var` printed as a C declaration.

    The view reads everything from ``self.parent`` (the :class:`Var`).
    """
    # Regex used to match type names using stars (pointers) and adjust spaces.
    # Raw string avoids invalid escape sequence warnings on modern Python.
    star_space_handling_regex = re.compile(r'^\s*(?P<name>[^\*]*)(\s)*(?P<stars>\*+)\s*$')
    def freestanding_str(self, idt=None, hide_initializer=False, hide_array_size=False):
        idt = core.Indentation.ensure_idt(idt)
        return '\n'+str(idt)+self.inline_str(idt, hide_initializer=hide_initializer, hide_array_size=hide_array_size)+';'+self.side_comment.inline_str(idt)
    def inline_str(self, idt=None, hide_initializer=False, hide_array_size=False):
        """Build the declaration string.

        :param hide_initializer: if True, do not print the ``= init`` part.
        :param hide_array_size: if True, print ``[]`` instead of ``[size]``.
        """
        storage_list = " ".join(storage.inline_str(idt) for storage in self.parent.storage_list)+" "
        snippet = storage_list.strip()+" "
        if self.parent.type is not None:
            if self.parent.array_size is not None:
                # Array declaration: "type name[size]"
                snippet += self.parent.type.inline_str(idt)+" "
                snippet += self.parent.inline_str(idt)
                if not hide_array_size:
                    array_size_str = self.parent.array_size.inline_str(idt)
                else:
                    array_size_str = ''
                snippet += "["+array_size_str+"]"
            else:
                type_str = self.parent.type.inline_str(idt)
                # See if there is any whitespace changes to apply
                match_obj = self.star_space_handling_regex.match(type_str)
                if match_obj:
                    name = match_obj.group('name') if match_obj.group('name') is not None else ''
                    stars = match_obj.group('stars') if match_obj.group('stars') is not None else ''
                    type_str = name.strip()+' '+stars.strip()
                # Add a space between the type name and the variable, but only if this is
                # not a pointer, in which case the space is between the type name and the stars
                type_addend = '' if type_str.endswith('*') else " "
                snippet += type_str+type_addend
                snippet += self.parent.inline_str(idt)
        else:
            # No type: print the bare name (e.g. enumerator-like usage)
            snippet += self.parent.inline_str(idt)
        if not hide_initializer and self.parent.initializer is not None:
            snippet += " = "+self.parent.initializer.inline_str(idt)
        return snippet.strip()
class VarDefi(VarDecl):
    """View of a :class:`Var` printed as a definition.

    Currently printed exactly like :class:`VarDecl`; the distinct class keeps
    the declaration/definition intent explicit.
    """
    pass
class VarExternDecl(VarDecl):
    """View of a :class:`Var` printed as an ``extern`` declaration.

    By default the initializer and the array size are hidden, since an
    extern declaration usually does not carry them.
    """
    def __init__(self, var, hide_initializer=True, hide_array_size=True, *args, **kwargs):
        """
        :param var: the :class:`Var` this view refers to.
        :param hide_initializer: default value used when inline_str/freestanding_str
                                 are called without an explicit override.
        :param hide_array_size: same as above, for the array size.
        """
        self.hide_initializer=hide_initializer
        self.hide_array_size = hide_array_size
        super().__init__(var, *args, **kwargs)
    def inline_str(self, idt=None, hide_initializer=None, hide_array_size=None):
        # None means "use the instance defaults set in __init__"
        if hide_initializer is None:
            hide_initializer = self.hide_initializer
        if hide_array_size is None:
            hide_array_size = self.hide_array_size
        return "extern "+super().inline_str(idt, hide_initializer=hide_initializer, hide_array_size=hide_array_size)
    def freestanding_str(self, idt=None, hide_initializer=None, hide_array_size=None):
        # None means "use the instance defaults set in __init__"
        if hide_initializer is None:
            hide_initializer = self.hide_initializer
        if hide_array_size is None:
            hide_array_size = self.hide_array_size
        return super().freestanding_str(idt, hide_initializer=hide_initializer, hide_array_size=hide_array_size)
class Fun(BlockStmt):
    """This class represents a C function.

    The function body is held by the :class:`BlockStmt` base; views are
    available for its definition (:meth:`defi`), declaration (:meth:`decl`)
    and calls (:meth:`call` or simply calling the instance).
    """
    name = core.EnsureNode('name', TokenList)
    return_type = core.EnsureNode('return_type', TokenList)
    storage_list = core.EnsureNode('storage_list', TokenListContainer)
    param_list = core.EnsureNode('param_list', TokenListContainer)
    def __init__(self, name=None, return_type="void", storage_list=None, param_list=None, *args, **kwargs):
        """
        :param name: function name.
        :param return_type: return type, defaults to ``void``.
        :param storage_list: storage class specifiers (e.g. ``static``).
        :param param_list: list of parameters.
        """
        self.name = name
        self.return_type = return_type
        self.storage_list = storage_list
        self.param_list = param_list
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        # Inline printing of a function is its full definition
        return self.defi().inline_str(idt)
    def __call__(self, *args):
        # fun(a, b) builds a FunCall node instead of executing anything
        return self.call(args)
    def defi(self):
        """Return a definition view of this function."""
        return FunDef(self)
    def decl(self):
        """Return a declaration (prototype) view of this function."""
        return FunDecl(self)
    def call(self, param_list=None):
        """Return a call expression view of this function."""
        return FunCall(self, param_list)
class FunParam(Var):
    """A function parameter, handled like a variable."""
    def inline_str(self, idt=None):
        # In a prototype, a parameter is printed as its declaration
        return self.decl().inline_str(idt)
class FunDef(NodeView):
    """View of a :class:`Fun` printed as a full C function definition."""
    __format_string = "{idt_nl}{storage_list}{type}{name}({param_list}){side_comment}{body}"
    def inline_str(self, idt=None):
        storage_list = " ".join(storage.inline_str(idt) for storage in self.parent.storage_list)+" "
        storage_list = storage_list.strip()
        if storage_list:
            storage_list += ' '
        param_list = ", ".join(param.inline_str(idt) for param in self.parent.param_list)
        param_list = param_list.strip()
        # An empty C parameter list means "unspecified", so print "void" instead
        if not param_list:
            param_list = "void"
        return self.__format_string.format(
            type = self.parent.return_type.inline_str(idt)+' ',
            name = self.parent.name.inline_str(idt),
            param_list = param_list,
            side_comment = self.parent.side_comment.inline_str(idt),
            storage_list = storage_list,
            # Bypass Fun.inline_str (which would recurse into this view) and
            # use the BlockStmt body printing of the parent directly
            body = super(Fun, self.parent).inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
class FunDecl(NodeView):
    """View of a :class:`Fun` printed as a C prototype (declaration)."""
    __format_string = "{storage_list}{type}{name}({param_list});{side_comment}"
    def inline_str(self, idt=None):
        storage_list = " ".join(storage.inline_str(idt) for storage in self.parent.storage_list)+" "
        storage_list = storage_list.strip()
        if storage_list:
            storage_list += ' '
        param_list = ", ".join(param.inline_str(idt) for param in self.parent.param_list)
        param_list = param_list.strip()
        # An empty C parameter list means "unspecified", so print "void" instead
        if not param_list:
            param_list = "void"
        return self.__format_string.format(
            type = self.parent.return_type.inline_str(idt)+' ',
            name = self.parent.name.inline_str(idt),
            param_list = param_list,
            storage_list = storage_list,
            side_comment = self.side_comment.inline_str(idt)
        )
class FunCall(NodeView, Expr, core.NonIterable):
    """View of a :class:`Fun` printed as a call expression ``name(params)``."""
    param_list = core.EnsureNode('param_list', TokenListContainer)
    # Separator printed between the call arguments
    param_joiner = ', '
    __format_string = "{name}({param_list})"
    def __init__(self, parent, param_list=None, param_joiner=None, *args, **kwargs):
        """
        :param parent: the :class:`Fun` being called.
        :param param_list: the actual arguments of the call.
        :param param_joiner: optional override of the argument separator.
        """
        self.param_list = param_list
        if param_joiner is not None:
            self.param_joiner = param_joiner
        super().__init__(parent=parent, *args, **kwargs)
    def inline_str(self, idt=None):
        return self.__format_string.format(
            name = self.parent.name.inline_str(idt),
            param_list = self.param_joiner.join(param.inline_str(idt) for param in self.param_list),
        )
class Type(DelegatedTokenList, core.NonIterable):
    """This class represents a C type name, optionally with an array size.

    A string name of the form ``"int [3]"`` is parsed into the base name and
    the array size.
    """
    name = core.EnsureNode('name', TokenList)
    # Fixed typo: the EnsureNode key used to be 'array_size ' (trailing
    # space); the key is now consistent with the attribute name.
    array_size = core.EnsureNode('array_size',
        node_factory=lambda array_size: TokenList(array_size) if array_size is not None else None,
        node_classinfo=TokenList
    )
    # Raw string avoids invalid escape sequence warnings on modern Python
    type_declaration_regex = re.compile(r'^\s*(?P<name>.*?)\s*(\[\s*(?P<array_size>.*?)\s*\])\s*$')
    def __init__(self, name=None, array_size=None, *args, **kwargs):
        """
        :param name: the type name; a ``"name[size]"`` string is parsed.
        :param array_size: explicit array size, overriding one parsed from *name*.
        """
        # Only strings are parsed; anything else is stored as-is
        match = self.type_declaration_regex.match(name) if isinstance(name, str) else None
        if match:
            self.name = match.group('name')
            self.array_size = match.group('array_size')
            # An explicit array_size argument has priority over the parsed one
            if array_size is not None:
                self.array_size = array_size
        else:
            self.name = name
            self.array_size = array_size
        super().__init__(tokenlist_attr_name='name', *args, **kwargs)
class TypePointer(NodeView):
    """View printing the parent type as a pointer to that type."""
    def inline_str(self, idt=None):
        # Print the pointed-to type, then append the star
        pointed_to = self.parent.inline_str(idt)
        return pointed_to + '*'
class CompoundType(BlockStmt):
    """Base class for C compound types (struct, union, enum).

    Subclasses provide the printing templates through the name-mangled
    ``_CompoundType__format_string``, ``_CompoundType__typedef_format_string``,
    ``_CompoundType__forward_declaration_format_string`` and
    ``_CompoundType__forward_declaration_typedef_format_string`` class
    attributes, read below via ``self.__...`` thanks to name mangling.
    """
    name = core.EnsureNode('name', TokenList)
    def __init__(self, name=None, auto_typedef=True, *args, **kwargs):
        """
        :param name: the tag name of the compound type.
        :param auto_typedef: if True, a typedef of the same name is emitted
                             along with the type definition.
        """
        self.name = name
        self.auto_typedef = auto_typedef
        super().__init__(*args, **kwargs)
    def anonymous(self):
        """Return a view printing this type as an anonymous inline type."""
        return CompoundTypeAnonymousView(self)
    def inline_str(self, idt=None):
        # When used inline (e.g. as a variable type), only the name is printed
        return self.name.inline_str(idt)
    def freestanding_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        if self.auto_typedef:
            format_string = self.__typedef_format_string
        else:
            format_string = self.__format_string
        # The format string do not contain the newline and indentation
        # at their beginning to be consistent with the format string of
        # other classes
        format_string = '\n\n'+str(idt)+format_string
        return format_string.format(
            name = self.name.inline_str(idt),
            members = super().inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
    def forward_decl(self):
        """Return a forward-declaration view of this type."""
        return CompoundTypeForwardDeclaration(self)
    def ptr(self):
        """Return a pointer-to-this-type view."""
        return TypePointer(self)
    def __pos__(self):
        # Unary + is a shorthand for taking a pointer to the type
        return self.ptr()
class CompoundTypeAnonymousView(NodeView):
    """View printing a compound type as an anonymous inline type.

    The prefix and separator are looked up on the parent under the
    name-mangled attributes ``_CompoundTypeAnonymousView__prefix_string`` and
    ``_CompoundTypeAnonymousView__separator_string``, which the concrete
    compound type classes define.
    """
    def inline_str(self, idt=None):
        # self.parent.__prefix_string mangles to
        # self.parent._CompoundTypeAnonymousView__prefix_string
        return (self.parent.__prefix_string+' {'+
            self.parent.__separator_string.join(
                member.decl().inline_str()
                for member in self.parent
            )+
        self.parent.__separator_string.rstrip()+'}')
class CompoundTypeForwardDeclaration(NodeView, core.NonIterable):
    """View printing a forward declaration of a compound type."""
    def inline_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        # The templates are provided by the concrete CompoundType subclass
        format_string = self.parent._CompoundType__forward_declaration_format_string
        if self.parent.auto_typedef:
            # Also forward-declare the typedef so it can be used before
            # the full definition
            format_string += '{idt_nl}'+self.parent._CompoundType__forward_declaration_typedef_format_string
        return format_string.format(
            name = self.parent.name.inline_str(idt),
            side_comment = self.side_comment.inline_str(idt),
            idt_nl = '\n'+str(idt)
        )
class EnumMember(Var):
    """An enumerator inside a C *enum* definition.

    Handled like a variable, except that it can carry neither a type nor an
    array size.
    """
    def __init__(self, *args, **kwargs):
        # is_last_member is toggled by Enum.freestanding_str to suppress the
        # trailing comma on the last enumerator
        self.is_last_member = False
        super().__init__(*args, **kwargs)
        # Remove any sort of type or array_size after the object is constructed
        self.type = None
        self.array_size = None
    def freestanding_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        # Stateful hack to avoid printing a trailing comma in enumeration
        # is_last_member state is restored by Enum class when printing is done
        if self.is_last_member:
            addend = ''
        else:
            addend = ','
        return '\n'+str(idt)+self.decl().inline_str(idt)+addend+self.side_comment.inline_str(idt)
class Enum(CompoundType):
    """This class represents a C *enum* definition."""
    # Printing templates consumed by CompoundType and its views (the
    # name-mangled attributes belong to those classes)
    _CompoundType__typedef_format_string = "typedef enum {name}{members} {name};{side_comment}"
    _CompoundType__format_string = "enum {name}{members};{side_comment}"
    _CompoundType__forward_declaration_format_string = "enum {name};{side_comment}"
    _CompoundType__forward_declaration_typedef_format_string = "typedef enum {name} {name};"
    _CompoundTypeAnonymousView__prefix_string = 'enum'
    _CompoundTypeAnonymousView__separator_string = ', '
    def __init__(self, name=None, member_list=None, auto_typedef=True, *args, **kwargs):
        """
        :param name: enum tag name.
        :param member_list: list of :class:`EnumMember`.
        :param auto_typedef: if True, a typedef of the same name is emitted.
        """
        super().__init__(name, auto_typedef, node_list=member_list, node_classinfo=EnumMember, *args, **kwargs)
    def freestanding_str(self, idt=None):
        # If there is at least one enumerator, so we can take the last member because it exists
        if self:
            last_member = self[-1]
            is_last_member_value = last_member.is_last_member
            try:
                # Tell the last enumerator to skip its trailing comma
                last_member.is_last_member = True
                snippet = super().freestanding_str(idt)
            finally:
                # Restore the old value in case we want to append another
                # enumerator after we printed the enum once
                last_member.is_last_member = is_last_member_value
        else:
            snippet = super().freestanding_str(idt)
        return snippet
class StructMember(Var):
    """A member of a C *struct*, declared like a plain variable."""
    @property
    def initializer(self):
        """Special handling of initializer here: if the initializer is queried,
        None is returned, to avoid displaying it in the structure declaration.
        If set, the content is redirected to default_initializer attribute,
        to allow building of a default designated initializer.
        """
        return None
    @initializer.setter
    def initializer(self, value):
        # Make sure it is a TokenList, as EnsureNode would do for
        # the initializer attribute of the Var class.
        value = TokenList.ensure_node(value)
        self.default_initializer = value
class UnionMember(Var):
    """A member of a C *union*, declared like a plain variable."""
    pass
class _StructUnionBase(CompoundType):
    """Common base of :class:`Struct` and :class:`Union`.

    Members of anonymous struct/union types are separated by semicolons,
    unlike enum members.
    """
    _CompoundTypeAnonymousView__separator_string = '; '
class Struct(_StructUnionBase):
    """This class represents a C *struct* definition."""
    # Printing templates consumed by CompoundType and its views
    _CompoundType__typedef_format_string = "typedef struct {name}{members} {name};{side_comment}"
    _CompoundType__format_string = "struct {name}{members};{side_comment}"
    _CompoundType__forward_declaration_format_string = "struct {name};{side_comment}"
    _CompoundType__forward_declaration_typedef_format_string = "typedef struct {name} {name};"
    _CompoundTypeAnonymousView__prefix_string = 'struct'
    def __init__(self, name=None, member_list=None, auto_typedef=True, *args, **kwargs):
        """
        :param name: struct tag name.
        :param member_list: list of :class:`StructMember`.
        :param auto_typedef: if True, a typedef of the same name is emitted.
        """
        # NOTE(review): (StructMember) is not a tuple, just a parenthesized
        # name — presumably intended; compare Union which uses a real tuple
        super().__init__(name, auto_typedef, node_list=member_list, node_classinfo=(StructMember), *args, **kwargs)
    def designated_init(self):
        """Return a designated initializer built from the members' defaults."""
        return StructDefaultDesignatedInitializer(self)
import collections.abc
class StructDesignatedInitializer(Expr, collections.abc.MutableMapping):
    """C99 designated initializer for a structure: ``{.member=value, ...}``.

    Behaves as a mutable mapping from member names to initializer values.
    String keys containing dots (``"a.b"``) address members of nested
    designated initializers.

    .. note:: the ABCs moved to :mod:`collections.abc` in Python 3.3 and the
              aliases in :mod:`collections` were removed in Python 3.10, hence
              the use of ``collections.abc`` here.
    """
    # Default mapping from Python value types to C type names, used by struct()
    _default_translation_map = {int: 'int', float: 'float', str:'char *'}
    def __init__(self, value_map=None, *args, **kwargs):
        """
        :param value_map: optional mapping of member names to initial values.
        """
        self.value_map = collections.OrderedDict()
        if isinstance(value_map, collections.abc.Mapping):
            for key, value in value_map.items():
                self[key] = TokenList.ensure_node(value)
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        # Filter out the members that do not have any initializer
        snippet = '{'+', '.join(
            '.'+TokenList(member).inline_str()+'='+value.inline_str()
            for member,value in self.value_map.items() if value.inline_str()
        )+'}'
        return snippet
    def struct(self, name=None, auto_typedef=True, type_translation_map=None):
        """Build a :class:`Struct` whose members match this initializer.

        :param name: struct tag name.
        :param auto_typedef: forwarded to :class:`Struct`.
        :param type_translation_map: a callable mapping a value to a C type
            name, or a mapping of Python types to C type names, or None to
            use the default mapping.
        :raises ValueError: if *type_translation_map* is neither callable,
            a mapping, nor None.
        """
        def default_translator(type_translation_map, value):
            """This translator uses the type_translation_map as a mapping
            of Python types to C types (strings).
            """
            # TokenList are treated as a special case: we decapsulate
            # the first token to find its type
            if isinstance(value, core.TokenListABC):
                return type_translation_map[type(value[0])]
            else:
                return type_translation_map[type(value)]
        if callable(type_translation_map):
            translate_type = type_translation_map
        elif isinstance(type_translation_map, collections.abc.Mapping):
            # The translator compare the first token in TokenList, because
            # values are always instances of TokenList
            translate_type = lambda value: default_translator(type_translation_map, value)
        elif type_translation_map is None:
            translate_type = lambda value: default_translator(self._default_translation_map, value)
        else:
            raise ValueError('type_translation_map must be either callable or a mapping')
        struct = Struct(
            name = name,
            auto_typedef = auto_typedef,
            side_comment = self.side_comment,
            comment = self.comment
        )
        for member, value in self.items():
            # If there is a nested designated intializer, output an anonymous
            # struct type for the member
            if isinstance(value, StructDesignatedInitializer):
                type_ = value.struct(type_translation_map=type_translation_map).anonymous()
            else:
                type_ = translate_type(value)
            struct.append(StructMember(
                name = member,
                # Set the initializer, so we can use Struct.designated_init() on the resulting structure
                initializer = value,
                type = type_
            ))
        return struct
    def __getitem__(self, key):
        # If the key is a string, try to catch any reference
        # to a nested member, and forward it to the nested
        # StructDesignatedInitializer instance
        if isinstance(key, str):
            split_key = key.split('.')
            if len(split_key) > 1:
                nested_member = '.'.join(split_key[1:])
                return self.value_map[split_key[0]][nested_member]
            return self.value_map[key]
        else:
            return self.value_map[key]
    def __setitem__(self, key, value):
        value = TokenList.ensure_node(value)
        # If the key is a string, try to catch any reference
        # to a nested member, and forward it to the nested
        # StructDesignatedInitializer instance
        if isinstance(key, str):
            split_key = key.split('.')
            if len(split_key) > 1:
                nested_member = '.'.join(split_key[1:])
                self.value_map.setdefault(split_key[0], StructDesignatedInitializer())[nested_member] = value
            else:
                self.value_map[key] = value
        else:
            self.value_map[key] = value
    def __copy__(self):
        # Shallow-copy the instance but give it its own value_map
        cls = type(self)
        new_obj = cls.__new__(cls)
        new_obj.__dict__.update(self.__dict__)
        new_obj.value_map = copy.copy(self.value_map)
        return new_obj
    def __delitem__(self, key):
        del self.value_map[key]
    def __len__(self):
        return len(self.value_map)
    def __iter__(self):
        return iter(self.value_map)
class StructDefaultDesignatedInitializer(NodeView, StructDesignatedInitializer):
    """Designated initializer built from a struct's member defaults.

    The parent :class:`Struct` members' ``default_initializer`` values are
    merged with any value explicitly set on this instance; explicit values
    win since they come last in the merged dict.
    """
    def inline_str(self, idt=None):
        merged_initializer = collections.OrderedDict(
            [(member.inline_str(),member.default_initializer)
            for member in self.parent.node_list]+
            list(self.items())
        )
        return StructDesignatedInitializer(merged_initializer).inline_str(idt)
class Union(_StructUnionBase):
    """This class represents a C *union* definition."""
    # Printing templates consumed by CompoundType and its views
    _CompoundType__typedef_format_string = "typedef union {name}{members} {name};{side_comment}"
    _CompoundType__format_string= "union {name}{members};{side_comment}"
    _CompoundType__forward_declaration_format_string = "union {name};{side_comment}"
    _CompoundType__forward_declaration_typedef_format_string = "typedef union {name} {name};"
    _CompoundTypeAnonymousView__prefix_string = 'union'
    def __init__(self, name=None, member_list=None, auto_typedef=True, *args, **kwargs):
        """
        :param name: union tag name.
        :param member_list: list of :class:`UnionMember`.
        :param auto_typedef: if True, a typedef of the same name is emitted.
        """
        super().__init__(name, auto_typedef, node_list=member_list, node_classinfo=(UnionMember,core.NodeABC), *args, **kwargs)
class Typedef(Node, core.NonIterable):
    """This class represents a C *typedef*."""
    old_name = core.EnsureNode('old_name', TokenList)
    name = core.EnsureNode('name', TokenList)
    __format_string = "typedef {old_name} {new_name};{side_comment}"
    def __init__(self, old_name=None, new_name=None, *args, **kwargs):
        """
        :param old_name: the existing type name.
        :param new_name: the new name introduced by the typedef.
        """
        self.old_name = old_name
        self.name = new_name
        super().__init__(*args, **kwargs)
    def inline_str(self, idt=None):
        # Inline usage prints the new (typedef'd) name only
        return self.name.inline_str(idt)
    def freestanding_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        format_string = '\n'+str(idt)+self.__format_string
        return format_string.format(
            old_name = self.old_name.inline_str(idt),
            new_name = self.name.inline_str(idt),
            side_comment = self.side_comment.inline_str(idt)
        )
class FunPtrTypedef(DelegatedTokenList, core.NonIterable):
    """This class represents a typedef of a C function pointer type:
    ``typedef ret (*name)(params);``
    """
    name = core.EnsureNode('name', TokenList)
    param_list = core.EnsureNode('param_list', TokenListContainer)
    return_type = core.EnsureNode('return_type', TokenList)
    __format_string = "typedef {return_type} (*{name})({param_list});{side_comment}"
    def __init__(self, name=None, return_type=None, param_list=None, *args, **kwargs):
        """
        :param name: the new type name.
        :param return_type: the return type of the function pointer.
        :param param_list: the list of parameter types.
        """
        self.name = name
        self.param_list = param_list
        self.return_type = return_type
        super().__init__(tokenlist_attr_name='name', *args, **kwargs)
    def freestanding_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        format_string = '\n'+str(idt)+self.__format_string
        # Bug fix: this class is not a NodeView, so there is no 'parent'
        # attribute — name/return_type/param_list are stored on the instance
        # itself in __init__ (compare Typedef.freestanding_str above).
        return format_string.format(
            name = self.name.inline_str(idt),
            return_type = self.return_type.inline_str(idt),
            param_list = ", ".join(param.inline_str(idt) for param in self.param_list),
            side_comment = self.side_comment.inline_str(idt)
        )
class OneLinePrepBase(Node, core.NonIterable):
    """Base class for one-line C preprocessor directives (``#define``, ``#include``...)."""
    directive = core.EnsureNode('directive', TokenList)
    _format_string = "#{directive} {param_list}{side_comment}"
    def __init__(self, directive=None, *args, **kwargs):
        """
        :param directive: the directive keyword (without the leading ``#``).
        """
        self.directive = directive
        super().__init__(*args, **kwargs)
    def _inline_str(self, param_list, idt=None):
        # Helper for subclasses: prints "#directive param1 param2 ..."
        param_list = core.listify(param_list)
        param_list = " ".join(param.inline_str(idt) for param in param_list)
        return self._format_string.format(
            directive = self.directive.inline_str(idt),
            param_list = param_list,
            side_comment = self.side_comment.inline_str(idt)
        )
class PrepDef(OneLinePrepBase):
    """This class represents a C ``#define`` directive."""
    name = core.EnsureNode('name', TokenList)
    value = core.EnsureNode('value', TokenList)
    def __init__(self, name=None, value=None, *args, **kwargs):
        """
        :param name: the macro name.
        :param value: the macro replacement value.
        """
        self.name = name
        self.value = value
        super().__init__("define", *args, **kwargs)
    def inline_str(self, idt=None):
        # Prints "#define name value"
        return self._inline_str((self.name, self.value), idt)
class PrepInclude(OneLinePrepBase):
    """This class represents a C ``#include`` directive."""
    header_path = core.EnsureNode('header_path', TokenList)
    def __init__(self, header_path=None, system=False, *args, **kwargs):
        """
        :param header_path: the path of the header to include.
        :param system: if True, use angle brackets (``<...>``) instead of
                       double quotes.
        """
        self.header_path = header_path
        self.system = system
        super().__init__("include", *args, **kwargs)
    def inline_str(self, idt=None):
        # System headers use <...>, local headers use "..."
        if self.system:
            processed_path = TokenList(('<', self.header_path, '>'))
        else:
            processed_path = TokenList(('"', self.header_path, '"'))
        return self._inline_str(processed_path, idt)
class PrepIf(StmtContainer):
    """``#if`` / ``#endif`` block wrapping a list of statements."""

    cond = core.EnsureNode('cond', TokenList)
    # Deliberately name-mangled: subclasses override it by assigning to
    # ``_PrepIf__format_string``.
    __format_string = "#if {cond}{side_comment}{stmt}{idt_nl}#endif //{cond}"

    def __init__(self, cond=None, indent_content=False, *args, **kwargs):
        self.cond = cond
        # indent_content: render the body one level deeper than the #if.
        self.indent_content = indent_content
        super().__init__(*args, **kwargs)

    def inline_str(self, idt=None):
        idt = core.Indentation.ensure_idt(idt)
        stmt_idt = idt
        if self.indent_content:
            # Work on a copy so the caller's indentation is untouched.
            stmt_idt = copy.copy(idt)
            stmt_idt.indent()
        return self.__format_string.format(
            cond=self.cond.inline_str(idt),
            stmt=super().inline_str(stmt_idt),
            side_comment=self.side_comment.inline_str(idt),
            idt_nl='\n' + str(idt),
        )
class PrepIfDef(PrepIf):
    """``#ifdef`` variant of :class:`PrepIf`."""
    # Overrides PrepIf's name-mangled format string so the inherited
    # inline_str emits #ifdef/#endif instead of #if/#endif.
    _PrepIf__format_string = "#ifdef {cond}{side_comment}{stmt}{idt_nl}#endif //ifdef {cond}"
class PrepIfNDef(PrepIf):
    """``#ifndef`` variant of :class:`PrepIf` (used for include guards)."""
    # Overrides PrepIf's name-mangled format string so the inherited
    # inline_str emits #ifndef/#endif instead of #if/#endif.
    _PrepIf__format_string = "#ifndef {cond}{side_comment}{stmt}{idt_nl}#endif //ifndef {cond}"
class BaseCom(Node, core.NonIterable):
    """Marker base class shared by all comment node types."""
    pass
class Com(TokenListContainer, BaseCom):
    """Multi-line ``/* ... */`` comment with optional automatic wrapping."""

    # String put at the front of the comment
    start_string = '/* '
    # String put at the end of the comment
    end_string = ' */'
    # Maximum line length when auto_wrap is enabled
    max_line_length = 80

    def __init__(self, node_list=None, auto_wrap=True, *args, **kwargs):
        # auto_wrap: re-flow the comment with textwrap when any line
        # exceeds max_line_length.
        self.auto_wrap = auto_wrap
        super().__init__(node_list, *args, **kwargs)

    def inline_str(self, idt=None):
        """Render the comment at indentation *idt*; '' when empty."""
        idt = core.Indentation.ensure_idt(idt)
        string = "\n".join(comment.inline_str(idt) for comment in self)
        if not string:
            return ''
        split_string = string.split("\n")
        first_line = split_string [0]
        last_line = split_string [-1]
        # If the first line is not empty, add a few spaces to indentation to
        # align the paragraphs correctly
        if first_line.strip():
            sub_idt = len(self.start_string)*" "
            start_string = self.start_string
        else:
            sub_idt = ""
            start_string = self.start_string.strip()
        joiner = "\n"+str(idt)+sub_idt
        # Likewise, drop the trailing padding when the last line is blank.
        if last_line.strip():
            end_string = self.end_string
        else:
            end_string = self.end_string.strip()
        # If the comment cannot fit on a single line and auto wrapping is enabled
        if self.auto_wrap and any(
            len(line) > self.max_line_length-len(start_string)-len(end_string)-len(str(idt))
            for line in split_string
        ):
            # NOTE(review): wrapping re-flows the joined text as one
            # paragraph (original newlines become wrap points) -- confirm
            # this is the intended behavior.
            string = "\n".join(textwrap.wrap(
                string,
                width=self.max_line_length,
                expand_tabs=False,
                replace_whitespace=False,
            ))
        string = start_string+string+end_string+self.side_comment.inline_str(idt)
        # Re-indent every line of the final comment body.
        string = string.replace("\n", joiner)
        return string

    def freestanding_str(self, idt=None):
        # A free-standing comment is preceded by a blank line.
        idt = core.Indentation.ensure_idt(idt)
        return '\n\n'+str(idt)+self.inline_str(idt)
class SingleLineCom(DelegatedTokenList, BaseCom):
    """Single-line ``//`` comment, typically appended after a statement."""

    content = core.EnsureNode('content', TokenList)
    start_string = ' //'

    def __init__(self, comment=None, *args, **kwargs):
        self.content = comment
        super().__init__(tokenlist_attr_name='content', *args, **kwargs)

    def inline_str(self, idt=None):
        # Prefix the delegated token-list rendering with " //".
        return self.start_string + super().inline_str(idt)

    # Renders identically whether inline or free-standing.
    freestanding_str = inline_str
class NewLine(Node):
    """Explicit blank line in the generated source output."""

    def inline_str(self, idt=None):
        # A newline followed by the current indentation string.
        return '\n' + str(core.Indentation.ensure_idt(idt))

    # A blank line renders the same in both contexts.
    freestanding_str = inline_str
class HeaderFile(PrepIfNDef):
    """A C header file body wrapped in an ``#ifndef`` include guard."""

    # Macro name used as the include guard.
    include_guard_define = core.EnsureNode('include_guard_define', TokenList)

    def __init__(self, filename=None, include_guard=None, template=None, node_list=None, *args, **kwargs):
        # Derive the guard macro from the file name unless given
        # explicitly, e.g. "my_file" -> "MY_FILE_H_".
        # NOTE(review): the ``template`` parameter is accepted but never
        # used here -- confirm whether it is consumed by a superclass.
        if include_guard is None and filename is not None:
            self.include_guard_define = core.format_string(filename, 'UPPER_UNDERSCORE_CASE')+'_H_'
        else:
            self.include_guard_define = include_guard
        # NOTE(review): the guard's #define is appended AFTER the user
        # content, so it is emitted at the end of the #ifndef block --
        # guards are conventionally defined first; confirm this ordering.
        node_list = core.listify(node_list)+[PrepDef(self.include_guard_define)]
        super().__init__(core.NodeAttrProxy(self, 'include_guard_define'), indent_content=False, node_list=node_list, *args, **kwargs)
| DouglasRaillard/BrownBat | brownbat/C.py | Python | lgpl-3.0 | 58,225 | [
"VisIt"
] | d56a480023064fde52311a6802f084a7ff61d9b4f7beea6e3b6afdc1423ede22 |
"""
Generalized linear models currently supports estimation using the one-parameter
exponential families
References
----------
Gill, Jeff. 2000. Generalized Linear Models: A Unified Approach.
SAGE QASS Series.
Green, PJ. 1984. "Iteratively reweighted least squares for maximum
likelihood estimation, and some robust and resistant alternatives."
Journal of the Royal Statistical Society, Series B, 46, 149-192.
Hardin, J.W. and Hilbe, J.M. 2007. "Generalized Linear Models and
Extensions." 2nd ed. Stata Press, College Station, TX.
McCullagh, P. and Nelder, J.A. 1989. "Generalized Linear Models." 2nd ed.
Chapman & Hall, Boca Raton.
"""
import numpy as np
from . import families
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import statsmodels.base.model as base
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
from statsmodels.tools.sm_exceptions import (PerfectSeparationError,
DomainWarning)
__all__ = ['GLM']
def _check_convergence(criterion, iteration, atol, rtol):
return np.allclose(criterion[iteration], criterion[iteration + 1],
atol=atol, rtol=rtol)
class GLM(base.LikelihoodModel):
__doc__ = """
Generalized Linear Models class
GLM inherits from statsmodels.base.model.LikelihoodModel
Parameters
-----------
endog : array-like
1d array of endogenous response variable. This array can be 1d or 2d.
Binomial family models accept a 2d array with two columns. If
supplied, each observation is expected to be [success, failure].
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
family : family class instance
The default is Gaussian. To specify the binomial distribution
family = sm.family.Binomial()
Each family can take a link instance as an argument. See
statsmodels.family.family for more information.
offset : array-like or None
An offset to be included in the model. If provided, must be
an array whose length is the number of rows in exog.
exposure : array-like or None
Log(exposure) will be added to the linear prediction in the model. Exposure
is only valid if the log link is used. If provided, it must be an array
with the same length as endog.
freq_weights : array-like
1d array of frequency weights. The default is None. If None is selected
or a blank value, then the algorithm will replace with an array of 1's
with length equal to the endog.
WARNING: Using weights is not verified yet for all possible options
and results, see Notes.
%(extra_params)s
Attributes
-----------
df_model : float
`p` - 1, where `p` is the number of regressors including the intercept.
df_resid : float
The number of observation `n` minus the number of regressors `p`.
endog : array
See Parameters.
exog : array
See Parameters.
family : family class instance
A pointer to the distribution family of the model.
freq_weights : array
See Parameters.
mu : array
The estimated mean response of the transformed variable.
n_trials : array
See Parameters.
normalized_cov_params : array
`p` x `p` normalized covariance of the design / exogenous data.
pinv_wexog : array
For GLM this is just the pseudo inverse of the original design.
scale : float
The estimate of the scale / dispersion. Available after fit is called.
scaletype : str
The scaling used for fitting the model. Available after fit is called.
weights : array
The value of the weights after the last iteration of fit.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.scotland.load()
>>> data.exog = sm.add_constant(data.exog)
Instantiate a gamma family model with the default link function.
>>> gamma_model = sm.GLM(data.endog, data.exog,
... family=sm.families.Gamma())
>>> gamma_results = gamma_model.fit()
>>> gamma_results.params
array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,
-0.00000015, -0.00051868, -0.00000243])
>>> gamma_results.scale
0.0035842831734919055
>>> gamma_results.deviance
0.087388516416999198
>>> gamma_results.pearson_chi2
0.086022796163805704
>>> gamma_results.llf
-83.017202161073527
See also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
Poisson | x x x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer to are already
arrays and these arrays are changed, endog and exog will change.
Using frequency weights: Frequency weights produce the same results as repeating
observations by the frequencies (if those are integers). This is verified for all
basic results with nonrobust or heteroscedasticity robust ``cov_type``. Other
robust covariance types have not yet been verified, and at least the small sample
correction is currently not based on the correct total frequency count.
It is not yet decided whether all the different types of residuals will be
based on weighted residuals. Currently, deviance and pearson residuals,
as well as working and response residuals are weighted, while Anscombe
residuals are unweighted. Consequently, Pearson and deviance residuals
provide a correct measure for the scale and dispersion, but will be
proportional to the frequency weights for outlier measures.
**Attributes**
df_model : float
Model degrees of freedom is equal to p - 1, where p is the number
of regressors. Note that the intercept is not reported as a
degree of freedom.
df_resid : float
Residual degrees of freedom is equal to the number of observation n
minus the number of regressors p.
endog : array
See above. Note that `endog` is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exposure : array-like
Include ln(exposure) in model with coefficient constrained to 1. Can
only be used if the link is the logarithm function.
exog : array
See above. Note that `exog` is a reference to the data so that if
data is already an array and it is changed, then `exog` changes
as well.
freq_weights : array
See above. Note that `freq_weights` is a reference to the data so that
if data is already an array and it is changed, then `freq_weights`
changes as well.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
The distribution family of the model. Can be any family in
statsmodels.families. Default is Gaussian.
mu : array
The mean response of the transformed variable. `mu` is the value of
the inverse of the link function at lin_pred, where lin_pred is the
linear predicted value of the WLS fit of the transformed variable.
`mu` is only available after fit is called. See
statsmodels.families.family.fitted of the distribution family for more
information.
n_trials : array
See above. Note that `n_trials` is a reference to the data so that if
data is already an array and it is changed, then `n_trials` changes
as well. `n_trials` is the number of binomial trials and only available
with that distribution. See statsmodels.families.Binomial for more
information.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
offset : array-like
Include offset in model with coefficient constrained to 1.
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
GLM has no whiten method, so this is just the pseudo inverse of the
design.
The pseudoinverse is approximately equal to (X.T X)^(-1)X.T
scale : float
The estimate of the scale / dispersion of the model fit. Only
available after fit is called. See GLM.fit and GLM.estimate_scale
for more information.
scaletype : str
The scaling used for fitting the model. This is only available after
fit is called. The default is None. See GLM.fit for more information.
weights : array
The value of the weights after the last iteration of fit. Only
available after fit is called. See statsmodels.families.family for
the specific distribution weighting functions.
""" % {'extra_params' : base._missing_param_doc}
def __init__(self, endog, exog, family=None, offset=None,
             exposure=None, freq_weights=None, missing='none', **kwargs):
    # Warn (without failing) when the requested link is not among the
    # links known to respect the family's domain.
    if (family is not None) and not isinstance(family.link, tuple(family.safe_links)):
        import warnings
        warnings.warn("The %s link function does not respect the domain of the %s family." %
                      (family.link.__class__.__name__, family.__class__.__name__),
                      DomainWarning)

    # Exposure enters the linear predictor as log(exposure) with its
    # coefficient fixed at 1 (only valid with the log link).
    if exposure is not None:
        exposure = np.log(exposure)
    if offset is not None:  # this should probably be done upstream
        offset = np.asarray(offset)

    self.freq_weights = freq_weights

    super(GLM, self).__init__(endog, exog, missing=missing,
                              offset=offset, exposure=exposure,
                              freq_weights=freq_weights, **kwargs)
    # Validates family/offset/exposure/freq_weights consistency and fills
    # in the default family and unit frequency weights.
    self._check_inputs(family, self.offset, self.exposure, self.endog,
                       self.freq_weights)
    # Remove the attributes again when they were not supplied so that
    # ``hasattr`` checks elsewhere (e.g. ``predict``) behave correctly.
    if offset is None:
        delattr(self, 'offset')
    if exposure is None:
        delattr(self, 'exposure')
    self.nobs = self.endog.shape[0]

    # things to remove_data
    self._data_attr.extend(['weights', 'pinv_wexog', 'mu', 'freq_weights',
                            '_offset_exposure', 'n_trials'])
    # register kwds for __init__, offset and exposure are added by super
    self._init_keys.append('family')

    # For a Binomial family this may reshape endog and set n_trials.
    self._setup_binomial()

    # Construct a combined offset/exposure term.  Note that
    # exposure has already been logged if present.
    offset_exposure = 0.
    if hasattr(self, 'offset'):
        offset_exposure = self.offset
    if hasattr(self, 'exposure'):
        offset_exposure = offset_exposure + self.exposure
    self._offset_exposure = offset_exposure

    # The scale type is chosen later, in ``fit``.
    self.scaletype = None
def initialize(self):
    """
    Initialize a generalized linear model.
    """
    # TODO: intended for public use?
    # History of the IRLS iterations; params/deviance start at inf so the
    # first convergence check always fails.
    self.history = {'fittedvalues' : [],
                    'params' : [np.inf],
                    'deviance' : [np.inf]}

    # GLM has no whiten step, so the pseudoinverse of exog also yields
    # the normalized covariance: X^+ (X^+)' ~ (X'X)^-1.
    self.pinv_wexog = np.linalg.pinv(self.exog)
    self.normalized_cov_params = np.dot(self.pinv_wexog,
                                        np.transpose(self.pinv_wexog))

    # Model dof excludes the intercept.
    self.df_model = np_matrix_rank(self.exog) - 1

    # With full-length frequency weights the effective number of
    # observations is the sum of the weights.
    if (self.freq_weights is not None) and \
       (self.freq_weights.shape[0] == self.endog.shape[0]):
        self.wnobs = self.freq_weights.sum()
        self.df_resid = self.wnobs - self.df_model - 1
    else:
        self.wnobs = self.exog.shape[0]
        self.df_resid = self.exog.shape[0] - self.df_model - 1
def _check_inputs(self, family, offset, exposure, endog, freq_weights):
    """Validate family/offset/exposure/freq_weights and apply defaults."""

    # Default family is Gaussian
    if family is None:
        family = families.Gaussian()
    self.family = family

    # Exposure is only meaningful with the log link, and must align
    # with endog.
    if exposure is not None:
        if not isinstance(self.family.link, families.links.Log):
            raise ValueError("exposure can only be used with the log "
                             "link function")
        elif exposure.shape[0] != endog.shape[0]:
            raise ValueError("exposure is not the same length as endog")

    if offset is not None:
        if offset.shape[0] != endog.shape[0]:
            raise ValueError("offset is not the same length as endog")

    if freq_weights is not None:
        if freq_weights.shape[0] != endog.shape[0]:
            raise ValueError("freq weights not the same length as endog")
        if len(freq_weights.shape) > 1:
            raise ValueError("freq weights has too many dimensions")

    # Default to unit weights when none were given.
    if self.freq_weights is None:
        self.freq_weights = np.ones((endog.shape[0]))
    # NOTE(review): a scalar weight is broadcast to full length only when
    # it is > 1 -- confirm scalar weights <= 1 are intentionally left
    # as scalars.
    if np.shape(self.freq_weights) == () and self.freq_weights > 1:
        self.freq_weights = (self.freq_weights *
                             np.ones((endog.shape[0])))
def _get_init_kwds(self):
    """Return the kwds needed to re-create the model.

    Exposure is stored on the model in log form, so it is exponentiated
    back to the original scale here.
    """
    # this is a temporary fixup because exposure has been transformed
    # see #1609, copied from discrete_model.CountModel
    kwds = super(GLM, self)._get_init_kwds()
    exposure = kwds.get('exposure')
    if exposure is not None:
        kwds['exposure'] = np.exp(exposure)
    return kwds
def loglike_mu(self, mu, scale=1.):
    """
    Evaluate the log-likelihood for a generalized linear model.

    Parameters
    ----------
    mu : array
        Fitted mean response values.
    scale : float
        The dispersion/scale of the model. Defaults to 1.

    Returns
    -------
    llf : float
        Value of the family log-likelihood evaluated at ``mu``.
    """
    # Bug fix: ``family.loglike`` takes (endog, mu, freq_weights, scale),
    # exactly as called in ``loglike`` below.  The previous call passed
    # the first two arguments swapped and an extraneous ``self.exog``.
    return self.family.loglike(self.endog, mu, self.freq_weights, scale)
def loglike(self, params, scale=None):
    """
    Evaluate the log-likelihood for a generalized linear model.
    """
    # Mean response implied by the parameters (offset/exposure included).
    linear_pred = np.dot(self.exog, params) + self._offset_exposure
    mean_response = self.family.link.inverse(linear_pred)
    if scale is None:
        scale = self.estimate_scale(mean_response)
    return self.family.loglike(self.endog, mean_response,
                               self.freq_weights, scale)
def score_obs(self, params, scale=None):
    """score first derivative of the loglikelihood for each observation.

    Parameters
    ----------
    params : ndarray
        parameter at which score is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.

    Returns
    -------
    score_obs : ndarray, 2d
        The first derivative of the loglikelihood function evaluated at
        params for each observation.
    """
    # Broadcast the per-observation weight across the columns of exog.
    per_obs_weight = self.score_factor(params, scale=scale)
    return self.exog * per_obs_weight[:, None]
def score(self, params, scale=None):
    """score, first derivative of the loglikelihood function

    Parameters
    ----------
    params : ndarray
        parameter at which score is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.

    Returns
    -------
    score : ndarray_1d
        The first derivative of the loglikelihood function calculated as
        the sum of `score_obs`
    """
    # Total score: sum the per-observation scores over observations.
    return np.sum(self.score_obs(params, scale=scale), axis=0)
def score_factor(self, params, scale=None):
    """weights for score for each observation

    This can be considered as score residuals.

    Parameters
    ----------
    params : ndarray
        parameter at which Hessian is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.

    Returns
    -------
    score_factor : ndarray_1d
        A 1d weight vector used in the calculation of the score_obs.
        The score_obs are obtained by `score_factor[:, None] * exog`
    """
    mu = self.predict(params)
    if scale is None:
        scale = self.estimate_scale(mu)

    # (y - mu) / g'(mu) / Var(mu), weighted by the frequency weights.
    factor = (self.endog - mu) / self.family.link.deriv(mu)
    factor = factor / self.family.variance(mu)
    factor = factor * self.freq_weights

    if not scale == 1:
        factor = factor / scale

    return factor
def hessian_factor(self, params, scale=None, observed=True):
    """Weights for calculating Hessian

    Parameters
    ----------
    params : ndarray
        parameter at which Hessian is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.
    observed : bool
        If True, then the observed Hessian is returned. If false then the
        expected information matrix is returned.

    Returns
    -------
    hessian_factor : ndarray, 1d
        A 1d weight vector used in the calculation of the Hessian.
        The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
    """
    # calculating eim_factor
    mu = self.predict(params)
    if scale is None:
        scale = self.estimate_scale(mu)

    # Expected-information factor: 1 / (g'(mu)^2 * Var(mu)).
    eim_factor = 1 / (self.family.link.deriv(mu)**2 *
                      self.family.variance(mu))
    # NOTE(review): n_trials enters here but not in score_factor --
    # confirm this asymmetry is intended.
    eim_factor *= self.freq_weights * self.n_trials

    if not observed:
        if not scale == 1:
            eim_factor /= scale
        return eim_factor

    # calculating oim_factor, eim_factor is with scale=1
    score_factor = self.score_factor(params, scale=1.)
    if eim_factor.ndim > 1 or score_factor.ndim > 1:
        raise RuntimeError('something wrong')

    # Curvature correction distinguishing the observed information from
    # the expected information.
    tmp = self.family.variance(mu) * self.family.link.deriv2(mu)
    tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)

    tmp = score_factor * eim_factor * tmp
    # correct for duplicate freq_weights in oim_factor and score_factor
    tmp /= self.freq_weights
    oim_factor = eim_factor * (1 + tmp)

    if tmp.ndim > 1:
        raise RuntimeError('something wrong')

    if not scale == 1:
        oim_factor /= scale

    return oim_factor
def hessian(self, params, scale=None, observed=True):
    """Hessian, second derivative of loglikelihood function

    Parameters
    ----------
    params : ndarray
        parameter at which Hessian is evaluated
    scale : None or float
        If scale is None, then the default scale will be calculated.
        Default scale is defined by `self.scaletype` and set in fit.
        If scale is not None, then it is used as a fixed scale.
    observed : bool
        If True, then the observed Hessian is returned. If false then the
        expected information matrix is returned.

    Returns
    -------
    hessian : ndarray
        Hessian, i.e. observed information, or expected information matrix.
    """
    # -(X' W X) with W the diagonal hessian-factor weights.
    weights = self.hessian_factor(params, scale=scale, observed=observed)
    return -np.dot(self.exog.T * weights, self.exog)
def information(self, params, scale=None):
    """
    Fisher information matrix.

    Equal to minus the expected Hessian (``observed=False``).
    """
    return self.hessian(params, scale=scale, observed=False)
def score_test(self, params_constrained, k_constraints=None,
               exog_extra=None, observed=True):
    """score test for restrictions or for omitted variables

    The covariance matrix for the score is based on the Hessian, i.e.
    observed information matrix or optionally on the expected information
    matrix..

    Parameters
    ----------
    params_constrained : array_like
        estimated parameter of the restricted model. This can be the
        parameter estimate for the current when testing for omitted
        variables.
    k_constraints : int or None
        Number of constraints that were used in the estimation of params
        restricted relative to the number of exog in the model.
        This must be provided if no exog_extra are given. If exog_extra is
        not None, then k_constraints is assumed to be zero if it is None.
    exog_extra : None or array_like
        Explanatory variables that are jointly tested for inclusion in the
        model, i.e. omitted variables.
    observed : bool
        If True, then the observed Hessian is used in calculating the
        covariance matrix of the score. If false then the expected
        information matrix is used.

    Returns
    -------
    chi2_stat : float
        chisquare statistic for the score test
    p-value : float
        P-value of the score test based on the chisquare distribution.
    df : int
        Degrees of freedom used in the p-value calculation. This is equal
        to the number of constraints.

    Notes
    -----
    not yet verified for case with scale not equal to 1.
    """
    if exog_extra is None:
        # Testing restrictions within the current design: the caller
        # must say how many constraints were imposed.
        if k_constraints is None:
            raise ValueError('if exog_extra is None, then k_constraints'
                             'needs to be given')

        score = self.score(params_constrained)
        hessian = self.hessian(params_constrained, observed=observed)

    else:
        # Testing omitted variables: augment the design with exog_extra
        # and evaluate score/hessian on the augmented design while the
        # extra coefficients are implicitly zero.
        #exog_extra = np.asarray(exog_extra)
        if k_constraints is None:
            k_constraints = 0

        ex = np.column_stack((self.exog, exog_extra))
        k_constraints += ex.shape[1] - self.exog.shape[1]

        score_factor = self.score_factor(params_constrained)
        score = (score_factor[:, None] * ex).sum(0)
        hessian_factor = self.hessian_factor(params_constrained,
                                             observed=observed)
        hessian = -np.dot(ex.T * hessian_factor, ex)

    from scipy import stats
    # TODO check sign, why minus?
    chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))
    pval = stats.chi2.sf(chi2stat, k_constraints)
    # return a stats results instance instead?  Contrast?
    return chi2stat, pval, k_constraints
def _update_history(self, tmp_result, mu, history):
    """
    Helper method to update history during iterative fit.
    """
    # Record the current parameter vector and the deviance at mu.
    deviance = self.family.deviance(self.endog, mu, self.freq_weights)
    history['params'].append(tmp_result.params)
    history['deviance'].append(deviance)
    return history
def estimate_scale(self, mu):
    """
    Estimates the dispersion/scale.

    Type of scale can be chose in the fit method.

    Parameters
    ----------
    mu : array
        mu is the mean response estimate

    Returns
    -------
    Estimate of scale

    Notes
    -----
    The default scale for Binomial and Poisson families is 1. The default
    for the other families is Pearson's Chi-Square estimate.

    See also
    --------
    statsmodels.glm.fit for more information
    """
    if not self.scaletype:
        # One-parameter families have a fixed scale of 1; otherwise
        # default to the Pearson chi-squared estimate.
        if isinstance(self.family, (families.Binomial, families.Poisson)):
            return 1.
        return self._estimate_x2_scale(mu)

    if isinstance(self.scaletype, float):
        return np.array(self.scaletype)

    if isinstance(self.scaletype, str):
        if self.scaletype.lower() == 'x2':
            return self._estimate_x2_scale(mu)
        elif self.scaletype.lower() == 'dev':
            # Deviance divided by the residual degrees of freedom.
            return (self.family.deviance(self.endog, mu,
                                         self.freq_weights) /
                    (self.df_resid))
        else:
            raise ValueError("Scale %s with type %s not understood" %
                             (self.scaletype, type(self.scaletype)))
    else:
        raise ValueError("Scale %s with type %s not understood" %
                         (self.scaletype, type(self.scaletype)))

def _estimate_x2_scale(self, mu):
    # Pearson's chi-squared statistic divided by the residual degrees of
    # freedom.  Factored out: this expression previously appeared three
    # times verbatim in estimate_scale.
    resid = self.endog - mu
    return ((self.freq_weights * (np.power(resid, 2) /
             self.family.variance(mu))).sum() /
            (self.df_resid))
def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):
    """
    Tweedie specific function to estimate scale and the variance parameter.
    The variance parameter is also referred to as p, xi, or shape.

    Parameters
    ----------
    mu : array-like
        Fitted mean response variable
    method : str, defaults to 'brentq'
        Scipy optimizer used to solve the Pearson equation. Only brentq
        currently supported.
    low : float, optional
        Low end of the bracketing interval [a,b] to be used in the search
        for the power. Defaults to 1.01.
    high : float, optional
        High end of the bracketing interval [a,b] to be used in the search
        for the power. Defaults to 5.

    Returns
    -------
    power : float
        The estimated shape or power
    """
    if method != 'brentq':
        raise NotImplementedError('Only brentq can currently be used')
    from scipy.optimize import brentq

    def psi_p(power, mu):
        # Pearson-type scale estimate for the candidate power, then the
        # estimating-equation value whose root is the Tweedie power.
        scale = ((self.freq_weights * (self.endog - mu) ** 2 /
                 (mu ** power)).sum() / self.df_resid)
        return (np.sum(self.freq_weights * ((self.endog - mu) ** 2 /
                       (scale * (mu ** power)) - 1) *
                       np.log(mu)) / self.freq_weights.sum())

    # Bug fix: ``args`` must be a tuple.  ``args=(mu)`` is just ``mu``,
    # and an ndarray there would be star-unpacked into one argument per
    # element by the root finder; ``(mu,)`` passes the array intact.
    power = brentq(psi_p, low, high, args=(mu,))

    return power
def predict(self, params, exog=None, exposure=None, offset=None,
            linear=False):
    """
    Return predicted values for a design matrix

    Parameters
    ----------
    params : array-like
        Parameters / coefficients of a GLM.
    exog : array-like, optional
        Design / exogenous data. Is exog is None, model exog is used.
    exposure : array-like, optional
        Exposure time values, only can be used with the log link
        function.  See notes for details.
    offset : array-like, optional
        Offset values.  See notes for details.
    linear : bool
        If True, returns the linear predicted values.  If False,
        returns the value of the inverse of the model's link function at
        the linear predicted values.

    Returns
    -------
    An array of fitted values

    Notes
    -----
    Any `exposure` and `offset` provided here take precedence over
    the `exposure` and `offset` used in the model fit.  If `exog`
    is passed as an argument here, then any `exposure` and
    `offset` values in the fit will be ignored.

    Exposure values must be strictly positive.
    """
    # Fall back to the fit-time offset only when neither an offset nor a
    # new design matrix was supplied.
    if offset is None:
        if exog is None and hasattr(self, 'offset'):
            offset = self.offset
        else:
            offset = 0.

    if exposure is not None and not isinstance(self.family.link,
                                               families.links.Log):
        raise ValueError("exposure can only be used with the log link function")

    # Same precedence rule for exposure; the stored value is already
    # on the log scale, a user-supplied one is logged here.
    if exposure is None:
        if exog is None and hasattr(self, 'exposure'):
            exposure = self.exposure
        else:
            exposure = 0.
    else:
        exposure = np.log(exposure)

    if exog is None:
        exog = self.exog

    linpred = np.dot(exog, params) + offset + exposure
    return linpred if linear else self.family.fitted(linpred)
def get_distribution(self, params, scale=1, exog=None, exposure=None,
                     offset=None):
    """
    Returns a random number generator for the predictive distribution.

    Parameters
    ----------
    params : array-like
        The model parameters.
    scale : scalar
        The scale parameter.
    exog : array-like
        The predictor variable matrix.

    Returns a frozen random number generator object.  Use the
    ``rvs`` method to generate random values.

    Notes
    -----
    Due to the behavior of ``scipy.stats.distributions objects``,
    the returned random number generator must be called with
    ``gen.rvs(n)`` where ``n`` is the number of observations in
    the data set used to fit the model.  If any other value is
    used for ``n``, misleading results will be produced.
    """
    import scipy.stats.distributions as dist

    fitted = self.predict(params, exog, exposure, offset, linear=False)
    family = self.family

    if isinstance(family, families.Gaussian):
        return dist.norm(loc=fitted, scale=np.sqrt(scale))
    if isinstance(family, families.Binomial):
        return dist.binom(n=1, p=fitted)
    if isinstance(family, families.Poisson):
        return dist.poisson(mu=fitted)
    if isinstance(family, families.Gamma):
        # Shape/scale parametrization: mean = alpha * scale = fitted.
        alpha = fitted / float(scale)
        return dist.gamma(alpha, scale=scale)
    raise ValueError("get_distribution not implemented for %s" % family.name)
def _setup_binomial(self):
    # this checks what kind of data is given for Binomial.
    # family will need a reference to endog if this is to be removed from
    # preprocessing
    self.n_trials = np.ones(self.endog.shape[0])  # For binomial
    if isinstance(self.family, families.Binomial):
        # The Binomial family may reshape endog (e.g. success/failure
        # counts) and report the number of trials per observation.
        endog, n_trials = self.family.initialize(self.endog,
                                                 self.freq_weights)
        self.endog = endog
        self.n_trials = n_trials
def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,
        scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,
        full_output=True, disp=False, max_start_irls=3, **kwargs):
    """
    Fits a generalized linear model for a given family.

    Parameters
    ----------
    start_params : array-like, optional
        Initial guess of the solution for the loglikelihood maximization.
        The default is family-specific and is given by the
        ``family.starting_mu(endog)``. If start_params is given then the
        initial mean will be calculated as ``np.dot(exog, start_params)``.
    maxiter : int, optional
        Default is 100.
    method : string
        Default is 'IRLS' for iteratively reweighted least squares.
        Otherwise gradient optimization is used.
    tol : float
        Convergence tolerance.  Default is 1e-8.
    scale : string or float, optional
        `scale` can be 'X2', 'dev', or a float
        The default value is None, which uses `X2` for Gamma, Gaussian,
        and Inverse Gaussian.
        `X2` is Pearson's chi-square divided by `df_resid`.
        The default is 1 for the Binomial and Poisson families.
        `dev` is the deviance divided by df_resid
    cov_type : string
        The type of parameter estimate covariance matrix to compute.
    cov_kwds : dict-like
        Extra arguments for calculating the covariance of the parameter
        estimates.
    use_t : bool
        If True, the Student t-distribution is used for inference.
    full_output : bool, optional
        Set to True to have all available output in the Results object's
        mle_retvals attribute. The output is dependent on the solver.
        See LikelihoodModelResults notes section for more information.
        Not used if method is IRLS.
    disp : bool, optional
        Set to True to print convergence messages.  Not used if method is
        IRLS.
    max_start_irls : int
        The number of IRLS iterations used to obtain starting
        values for gradient optimization.  Only relevant if
        `method` is set to something other than 'IRLS'.

    If IRLS fitting used, the following additional parameters are
    available:

    atol : float, optional
        The absolute tolerance criterion that must be satisfied. Defaults
        to ``tol``. Convergence is attained when:
        :math:`rtol * prior + atol > abs(current - prior)`
    rtol : float, optional
        The relative tolerance criterion that must be satisfied. Defaults
        to 0 which means ``rtol`` is not used. Convergence is attained
        when:
        :math:`rtol * prior + atol > abs(current - prior)`
    tol_criterion : str, optional
        Defaults to ``'deviance'``. Can optionally be ``'params'``.
    """
    # Remember the scale choice; estimate_scale reads it later.
    self.scaletype = scale

    # Dispatch on the fitting algorithm: IRLS or a gradient optimizer.
    if method.lower() == "irls":
        return self._fit_irls(start_params=start_params, maxiter=maxiter,
                              tol=tol, scale=scale, cov_type=cov_type,
                              cov_kwds=cov_kwds, use_t=use_t, **kwargs)
    else:
        return self._fit_gradient(start_params=start_params,
                                  method=method,
                                  maxiter=maxiter,
                                  tol=tol, scale=scale,
                                  full_output=full_output,
                                  disp=disp, cov_type=cov_type,
                                  cov_kwds=cov_kwds, use_t=use_t,
                                  max_start_irls=max_start_irls,
                                  **kwargs)
def _fit_gradient(self, start_params=None, method="newton",
                  maxiter=100, tol=1e-8, full_output=True,
                  disp=True, scale=None, cov_type='nonrobust',
                  cov_kwds=None, use_t=None, max_start_irls=3,
                  **kwargs):
    """
    Fits a generalized linear model for a given family iteratively
    using the scipy gradient optimizers.
    """
    # Warm-start the optimizer with a few IRLS iterations unless the
    # caller provided explicit starting parameters.
    if (max_start_irls > 0) and (start_params is None):
        irls_rslt = self._fit_irls(start_params=start_params, maxiter=max_start_irls,
                                   tol=tol, scale=scale, cov_type=cov_type,
                                   cov_kwds=cov_kwds, use_t=use_t, **kwargs)
        start_params = irls_rslt.params

    # Generic maximum-likelihood optimization from the base class.
    rslt = super(GLM, self).fit(start_params=start_params, tol=tol,
                                maxiter=maxiter, full_output=full_output,
                                method=method, disp=disp, **kwargs)

    # Re-estimate the scale at the optimum and wrap results in the
    # GLM-specific results class.
    mu = self.predict(rslt.params)
    scale = self.estimate_scale(mu)

    glm_results = GLMResults(self, rslt.params,
                             rslt.normalized_cov_params / scale,
                             scale,
                             cov_type=cov_type, cov_kwds=cov_kwds,
                             use_t=use_t)

    # TODO: iteration count is not always available
    history = {'iteration': 0}
    if full_output:
        glm_results.mle_retvals = rslt.mle_retvals
        if 'iterations' in rslt.mle_retvals:
            history['iteration'] = rslt.mle_retvals['iterations']
    glm_results.method = method
    glm_results.fit_history = history

    return GLMResultsWrapper(glm_results)
def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Fits a generalized linear model for a given family using
iteratively reweighted least squares (IRLS).
"""
atol = kwargs.get('atol')
rtol = kwargs.get('rtol', 0.)
tol_criterion = kwargs.get('tol_criterion', 'deviance')
atol = tol if atol is None else atol
endog = self.endog
wlsexog = self.exog
if start_params is None:
start_params = np.zeros(self.exog.shape[1], np.float)
mu = self.family.starting_mu(self.endog)
lin_pred = self.family.predict(mu)
else:
lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure
mu = self.family.fitted(lin_pred)
dev = self.family.deviance(self.endog, mu, self.freq_weights)
if np.isnan(dev):
raise ValueError("The first guess on the deviance function "
"returned a nan. This could be a boundary "
" problem and should be reported.")
# first guess on the deviance is assumed to be scaled by 1.
# params are none to start, so they line up with the deviance
history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])
converged = False
criterion = history[tol_criterion]
# This special case is used to get the likelihood for a specific
# params vector.
if maxiter == 0:
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
wls_results = lm.RegressionResults(self, start_params, None)
iteration = 0
for iteration in range(maxiter):
self.weights = (self.freq_weights * self.n_trials *
self.family.weights(mu))
wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)
- self._offset_exposure)
wls_results = lm.WLS(wlsendog, wlsexog, self.weights).fit()
lin_pred = np.dot(self.exog, wls_results.params) + self._offset_exposure
mu = self.family.fitted(lin_pred)
history = self._update_history(wls_results, mu, history)
self.scale = self.estimate_scale(mu)
if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
converged = _check_convergence(criterion, iteration + 1, atol,
rtol)
if converged:
break
self.mu = mu
glm_results = GLMResults(self, wls_results.params,
wls_results.normalized_cov_params,
self.scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
glm_results.method = "IRLS"
history['iteration'] = iteration + 1
glm_results.fit_history = history
glm_results.converged = converged
return GLMResultsWrapper(glm_results)
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, refit=False, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method :
Only the `elastic_net` approach is currently implemented.
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array-like
Starting values for `params`.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
Returns
-------
An array, or a GLMResults object of the same type returned by `fit`.
Notes
-----
The penalty is the ``elastic net`` penalty, which is a
combination of L1 and L2 penalties.
The function that is minimized is: ..math::
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
"""
from statsmodels.base.elastic_net import fit_elasticnet
if method != "elastic_net":
raise ValueError("method for fit_regularied must be elastic_net")
defaults = {"maxiter" : 50, "L1_wt" : 1, "cnvrg_tol" : 1e-10,
"zero_tol" : 1e-10}
defaults.update(kwargs)
result = fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
refit=refit,
**defaults)
self.mu = self.predict(result.params)
self.scale = self.estimate_scale(self.mu)
return result
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
from patsy import DesignInfo
from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
# TODO: add start_params option, need access to tranformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
#create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0) # we get a wrapper back
res._results.params = params
res._results.normalized_cov_params = cov
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = lc
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
# TODO: the next is not the best. history should bin in results
res._results.model.history = res_constr.model.history
return res
class GLMResults(base.LikelihoodModelResults):
    """
    Class to contain GLM results.

    GLMResults inherits from statsmodels.LikelihoodModelResults

    Parameters
    ----------
    See statsmodels.LikelihoodModelResults

    Returns
    -------
    **Attributes**

    aic : float
        Akaike Information Criterion
        -2 * `llf` + 2*(`df_model` + 1)
    bic : float
        Bayes Information Criterion
        `deviance` - `df_resid` * log(`nobs`)
    deviance : float
        See statsmodels.families.family for the distribution-specific deviance
        functions.
    df_model : float
        See GLM.df_model
    df_resid : float
        See GLM.df_resid
    fit_history : dict
        Contains information about the iterations. Its keys are `iterations`,
        `deviance` and `params`.
    fittedvalues : array
        Linear predicted values for the fitted model.
        dot(exog, params)
    llf : float
        Value of the loglikelihood function evaluated at params.
        See statsmodels.families.family for distribution-specific
        loglikelihoods.
    model : class instance
        Pointer to GLM model instance that called fit.
    mu : array
        See GLM docstring.
    nobs : float
        The number of observations n.
    normalized_cov_params : array
        See GLM docstring
    null_deviance : float
        The value of the deviance function for the model fit with a constant
        as the only regressor.
    params : array
        The coefficients of the fitted model. Note that interpretation
        of the coefficients often depends on the distribution family and the
        data.
    pearson_chi2 : array
        Pearson's Chi-Squared statistic is defined as the sum of the squares
        of the Pearson residuals.
    pinv_wexog : array
        See GLM docstring.
    pvalues : array
        The two-tailed p-values for the parameters.
    resid_anscombe : array
        Anscombe residuals. See statsmodels.families.family for distribution-
        specific Anscombe residuals.
    resid_deviance : array
        Deviance residuals. See statsmodels.families.family for distribution-
        specific deviance residuals.
    resid_pearson : array
        Pearson residuals. The Pearson residuals are defined as
        (`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution
        specific variance function. See statsmodels.families.family and
        statsmodels.families.varfuncs for more information.
    resid_response : array
        Response residuals. The response residuals are defined as
        `endog` - `fittedvalues`
    resid_working : array
        Working residuals. The working residuals are defined as
        `resid_response`/link'(`mu`). See statsmodels.family.links for the
        derivatives of the link functions. They are defined analytically.
    scale : float
        The estimate of the scale / dispersion for the model fit.
        See GLM.fit and GLM.estimate_scale for more information.
    stand_errors : array
        The standard errors of the fitted GLM. #TODO still named bse

    See Also
    --------
    statsmodels.base.model.LikelihoodModelResults
    """

    def __init__(self, model, params, normalized_cov_params, scale,
                 cov_type='nonrobust', cov_kwds=None, use_t=None):
        super(GLMResults, self).__init__(model, params,
                                         normalized_cov_params=
                                         normalized_cov_params, scale=scale)
        self.family = model.family
        self._endog = model.endog
        self.nobs = model.endog.shape[0]
        self._freq_weights = model.freq_weights
        # n_trials is only meaningful for the Binomial family; all other
        # families use a factor of 1.
        if isinstance(self.family, families.Binomial):
            self._n_trials = self.model.n_trials
        else:
            self._n_trials = 1
        self.df_resid = model.df_resid
        self.df_model = model.df_model
        self.pinv_wexog = model.pinv_wexog
        self._cache = resettable_cache()
        # are these intermediate results needed or can we just
        # call the model's attributes?
        # for remove data and pickle without large arrays
        self._data_attr.extend(['results_constrained', '_freq_weights'])
        self.data_in_cache = getattr(self, 'data_in_cache', [])
        self.data_in_cache.extend(['null', 'mu'])
        self._data_attr_model = getattr(self, '_data_attr_model', [])
        self._data_attr_model.append('mu')
        # robust covariance
        from statsmodels.base.covtype import get_robustcov_results
        if use_t is None:
            self.use_t = False  # TODO: class default
        else:
            self.use_t = use_t
        if cov_type == 'nonrobust':
            self.cov_type = 'nonrobust'
            self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                             'covariance matrix of the errors is correctly ' +
                             'specified.'}
        else:
            # Delegate robust covariance setup; modifies `self` in place.
            if cov_kwds is None:
                cov_kwds = {}
            get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                  use_t=use_t, **cov_kwds)

    @cache_readonly
    def resid_response(self):
        # Response residuals, scaled by the number of trials (Binomial).
        return self._n_trials * (self._endog-self.mu)

    @cache_readonly
    def resid_pearson(self):
        # (endog - mu) / sqrt(V(mu)), with a sqrt(n_trials) factor.
        return (np.sqrt(self._n_trials) * (self._endog-self.mu) /
                np.sqrt(self.family.variance(self.mu)))

    @cache_readonly
    def resid_working(self):
        # Isn't self.resid_response is already adjusted by _n_trials?
        val = (self.resid_response / self.family.link.deriv(self.mu))
        val *= self._n_trials
        return val

    @cache_readonly
    def resid_anscombe(self):
        # Family-specific Anscombe residuals.
        return self.family.resid_anscombe(self._endog, self.fittedvalues)

    @cache_readonly
    def resid_deviance(self):
        # Family-specific deviance residuals.
        return self.family.resid_dev(self._endog, self.fittedvalues)

    @cache_readonly
    def pearson_chi2(self):
        # Sum of squared Pearson residuals, weighted by frequency weights.
        chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)
        chisq *= self._freq_weights
        chisqsum = np.sum(chisq)
        return chisqsum

    @cache_readonly
    def fittedvalues(self):
        # For a GLM the fitted values are the estimated mean response mu.
        return self.mu

    @cache_readonly
    def mu(self):
        return self.model.predict(self.params)

    @cache_readonly
    def null(self):
        # Fitted values of the null (constant-only) model; used by
        # `null_deviance` and `llnull`.
        endog = self._endog
        model = self.model
        exog = np.ones((len(endog), 1))
        kwargs = {}
        if hasattr(model, 'offset'):
            kwargs['offset'] = model.offset
        if hasattr(model, 'exposure'):
            kwargs['exposure'] = model.exposure
        if len(kwargs) > 0:
            # With an offset/exposure the null model is fit as a full GLM.
            return GLM(endog, exog, family=self.family, **kwargs).fit().fittedvalues
        else:
            # Without offset/exposure the constant is fit by WLS.
            wls_model = lm.WLS(endog, exog,
                               weights=self._freq_weights * self._n_trials)
            return wls_model.fit().fittedvalues

    @cache_readonly
    def deviance(self):
        return self.family.deviance(self._endog, self.mu, self._freq_weights)

    @cache_readonly
    def null_deviance(self):
        return self.family.deviance(self._endog, self.null, self._freq_weights)

    @cache_readonly
    def llnull(self):
        # Log-likelihood of the null (constant-only) model.
        return self.family.loglike(self._endog, self.null,
                                   self._freq_weights, scale=self.scale)

    @cache_readonly
    def llf(self):
        val = self.family.loglike(self._endog, self.mu,
                                  self._freq_weights, scale=self.scale)
        return val

    @cache_readonly
    def aic(self):
        # -2*llf + 2*(df_model + 1)
        return -2 * self.llf + 2*(self.df_model+1)

    @cache_readonly
    def bic(self):
        # Deviance-based BIC using the weighted number of observations
        # (model.wnobs), not self.nobs.
        return (self.deviance -
                (self.model.wnobs - self.df_model - 1) *
                np.log(self.model.wnobs))

    def get_prediction(self, exog=None, exposure=None, offset=None,
                       transform=True, linear=False,
                       row_labels=None):
        # NOTE(review): the `linear` argument is not referenced in this
        # body (pred_kwds['linear'] is set explicitly) — confirm whether
        # it should feed into pred_kwds.
        import statsmodels.regression._prediction as linpred
        pred_kwds = {'exposure': exposure, 'offset': offset, 'linear': True}
        # two calls to a get_prediction duplicates exog generation if patsy
        res_linpred = linpred.get_prediction(self, exog=exog, transform=transform,
                                             row_labels=row_labels, pred_kwds=pred_kwds)
        pred_kwds['linear'] = False
        res = pred.get_prediction_glm(self, exog=exog, transform=transform,
                                      row_labels=row_labels,
                                      linpred=res_linpred,
                                      link=self.model.family.link,
                                      pred_kwds=pred_kwds)
        return res

    get_prediction.__doc__ = pred.get_prediction_glm.__doc__

    def remove_data(self):
        # GLM has alias/reference in result instance
        self._data_attr.extend([i for i in self.model._data_attr
                                if not '_data.' in i])
        super(self.__class__, self).remove_data()
        # TODO: what are these in results?
        self._endog = None
        self._freq_weights = None
        self._n_trials = None

    remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__

    def plot_added_variable(self, focus_exog, resid_type=None,
                            use_glm_weights=True, fit_kwargs=None,
                            ax=None):
        # Docstring attached below
        from statsmodels.graphics.regressionplots import plot_added_variable
        fig = plot_added_variable(self, focus_exog,
                                  resid_type=resid_type,
                                  use_glm_weights=use_glm_weights,
                                  fit_kwargs=fit_kwargs, ax=ax)
        return fig

    plot_added_variable.__doc__ = _plot_added_variable_doc % {
        'extra_params_doc' : ''}

    def plot_partial_residuals(self, focus_exog, ax=None):
        # Docstring attached below
        from statsmodels.graphics.regressionplots import plot_partial_residuals
        return plot_partial_residuals(self, focus_exog, ax=ax)

    plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
        'extra_params_doc' : ''}

    def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
                             ax=None):
        # Docstring attached below
        from statsmodels.graphics.regressionplots import plot_ceres_residuals
        return plot_ceres_residuals(self, focus_exog, frac,
                                    cond_means=cond_means, ax=ax)

    plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
        'extra_params_doc' : ''}

    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """
        Summarize the Regression Results

        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Model Family:', [self.family.__class__.__name__]),
                    ('Link Function:', [self.family.link.__class__.__name__]),
                    ('Method:', [self.method]),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Iterations:',
                     ["%d" % self.fit_history['iteration']]),
                    ]
        top_right = [('No. Observations:', None),
                     ('Df Residuals:', None),
                     ('Df Model:', None),
                     ('Scale:', [self.scale]),
                     ('Log-Likelihood:', None),
                     ('Deviance:', ["%#8.5g" % self.deviance]),
                     ('Pearson chi2:', ["%#6.3g" % self.pearson_chi2])
                     ]
        if title is None:
            title = "Generalized Linear Model Regression Results"
        #create summary tables
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right, # [],
                             yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=self.use_t)
        if hasattr(self, 'constraints'):
            smry.add_extra_txt(['Model has been estimated subject to linear '
                                'equality constraints.'])
        #diagnostic table is not used yet:
        #smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
        #                     yname=yname, xname=xname,
        #                     title="")
        return smry

    def summary2(self, yname=None, xname=None, title=None, alpha=.05,
                 float_format="%.4f"):
        """Experimental summary for regression Results

        Parameters
        ----------
        yname : string
            Name of the dependent variable (optional)
        xname : List of strings of length equal to the number of parameters
            Names of the independent variables (optional)
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals
        float_format: string
            print format for floats in parameters summary

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary2.Summary : class to hold summary
            results
        """
        # NOTE(review): this unconditionally overwrites `self.method` with
        # 'IRLS' even when the model was fit with a gradient optimizer —
        # confirm whether this label mutation is intended.
        self.method = 'IRLS'
        from statsmodels.iolib import summary2
        smry = summary2.Summary()
        smry.add_base(results=self, alpha=alpha, float_format=float_format,
                      xname=xname, yname=yname, title=title)
        if hasattr(self, 'constraints'):
            smry.add_text('Model has been estimated subject to linear '
                          'equality constraints.')
        return smry
class GLMResultsWrapper(lm.RegressionResultsWrapper):
    # Residual arrays that get row-wise wrapping (e.g. as pandas Series
    # carrying the row labels), in addition to the attributes already
    # handled by the linear regression results wrapper.
    _attrs = {
        'resid_anscombe' : 'rows',
        'resid_deviance' : 'rows',
        'resid_pearson' : 'rows',
        'resid_response' : 'rows',
        'resid_working' : 'rows'
    }
    _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
                                   _attrs)
# Register the wrapper for GLMResults instances.
wrap.populate_wrapper(GLMResultsWrapper, GLMResults)
if __name__ == "__main__":
    # Smoke-test / demo: fit a Gaussian GLM on the Longley data.
    import statsmodels.api as sm
    data = sm.datasets.longley.load()
    #data.exog = add_constant(data.exog)
    GLMmod = GLM(data.endog, data.exog).fit()
    # BUG FIX: summary() accepts only (yname, xname, title, alpha); the
    # previous `returns='tables'` keyword raised a TypeError.
    GLMT = GLMmod.summary()
    ## GLMT[0].extend_right(GLMT[1])
    ## print(GLMT[0])
    ## print(GLMT[2])
    GLMTp = GLMmod.summary(title='Test GLM')
    """
From Stata
. webuse beetle
. glm r i.beetle ldose, family(binomial n) link(cloglog)

Iteration 0:   log likelihood = -79.012269
Iteration 1:   log likelihood =  -76.94951
Iteration 2:   log likelihood = -76.945645
Iteration 3:   log likelihood = -76.945645

Generalized linear models                          No. of obs      =        24
Optimization     : ML                              Residual df     =        20
                                                   Scale parameter =         1
Deviance         =  73.76505595                    (1/df) Deviance =  3.688253
Pearson          =   71.8901173                    (1/df) Pearson  =  3.594506

Variance function: V(u) = u*(1-u/n)                [Binomial]
Link function    : g(u) = ln(-ln(1-u/n))           [Complementary log-log]

                                                   AIC             =   6.74547
Log likelihood   = -76.94564525                    BIC             =  10.20398

------------------------------------------------------------------------------
             |                 OIM
           r |      Coef.   Std. Err.      z    P>|z|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
      beetle |
          2  |  -.0910396   .1076132    -0.85   0.398    -.3019576    .1198783
          3  |  -1.836058   .1307125   -14.05   0.000     -2.09225   -1.579867
             |
       ldose |   19.41558   .9954265    19.50   0.000     17.46458    21.36658
       _cons |  -34.84602    1.79333   -19.43   0.000    -38.36089   -31.33116
------------------------------------------------------------------------------
    """
    #NOTE: wfs dataset has been removed due to a licensing issue
    # example of using offset
    #data = sm.datasets.wfs.load()
    # get offset
    #offset = np.log(data.exog[:,-1])
    #exog = data.exog[:,:-1]
    # convert dur to dummy
    #exog = sm.tools.categorical(exog, col=0, drop=True)
    # drop reference category
    # convert res to dummy
    #exog = sm.tools.categorical(exog, col=0, drop=True)
    # convert edu to dummy
    #exog = sm.tools.categorical(exog, col=0, drop=True)
    # drop reference categories and add intercept
    #exog = sm.add_constant(exog[:,[1,2,3,4,5,7,8,10,11,12]])
    #endog = np.round(data.endog)
    #mod = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
    #res1 = GLM(endog, exog, family=sm.families.Poisson(),
    #           offset=offset).fit(tol=1e-12, maxiter=250)
    #exposuremod = GLM(endog, exog, family=sm.families.Poisson(),
    #                  exposure = data.exog[:,-1]).fit(tol=1e-12,
    #                                                  maxiter=250)
    #assert(np.all(res1.params == exposuremod.params))
| phobson/statsmodels | statsmodels/genmod/generalized_linear_model.py | Python | bsd-3-clause | 64,398 | [
"Gaussian"
] | 034486a64296d502c189b36d416a31c4d83a5b66c061c7d641e953128619d297 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.