text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterator* into (matching, non-matching) lists.

    The partition is stable (input order is preserved within each list)
    and out-of-place (the input sequence is never mutated).

    Returns:
        A pair ``(true_items, false_items)`` where ``true_items`` holds the
        elements for which ``predicate`` was truthy.
    """
    true_items: List[Any] = []
    false_items: List[Any] = []
    for item in iterator:
        # Route each element to exactly one bucket, keeping relative order.
        (true_items if predicate(item) else false_items).append(item)
    return true_items, false_items
class recommenderCallTransformer(cst.CSTTransformer):
    """libcst transformer that un-flattens recommender client method calls.

    Rewrites ``client.method(a, b, ...)`` into
    ``client.method(request={'field': a, ...}, retry=..., timeout=..., metadata=...)``
    so call sites match the keyword/request-object API surface.
    (Class name kept lowercase to match the generated fixup-script convention.)
    """
    # Keyword arguments that configure the RPC itself rather than the request body.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # For each API method: the ordered request fields it accepts, so positional
    # arguments can be mapped back to their field names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'get_insight': ('name', ),
        'get_recommendation': ('name', ),
        'list_insights': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_recommendations': ('parent', 'page_size', 'page_token', 'filter', ),
        'mark_insight_accepted': ('name', 'etag', 'state_metadata', ),
        'mark_recommendation_claimed': ('name', 'etag', 'state_metadata', ),
        'mark_recommendation_failed': ('name', 'etag', 'state_metadata', ),
        'mark_recommendation_succeeded': ('name', 'etag', 'state_metadata', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call node; return ``updated`` unchanged when the
        call is not a recognized API method or is already in request form."""
        try:
            # Only attribute-style calls (obj.method(...)) have .func.attr;
            # anything else raises AttributeError and is left alone.
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        # Separate request-field keywords from RPC control keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Positional args beyond the request fields must be control parameters
        # passed positionally; re-emit them as keywords in declaration order.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        # Bundle every request field into a single dict-valued ``request=`` arg.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=recommenderCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Lazily enumerate every .py file anywhere under in_dir.
    python_files = (
        pathlib.Path(os.path.join(root, name))
        for root, _, names in os.walk(in_dir)
        for name in names
        if os.path.splitext(name)[1] == ".py"
    )
    for src_path in python_files:
        with open(src_path, 'r') as handle:
            original_source = handle.read()
        # Parse the code and insert method call fixes.
        module = cst.parse_module(original_source)
        fixed_module = module.visit(transformer)
        # Mirror the source's directory structure underneath out_dir.
        dest_path = out_dir.joinpath(src_path.relative_to(in_dir))
        dest_path.parent.mkdir(parents=True, exist_ok=True)
        # Generate the updated source file at the corresponding path.
        with open(dest_path, 'w') as handle:
            handle.write(fixed_module.code)
if __name__ == '__main__':
    # Command-line entry point: validate the two directory arguments, then
    # hand off to fix_files for the actual rewrite.
    cli = argparse.ArgumentParser(
        description="""Fix up source that uses the recommender client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    cli.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    cli.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    opts = cli.parse_args()
    src_root = pathlib.Path(opts.input_dir)
    dst_root = pathlib.Path(opts.output_dir)
    # Check all preconditions up front; bail out before touching any files.
    if not src_root.is_dir():
        print(
            f"input directory '{src_root}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not dst_root.is_dir():
        print(
            f"output directory '{dst_root}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(dst_root):
        print(
            f"output directory '{dst_root}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(src_root, dst_root)
|
googleapis/python-recommender
|
scripts/fixup_recommender_v1_keywords.py
|
Python
|
apache-2.0
| 6,452
|
[
"VisIt"
] |
d2e15d4d08fb5d7f915dfa314de527bccd7f53fde4b05aa4b24f613211f2a1bd
|
#!/usr/bin/env python
#
# $File: samplingSeparateVSPs.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# create an age-structured population with a disease
import random
# 10,000 individuals, 10 loci, plus an 'age' information field per individual.
pop = sim.Population(10000, loci=10, infoFields='age')
sim.initGenotype(pop, freq=[0.3, 0.7])
# Assign each individual a random age in [0, 70].
sim.initInfo(pop, lambda: random.randint(0, 70), infoFields='age')
# Split subpopulation 0 into virtual subpopulations by age using the two
# cutoffs 20 and 40 (presumably VSP 0: age < 20, VSP 1: 20-40, VSP 2: >= 40 —
# confirm against the simuPOP InfoSplitter documentation).
pop.setVirtualSplitter(sim.InfoSplitter(cutoff=(20, 40), field='age'))
# different age group has different penetrance
sim.maPenetrance(pop, loci=5, penetrance=(0.1, 0.2, 0.3), subPops=[(0,1)])
sim.maPenetrance(pop, loci=5, penetrance=(0.2, 0.4, 0.6), subPops=[(0,2)])
# count the number of affected individuals in each group
sim.stat(pop, numOfAffected=True, subPops=[(0,1), (0,2)], vars='numOfAffected_sp')
print(pop.dvars((0,1)).numOfAffected, pop.dvars((0,2)).numOfAffected)
#
from simuPOP.sampling import drawRandomSample
# Draw 500 individuals from each of the two virtual subpopulations.
sample = drawRandomSample(pop, sizes=[500, 500], subPops=[(0,1), (0,2)])
# virtual subpopulations are rearranged to different subpopulations.
print(sample.subPopSizes())
|
BoPeng/simuPOP
|
docs/samplingSeparateVSPs.py
|
Python
|
gpl-2.0
| 2,041
|
[
"VisIt"
] |
96207f9fa2b2b3153d288d770337493f39cb017f49f9f6ef18116130fb1cd38b
|
import unittest
from unittest.mock import patch
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from meerkat_abacus.config import config
from meerkat_abacus import model
from meerkat_abacus.pipeline_worker.process_steps import add_links
from meerkat_abacus.consumer.database_setup import create_db
class TestAddLinks(unittest.TestCase):
    """Integration tests for the AddLinks pipeline step.

    Each test patches ``add_links.util.get_links`` with a purpose-built link
    configuration (the class-level ``test_links*`` tuples), seeds the relevant
    demo form table directly through the engine, then asserts on the records
    returned by ``AddLinks.run``.
    """

    def setUp(self):
        # Recreate the database schema from scratch before every test.
        create_db(config.DATABASE_URL, drop=True)
        engine = create_engine(config.DATABASE_URL)
        model.form_tables(config)
        model.Base.metadata.create_all(engine)
        # Separate engine/session pair used by the test bodies.
        self.engine = create_engine(config.DATABASE_URL)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()

    def tearDown(self):
        # Empty the demo form tables so tests remain independent.
        # NOTE(review): the connection is never closed here — consider
        # con.close() or a ``with`` block.
        con = self.engine.connect()
        table = model.form_tables(config)["demo_case"]
        con.execute(table.__table__.delete())
        table = model.form_tables(config)["demo_alert"]
        con.execute(table.__table__.delete())

    # Link config for test_add_to_links: pair of (links-by-form-type,
    # links-by-name) describing a demo_case -> demo_alert match on alert_id.
    test_links = (
        {"Case": [{
            "name": "alert_investigation",
            "to_form": "demo_alert",
            "from_form": "demo_case",
            "from_column": "alert_id",
            "to_column": "alert_id",
            "method": "match",
            "order_by": "visit_data;date",
            "uuid": "meta/instanceID"
        }]
        },
        {"alert_investigation": {
            "name": "alert_investigation",
            "to_form": "demo_alert",
            "from_form": "demo_case",
            "from_column": "alert_id",
            "to_column": "alert_id",
            "method": "match",
            "order_by": "visit_data;date",
            "uuid": "meta/instanceID"
        }})

    @patch.object(add_links.util, 'get_links',
                  return_value=test_links)
    def test_add_to_links(self, get_links_mock):
        """Data arriving with link_data should be joined to the matching
        existing demo_case row (matched via alert_id)."""
        al = add_links.AddLinks(config, self.session)
        # Two pre-existing case records; only alert_id "a1" should match.
        existing_data = [{
            "uuid": "a",
            "data": {
                "visit_date": "2017-01-14T05:38:33.482144",
                "icd_code": "A01",
                "patientid": "1",
                "alert_id": "a1",
                "module": "ncd",
                "intro./visit": "new",
                "id": "1"
            }
        },
            {
                "uuid": "b",
                "data": {
                    "visit_date": "2017-01-14T05:38:33.482144",
                    "icd_code": "A01",
                    "patientid": "1",
                    "alert_id": "a2",
                    "module": "ncd",
                    "intro./visit": "new",
                    "id": "2"
                }
            }
        ]
        table = model.form_tables(config)["demo_case"]
        con = self.engine.connect()
        con.execute(table.__table__.insert(), existing_data)
        con.close()
        test_data = {"type": "Case",
                     "original_form": "demo_alert",
                     "link_data": {"alert_investigation": [{"alert_id": "a1"}]}
                     }
        results = al.run("data", test_data)
        # Exactly one result: raw_data from the matched case, link_data passed through.
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]["data"]["raw_data"],
                         existing_data[0]["data"])
        self.assertEqual(results[0]["data"]["link_data"],
                         test_data["link_data"])

    # Same link config as test_links; used by test_add_from_links.
    test_links2 = (
        {"Case": [{
            "name": "alert_investigation",
            "to_form": "demo_alert",
            "from_form": "demo_case",
            "from_column": "alert_id",
            "to_column": "alert_id",
            "method": "match",
            "order_by": "visit_data;date",
            "uuid": "meta/instanceID"
        }]
        },
        {"alert_investigation": {
            "name": "alert_investigation",
            "to_form": "demo_alert",
            "from_form": "demo_case",
            "from_column": "alert_id",
            "to_column": "alert_id",
            "method": "match",
            "order_by": "visit_data;date",
            "uuid": "meta/instanceID"
        }})

    @patch.object(add_links.util, 'get_links',
                  return_value=test_links2)
    def test_add_from_links(self, get_links_mock):
        """Data arriving from the *from* side should pick up link_data from
        the matching row already stored in the *to* form (demo_alert)."""
        config.country_config["alert_id_length"] = 1
        al = add_links.AddLinks(config, self.session)
        existing_data = [{
            "uuid": "a",
            "data": {
                "alert_id": "1",
            }
        }
        ]
        table = model.form_tables(config)["demo_alert"]
        con = self.engine.connect()
        con.execute(table.__table__.insert(), existing_data)
        con.close()
        test_data = {"type": "Case",
                     "original_form": "demo_case",
                     "raw_data": {"alert_id": "1",
                                  "intro./visit": "new"}
                     }
        results = al.run("data", test_data)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]["data"]["raw_data"],
                         test_data["raw_data"])
        # The stored alert row becomes the link_data payload.
        self.assertEqual(results[0]["data"]["link_data"],
                         {"alert_investigation": [existing_data[0]["data"]]})

    # Self-referential link config (demo_case -> demo_case) with a
    # case-insensitive ("lower_match") join and a to_condition filter;
    # used by test_self_link_lower_match.
    test_links3 = (
        {"Case": [{
            "name": "return_visit",
            "to_form": "demo_case",
            "from_form": "demo_case",
            "from_column": "link_id",
            "to_column": "link_id",
            "method": "lower_match",
            "order_by": "visit_date;date",
            "uuid": "meta/instanceID",
            "to_condition": "visit:return"
        }]
        },
        {"return_visit": {
            "name": "return_visit",
            "to_form": "demo_case",
            "from_form": "demo_case",
            "from_column": "link_id",
            "to_column": "link_id",
            "method": "lower_match",
            "order_by": "visit_date;date",
            "uuid": "meta/instanceID",
            "to_condition": "visit:return"
        }})

    @patch.object(add_links.util, 'get_links',
                  return_value=test_links3)
    def test_self_link_lower_match(self, get_links_mock):
        """lower_match should join "Aa" to "AA" case-insensitively, honour the
        visit:return condition, and order linked records by visit_date."""
        config.country_config["alert_id_length"] = 1
        al = add_links.AddLinks(config, self.session)
        # Record "a" has no link_id and should be ignored; record "b" matches
        # the incoming link_id "Aa" once both are lower-cased.
        existing_data = [{
            "uuid": "a",
            "data": {
                "visit_date": "2017-01-14T05:38:33.482144",
                "icd_code": "A01",
                "patientid": "1",
                "alert_id": "aa",
                "module": "ncd",
                "intro./visit": "new",
                "id": "1"
            }
        },
            {
                "uuid": "b",
                "data": {
                    "visit_date": "2017-01-17T05:38:33.482144",
                    "link_id": "AA",
                    "visit": "return",
                    "id": "2"
                }
            }
        ]
        table = model.form_tables(config)["demo_case"]
        con = self.engine.connect()
        con.execute(table.__table__.insert(), existing_data)
        con.close()
        test_data = {"type": "Case",
                     "original_form": "demo_case",
                     "link_data": {"return_visit": [{
                         "link_id": "Aa",
                         "visit": "return",
                         "id": "3",
                         "visit_date": "2017-01-16T05:38:33.482144"}]
                     }
                     }
        results = al.run("data", test_data)
        self.assertEqual(len(results), 1)
        self.assertEqual(len(results[0]["data"]["link_data"]["return_visit"]),
                         2)
        # Make sure they are in right order
        self.assertEqual(results[0]["data"]["link_data"]["return_visit"][0]["id"],
                         "3")
        self.assertEqual(results[0]["data"]["link_data"]["return_visit"][1]["id"],
                         "2")
|
who-emro/meerkat_abacus
|
meerkat_abacus/pipeline_worker/tests/test_add_links.py
|
Python
|
mit
| 7,798
|
[
"VisIt"
] |
c3ea7f7bab6a61a10430c9dba7279c48743a948e8e19cc74d96367ab3cd1316a
|
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}-{{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{prefix}} {{first_name}} {{last_name}}",
)
# extracted from https://www.bfs.admin.ch/bfs/it/home/statistiche/popolazione/nascite-decessi/nomi-svizzera.assetdetail.3243313.html # noqa E501
first_names_male = (
"Peter",
"Hans",
"Daniel",
"Thomas",
"Andreas",
"Martin",
"Markus",
"Michael",
"Christian",
"Stefan",
"Walter",
"Urs",
"Marco",
"Bruno",
"Patrick",
"Werner",
"René",
"Marcel",
"Beat",
"Roland",
"Kurt",
"Josef",
"David",
"Simon",
"Rolf",
"Heinz",
"Rudolf",
"Paul",
"Roger",
"Christoph",
"Ernst",
"Pascal",
"Adrian",
"Lukas",
"Marc",
"Robert",
"Reto",
"Manuel",
"Fabian",
"Alfred",
"Philipp",
"Jürg",
"Matthias",
"Stephan",
"Franz",
"Anton",
"André",
"Alexander",
"Samuel",
"Jan",
"Johann",
"Luca",
"Max",
"Roman",
"Mario",
"Fritz",
"Ulrich",
"Dominik",
"Karl",
"Tobias",
"Oliver",
"Florian",
"Antonio",
"Benjamin",
"Sandro",
"Bernhard",
"Jonas",
"Felix",
"Raphael",
"Kevin",
"Erich",
"Fabio",
"Jakob",
"Sven",
"Dario",
"Giuseppe",
"Remo",
"Nicolas",
"Albert",
"Erwin",
"Richard",
"Nico",
"Michel",
"José",
"Claudio",
"Tim",
"Noah",
"Joel",
"Heinrich",
"Jörg",
"Robin",
"Sebastian",
"Armin",
"Guido",
"Silvan",
"Lars",
"Ivan",
"Julian",
"Alois",
"Francesco",
"Sascha",
"Dominic",
"Johannes",
"Georg",
"Gabriel",
"Manfred",
"Herbert",
"Otto",
"Alessandro",
"Gerhard",
"Patrik",
"Gian",
"Mathias",
"Leon",
"Willi",
"Eduard",
"Nicola",
"Hugo",
"Ali",
"Yves",
"Elias",
"Hermann",
"Philippe",
"Leo",
"Emil",
"Frank",
"Dieter",
"Friedrich",
"Luis",
"Giovanni",
"Niklaus",
"Alex",
"Roberto",
"Rafael",
"Hanspeter",
"Diego",
"Nils",
"Leandro",
"Ramon",
"Severin",
"Salvatore",
"Mike",
"Alain",
"Timo",
"Carlos",
"Arthur",
"Yannick",
"Eric",
"Angelo",
"Ivo",
"Wolfgang",
"Matteo",
"Joël",
"Andrin",
"Pius",
"Moritz",
"Valentin",
"Louis",
"Wilhelm",
"Renato",
"Levin",
"Silvio",
"Willy",
"Andrea",
"Jonathan",
"Jean",
"Livio",
"Loris",
"Damian",
"Theodor",
"Michele",
"Vincenzo",
"Elia",
"Ralph",
"Klaus",
"Eugen",
"Mark",
"Konrad",
"Denis",
"Norbert",
"Lorenz",
"Viktor",
"Mehmet",
"Marko",
"Kilian",
"Hans-Peter",
"Cédric",
"Ralf",
"Aaron",
"Maximilian",
"Carlo",
"Alessio",
"Olivier",
"Jürgen",
"Luigi",
"Philip",
"Lucas",
"Mauro",
"Janis",
"Cyrill",
"Linus",
"Davide",
"Othmar",
"Flavio",
"Nino",
"Arnold",
"Nick",
"Rainer",
"Domenico",
"Adolf",
"Emanuel",
"Oskar",
"Ben",
"Joshua",
"Leonardo",
"Franco",
"Pierre",
"John",
"Gregor",
"Fernando",
"Marius",
"Claude",
"Edwin",
"Colin",
"Mustafa",
"Pedro",
"Stefano",
"Sergio",
"Dominique",
"Juan",
"Nikola",
"Enrico",
"Jens",
"Daniele",
"Thierry",
"Jose",
"Liam",
"Francisco",
"Ricardo",
"Rico",
"Christof",
"Aleksandar",
"Dennis",
"Mohamed",
"Joseph",
"Charles",
"Noel",
"Miguel",
"Laurin",
"Milan",
"Reinhard",
"Lionel",
"Dragan",
"Hasan",
"Paulo",
"Edgar",
"Silas",
"Hubert",
"Helmut",
"Ibrahim",
"Ruben",
"Timon",
"Vincent",
"Christopher",
"Finn",
"Ronny",
"Kaspar",
"Mattia",
"Lorenzo",
"Pietro",
"Björn",
"Hansruedi",
"Gottfried",
"Joachim",
"Benno",
"Harald",
"Jorge",
"Cedric",
"Nevio",
"Paolo",
"Gianluca",
"Boris",
"Kai",
"Maurizio",
"Steven",
"Mischa",
"Patric",
"Zoran",
"Mirco",
"Marvin",
"Dirk",
"Benedikt",
"Uwe",
"Hans-Rudolf",
"Maurice",
"Massimo",
"Hansjörg",
"Jeremy",
"Niklas",
"Ahmet",
"Fridolin",
"Dejan",
"Goran",
"Micha",
"Mohammad",
"Ronald",
"Bernd",
"Mirko",
"Erik",
"Jason",
"Tiago",
"Riccardo",
"Jérôme",
"Igor",
"Siegfried",
"Pasquale",
"Andri",
"Tom",
"Ueli",
"Amir",
"Cyril",
"Adriano",
"Alberto",
"Ferdinand",
"Justin",
"Raffael",
"Julien",
"Lenny",
"Luka",
"Marcus",
"Pirmin",
"Janik",
"Julius",
"Meinrad",
"Adam",
"James",
"Hüseyin",
"Alexandre",
"Rocco",
"Luc",
"Victor",
"João",
"Andres",
"Luan",
"Flurin",
"Filip",
"Ismail",
"Danilo",
"Laurent",
"Raffaele",
"Ahmed",
"Günter",
"Joao",
"Rui",
"Xaver",
"Fabrizio",
"William",
"Vito",
"Miroslav",
"Lino",
"Albin",
"Jean-Pierre",
"Basil",
"Till",
"Horst",
"Romeo",
"Aldo",
"Murat",
"Harry",
"Alfons",
"Pablo",
"Bernard",
"Noé",
"Luciano",
"August",
"Levi",
"Nando",
"Fabrice",
"Raymond",
"Jamie",
"Georges",
"Steffen",
"Serge",
"Cristian",
"Samir",
"António",
"Marlon",
"Omar",
"Lian",
"Oscar",
"Yanick",
"Armando",
"Nikolaus",
"Dylan",
"Hannes",
"Sacha",
"Nuno",
"Toni",
"Dino",
"Elmar",
"Arno",
"Joaquim",
"Sasa",
"Henry",
"Vladimir",
"Arben",
"Ryan",
"Bekim",
"Milos",
"Giorgio",
"Ludwig",
"Leonard",
"Adnan",
"Gilbert",
"Yannik",
"Aron",
"Iwan",
"Maik",
"Dimitri",
"Erhard",
"François",
"Gabriele",
"Sami",
"Elio",
"Antonino",
"Fynn",
"Simone",
"Andrew",
"Alan",
"Nenad",
"Frédéric",
"Etienne",
"Janick",
"Steve",
"Christophe",
"Gianni",
"Urban",
"Anthony",
"Deniz",
"Jon",
"Alejandro",
"Axel",
"Ian",
"Theo",
"Andrej",
"Brian",
"Lucien",
"Gino",
"Clemens",
"Yanik",
"Adem",
"Emir",
"Tino",
"Miro",
"Enis",
"Gregory",
"Danijel",
"Osman",
"Michal",
"Carmine",
"Orlando",
"Enes",
"Giuliano",
"Timothy",
"Fredy",
"Besnik",
"Vitor",
"Holger",
"Kim",
"Eduardo",
"Petar",
"Jacques",
"Karim",
"Darko",
"Gustav",
"Emilio",
"Mateo",
"Alban",
"Marek",
"Oswald",
"Noël",
"Donato",
"Mohammed",
"Roy",
"Kay",
"Nathan",
"Enea",
"Silvano",
"Josip",
"Valerio",
"Artur",
"Besim",
"Mika",
"Torsten",
"Romano",
"Heiko",
"Yusuf",
"Chris",
"Naim",
"Burim",
"Gaetano",
"Hans-Ulrich",
"Olaf",
"Maurus",
"Volker",
"Jean-Claude",
"Henri",
"Nik",
"Rodrigo",
"Florin",
"Mael",
"Amar",
"Agron",
"Muhamed",
"Tristan",
"Valon",
"Ahmad",
"Ilir",
"Javier",
"Lorin",
"Yanis",
"Fatmir",
"Bajram",
"Carmelo",
"Agim",
"Enzo",
"Moreno",
"Cornel",
"Andy",
"Jeton",
"Blerim",
"Bojan",
"Federico",
"Attila",
"Juri",
"Tomas",
"Valentino",
"Ismet",
"Jannik",
"Ruedi",
"Afrim",
"Yannic",
"Ramadan",
"Alfredo",
"Josua",
"Cosimo",
"Gerardo",
"Bastian",
"Filippo",
"Raoul",
"Halil",
"Yann",
"Georgios",
"Jannis",
"Nicholas",
"Sean",
"Wilfried",
"Günther",
"Dusan",
"Beda",
"Gerold",
"Gottlieb",
"Filipe",
"Ilija",
"Carl",
"Ardian",
"Marcello",
"Enver",
"Dean",
"Dion",
"Tenzin",
"Zeljko",
"Carsten",
"Diogo",
"Alen",
"Egon",
"Aurel",
"Yannis",
"Edin",
"Hans-Jörg",
"Tomislav",
"Mohamad",
"Bujar",
"Raul",
"Slobodan",
"Driton",
"Maxim",
"Francis",
"Hansueli",
"Ivica",
"Nelson",
"Emanuele",
"Konstantin",
"Fred",
"Naser",
"Gerd",
"Kristian",
"Selim",
"Corsin",
"Dietmar",
"George",
"Piotr",
"Giacomo",
"Ingo",
"Andre",
"Malik",
"Lothar",
"Jochen",
"Sinan",
"Thorsten",
"Tiziano",
"Gilles",
"Avni",
"Jann",
"Lio",
"Niels",
"Emmanuel",
"Leonhard",
"Lorik",
"Aurelio",
"Gion",
"Liridon",
"Marino",
"Can",
"Kenan",
"Ewald",
"Stéphane",
"Dalibor",
"Jozef",
"Noe",
"Bryan",
"Dan",
"Santiago",
"Damiano",
"Arian",
"Rosario",
"Giancarlo",
"Nathanael",
"Emre",
"Stephen",
"Hassan",
"Jovan",
"Egzon",
"Reinhold",
"Tomasz",
"Vittorio",
"Patrice",
"Tibor",
"Jost",
"Elvis",
"Lean",
"Henrik",
"Musa",
"Noa",
"Udo",
"Almir",
"Van",
"Dietrich",
"Mladen",
"Armend",
"Arlind",
"Milo",
"Arsim",
"Bashkim",
"Dimitrios",
"Matthew",
"Ömer",
"Abdullah",
"Hakan",
"Gerald",
"Tommaso",
"Joris",
"Damir",
"Vinzenz",
"Marcos",
"Raphaël",
"Ennio",
"Melvin",
"Leander",
"Kuno",
"Massimiliano",
"Maël",
"Anto",
"Branko",
"Fadil",
"Kemal",
"Muhammed",
"Hendrik",
"Pawel",
"Jeremias",
"Léon",
"Leano",
"Rémy",
"Giulio",
"Muhamet",
"Lulzim",
"Konstantinos",
"Pavel",
"Rinaldo",
"Omer",
"Simeon",
"Gian-Luca",
"Maurin",
"Antoine",
"Frederik",
"Janic",
"Faton",
"Marcin",
"Sébastien",
"Cem",
"Curdin",
"Endrit",
"Nemanja",
"Karsten",
"Renzo",
"Jerome",
"Krzysztof",
"Jeffrey",
"Sebastiano",
"Ernesto",
"Lazar",
"Ramazan",
"Gérard",
"Ajan",
"Emin",
"Ioannis",
"Jesus",
"Alfonso",
"Yasin",
"Jaron",
"Alexis",
"Orhan",
"Artan",
"Morris",
"Angel",
"Janosch",
"Rene",
"Shaban",
"Jakub",
"Loïc",
"Kristijan",
"Enrique",
"Skender",
"Gianfranco",
"Mathieu",
"Xavier",
"Mathis",
"Didier",
"Arif",
"Hamza",
"Jacob",
"Leart",
"Laszlo",
"Predrag",
"Mentor",
"Wendelin",
"Luís",
"Constantin",
"Erion",
"Berat",
"Dardan",
"Melchior",
"Serkan",
"Dorian",
"Eren",
"Fatih",
"Luzius",
"Nebojsa",
"Metin",
"Diar",
"Rino",
"Ekrem",
"Isa",
"Jetmir",
"Edward",
"Nikolaos",
"Gazmend",
"Haris",
"Kian",
"Ensar",
"Mirsad",
"Danny",
"Senad",
"Donat",
"Bilal",
"Ron",
"Nael",
"Guy",
"Julio",
"Kujtim",
"Kushtrim",
"Lutz",
"Balthasar",
"Rouven",
"Lias",
"Neil",
"Abraham",
"Magnus",
"Sérgio",
"Hansjürg",
"Said",
"Ismael",
"Detlef",
"Umberto",
"Admir",
"Jayden",
"Jaime",
"Karl-Heinz",
"Tomás",
"Florim",
"Achim",
"Devin",
"Maxime",
"Fitim",
"Jean-Marc",
"Rayan",
"Sadik",
"Tarik",
"Abdul",
"Jack",
"Mergim",
"Nelio",
"Sam",
"Flamur",
"Ignaz",
"Samuele",
"Tony",
"Petr",
"Waldemar",
"Arda",
"Ardit",
"Lukasz",
"Milorad",
"Nicolai",
"Ramiz",
"Aziz",
"Kamil",
"Rinor",
"Safet",
"Piero",
"Erkan",
"Niko",
"Zsolt",
"Ernest",
"Miodrag",
"Alvaro",
"Astrit",
"Edmund",
"Jules",
"Cristiano",
"Ivano",
"Kenneth",
"Saverio",
"Semir",
"Burak",
"Theophil",
"Altin",
"Andrzej",
"Jonah",
"Jiri",
"Salih",
"Zoltán",
"Ferenc",
"Grzegorz",
"Irfan",
"Johan",
"Kaan",
"Süleyman",
"Hussein",
"Rexhep",
"Besart",
"Janos",
"Labinot",
"Onur",
"Stjepan",
"Domenic",
"Siro",
"Abel",
"Florent",
"Christos",
"Swen",
"Branislav",
"Mato",
"Amin",
"Matej",
"Slavko",
"Jusuf",
"Luke",
"Slavisa",
"Erol",
"Gabor",
"Jasmin",
"Visar",
"Sinisa",
"Isidor",
"Merlin",
"Claus",
"Marin",
"Zoltan",
"Muhammad",
"Neo",
"Zeno",
"Istvan",
"Adis",
"Edon",
"Gil",
"Leopold",
"Hartmut",
"Raimund",
"Ken",
"Csaba",
"Kerim",
"Norman",
"Lucio",
"László",
"Marjan",
"Damjan",
"Eugenio",
"Domingos",
"Reiner",
"Augusto",
"Gzim",
"Nazmi",
"Laurenz",
"Zlatko",
"Jaroslav",
"Nevin",
"Biagio",
"Felice",
"Balz",
"Boban",
"Marcelo",
"Caspar",
"Ledion",
"Rodolfo",
"Aldin",
"Matti",
"Remzi",
"Ljubisa",
"Til",
"Péter",
"Umut",
"Baris",
"Lirim",
"Mehdi",
"Edmond",
"Gonçalo",
"Jasin",
"Niclas",
"Jordan",
"Mahmoud",
"Stanislav",
"Cornelius",
"Jona",
"Khaled",
"Quentin",
"Gökhan",
"Imer",
"Volkan",
"Harun",
"Miran",
"Damien",
"Gennaro",
"Jari",
"Marian",
"Rüdiger",
"Albrecht",
"Mile",
"Thiago",
"Yvan",
"Alwin",
"Gani",
"Mahmut",
"Pero",
"Evan",
"Fisnik",
"Idriz",
"Sergej",
"Sabri",
"Felipe",
"István",
"Dave",
"Hans-Jürgen",
"Jean-Luc",
"Kastriot",
"Mariusz",
"Arne",
"Faruk",
"Gebhard",
"German",
"Tamás",
"Anes",
"Arbnor",
"Mats",
"Drilon",
"Fábio",
"Mihajlo",
"Sedat",
"Tahir",
)
# extracted from https://www.bfs.admin.ch/bfs/it/home/statistiche/popolazione/nascite-decessi/nomi-svizzera.assetdetail.3243318.html # noqa E501
first_names_female = (
"Maria",
"Anna",
"Ursula",
"Ruth",
"Elisabeth",
"Sandra",
"Monika",
"Claudia",
"Verena",
"Nicole",
"Barbara",
"Silvia",
"Andrea",
"Marie",
"Daniela",
"Christine",
"Karin",
"Marianne",
"Erika",
"Margrit",
"Brigitte",
"Susanne",
"Rita",
"Laura",
"Sarah",
"Katharina",
"Rosmarie",
"Esther",
"Heidi",
"Anita",
"Manuela",
"Rosa",
"Doris",
"Sonja",
"Beatrice",
"Yvonne",
"Gertrud",
"Jacqueline",
"Sara",
"Irene",
"Ana",
"Franziska",
"Cornelia",
"Fabienne",
"Gabriela",
"Patricia",
"Martina",
"Julia",
"Edith",
"Eva",
"Isabelle",
"Sabrina",
"Nathalie",
"Alexandra",
"Corinne",
"Angela",
"Melanie",
"Alice",
"Nadine",
"Jessica",
"Denise",
"Elena",
"Vanessa",
"Simone",
"Anne",
"Regula",
"Susanna",
"Carmen",
"Sophie",
"Caroline",
"Emma",
"Nina",
"Tanja",
"Catherine",
"Sabine",
"Lara",
"Petra",
"Lea",
"Céline",
"Jasmin",
"Therese",
"Stefanie",
"Johanna",
"Nadia",
"Tamara",
"Chantal",
"Martha",
"Michelle",
"Christina",
"Marina",
"Adelheid",
"Dora",
"Monique",
"Rahel",
"Hedwig",
"Lisa",
"Janine",
"Pia",
"Anja",
"Elsbeth",
"Madeleine",
"Eveline",
"Judith",
"Diana",
"Françoise",
"Charlotte",
"Maja",
"Eliane",
"Renate",
"Christiane",
"Michèle",
"Jennifer",
"Bettina",
"Chiara",
"Bernadette",
"Aline",
"Carla",
"Helena",
"Brigitta",
"Mirjam",
"Theresia",
"Astrid",
"Nadja",
"Jana",
"Selina",
"Priska",
"Lena",
"Stephanie",
"Lucia",
"Linda",
"Regina",
"Agnes",
"Olivia",
"Sonia",
"Valérie",
"Klara",
"Ramona",
"Lina",
"Elsa",
"Helene",
"Monica",
"Iris",
"Hanna",
"Valentina",
"Annemarie",
"Elisa",
"Margrith",
"Dominique",
"Beatrix",
"Cristina",
"Paula",
"Magdalena",
"Livia",
"Sofia",
"Patrizia",
"Liliane",
"Nelly",
"Marion",
"Ida",
"Alina",
"Isabel",
"Vera",
"Stéphanie",
"Giulia",
"Leonie",
"Jeannette",
"Christa",
"Alessia",
"Véronique",
"Myriam",
"Emilie",
"Olga",
"Nora",
"Julie",
"Sylvia",
"Margaretha",
"Claudine",
"Marlise",
"Miriam",
"Sibylle",
"Sylvie",
"Lydia",
"Katja",
"Lorena",
"Jolanda",
"Rebecca",
"Mia",
"Irma",
"Larissa",
"Luana",
"Martine",
"Deborah",
"Francesca",
"Veronika",
"Isabella",
"Noemi",
"Ingrid",
"Frieda",
"Suzanne",
"Liselotte",
"Michaela",
"Florence",
"Evelyne",
"Hildegard",
"Corina",
"Danielle",
"Laurence",
"Carole",
"Milena",
"Cécile",
"Mara",
"Luzia",
"Sandrine",
"Gisela",
"Simona",
"Mélanie",
"Béatrice",
"Marta",
"Antonia",
"Erna",
"Gabriele",
"Katrin",
"Kathrin",
"Melissa",
"Camille",
"Adriana",
"Fiona",
"Lucie",
"Natalie",
"Teresa",
"Renata",
"Josiane",
"Sophia",
"Clara",
"Luisa",
"Silvana",
"Jeannine",
"Pascale",
"Hélène",
"Emilia",
"Joëlle",
"Gabriella",
"Maya",
"Marianna",
"Ines",
"Léa",
"Claire",
"Marisa",
"Sina",
"Lia",
"Paola",
"Mathilde",
"Sabina",
"Alessandra",
"Ivana",
"Anne-Marie",
"Elvira",
"Bianca",
"Samira",
"Cindy",
"Amélie",
"Chloé",
"Kim",
"Victoria",
"Annette",
"Angelina",
"Dorothea",
"Antoinette",
"Tina",
"Tania",
"Angelika",
"Valeria",
"Flavia",
"Margaritha",
"Rachel",
"Marguerite",
"Jeanne",
"Yvette",
"Natalia",
"Alicia",
"Giovanna",
"Mireille",
"Liliana",
"Pauline",
"Seraina",
"Elodie",
"Ariane",
"Helga",
"Zoé",
"Natascha",
"Muriel",
"Francine",
"Joana",
"Melina",
"Aurélie",
"Thi",
"Giuseppina",
"Tatiana",
"Margareta",
"Louise",
"Marija",
"Debora",
"Salome",
"Viviane",
"Fanny",
"Katia",
"Carolina",
"Irina",
"Bertha",
"Marlene",
"Noémie",
"Amanda",
"Sarina",
"Marlies",
"Lilian",
"Irène",
"Laetitia",
"Kristina",
"Jasmine",
"Ella",
"Jenny",
"Gabrielle",
"Carmela",
"Manon",
"Helen",
"Fatima",
"Stefania",
"Virginie",
"Ladina",
"Jelena",
"Berta",
"Antonella",
"Rebekka",
"Audrey",
"Anaïs",
"Tatjana",
"Annina",
"Margot",
"Carina",
"Samantha",
"Evelyn",
"Annamarie",
"Tiziana",
"Arlette",
"Emily",
"Kerstin",
"Svenja",
"Caterina",
"Christelle",
"Saskia",
"Elin",
"Lilly",
"Anouk",
"Rose",
"Fatma",
"Lynn",
"Elina",
"Colette",
"Josette",
"Leila",
"Gerda",
"Susana",
"Geneviève",
"Désirée",
"Naomi",
"Stella",
"Romina",
"Delphine",
"Aurora",
"Estelle",
"Juliette",
"Tabea",
"Anina",
"Thérèse",
"Mariana",
"Beatriz",
"Hilda",
"Lotti",
"Séverine",
"Delia",
"Ronja",
"Gina",
"Mila",
"Antonietta",
"Veronica",
"Aleksandra",
"Gisèle",
"Lidia",
"Natacha",
"Laure",
"Pamela",
"Rosemarie",
"Marie-Louise",
"Jael",
"Eleonora",
"Zoe",
"Franca",
"Hannah",
"Yolanda",
"Birgit",
"Amina",
"Leandra",
"Elise",
"Alma",
"Anastasia",
"Marlis",
"Fernanda",
"Irmgard",
"Micheline",
"Elfriede",
"Selma",
"Ilona",
"Danièle",
"Justine",
"Magali",
"Georgette",
"Graziella",
"Cynthia",
"Cäcilia",
"Loredana",
"Géraldine",
"Sylviane",
"Heidy",
"Alexia",
"Mary",
"Ingeborg",
"Emine",
"Yara",
"Ursina",
"Marlène",
"Morgane",
"Michela",
"Katarina",
"Marine",
"Ulrike",
"Daria",
"Bruna",
"Jasmina",
"Mira",
"Soraya",
"Juliana",
"Marlyse",
"Agnès",
"Carine",
"Gloria",
"Alena",
"Svetlana",
"Josefina",
"Annelise",
"Myrta",
"Roberta",
"Pierrette",
"Celine",
"Annika",
"Mirjana",
"Andrée",
"Célia",
"Serena",
"Christel",
"Susan",
"Jocelyne",
"Renée",
"Vesna",
"Andreia",
"Elizabeth",
"Cinzia",
"Karen",
"Cecilia",
"Karine",
"Marlen",
"Ilaria",
"Virginia",
"Suzana",
"Rose-Marie",
"Jeanine",
"Margarita",
"Joanna",
"Coralie",
"Elif",
"Dina",
"Janina",
"Josefine",
"Mina",
"Hannelore",
"Gordana",
"Luciana",
"Heike",
"Aurelia",
"Luna",
"Dagmar",
"Filomena",
"Dolores",
"Raymonde",
"Prisca",
"Annick",
"Huguette",
"Elisabetha",
"Dragana",
"Leona",
"Elke",
"Inès",
"Valerie",
"Ayse",
"Amelia",
"Flurina",
"Marie-Thérèse",
"Roswitha",
"Rosanna",
"Ginette",
"Matilde",
"Mélissa",
"Yolande",
"Océane",
"Giada",
"Murielle",
"Danijela",
"Sanja",
"Slavica",
"Adelina",
"Valentine",
"Catarina",
"Raquel",
"Emmanuelle",
"Dana",
"Erica",
"Marcelle",
"Nancy",
"Germaine",
"Concetta",
"Gianna",
"Jade",
"Lucienne",
"Letizia",
"Fatime",
"Odette",
"Solange",
"Lily",
"Nada",
"Lucy",
"Margherita",
"Hana",
"Elisabetta",
"Leana",
"Vivienne",
"Viola",
"Ljiljana",
"Yasmin",
"Agatha",
"Jutta",
"Anabela",
"Laila",
"Romana",
"Gaëlle",
"Belinda",
"Aida",
"Federica",
"Giuliana",
"Marie-Claire",
"Mirella",
"Eliana",
"Paulina",
"Diane",
"Paulette",
"Mona",
"Milica",
"Corinna",
"Yasmine",
"Annalise",
"Hatice",
"Alyssa",
"Ellen",
"Kelly",
"Biljana",
"Noelia",
"Alisha",
"Léonie",
"Amandine",
"Amelie",
"Amy",
"Lilli",
"Nelli",
"Margaux",
"Melisa",
"Anneliese",
"Marie-Claude",
"Sheila",
"Dragica",
"Xenia",
"Violeta",
"Annie",
"Lou",
"Meret",
"Ute",
"Irena",
"Catia",
"Giuseppa",
"Sybille",
"Lana",
"Celina",
"Aylin",
"Zita",
"Karolina",
"Louisa",
"Luise",
"Rosina",
"Jeanette",
"Sharon",
"Henriette",
"Joy",
"Inge",
"Carola",
"Tiffany",
"Margarete",
"Marietta",
"Josefa",
"Leyla",
"Nuria",
"Anne-Lise",
"Gilberte",
"Giorgia",
"Emanuela",
"Daisy",
"Angelica",
"Josephine",
"Ilse",
"Natasa",
"Andrina",
"Fabiana",
"Flora",
"Maude",
"Melinda",
"Silke",
"Enya",
"Amira",
"Beate",
"Viktoria",
"Francisca",
"Merita",
"Odile",
"Snezana",
"Ariana",
"Carol",
"Medina",
"Romy",
"Noëlle",
"Alissa",
"Elisabete",
"Camilla",
"Miranda",
"Leonora",
"Lejla",
"Zeynep",
"Maeva",
"Domenica",
"Raffaella",
"Salomé",
"Ornella",
"Rosaria",
"Alisa",
"Alba",
"Zorica",
"Roxane",
"Raphaela",
"Inês",
"Hermine",
"Waltraud",
"Aude",
"Selin",
"Claude",
"Arianna",
"Angélique",
"Leticia",
"Malin",
"Viviana",
"Annelies",
"Damaris",
"Liv",
"Maëlle",
"Sigrid",
"Jill",
"Karina",
"Liana",
"Eline",
"Lotte",
"Lise",
"Rina",
"Morena",
"Marilena",
"Leonor",
"Annamaria",
"Albina",
"Dijana",
"Grazia",
"Ester",
"Vivien",
"Käthi",
"Tara",
"Aurore",
"Katarzyna",
"Amalia",
"Celia",
"Seline",
"Anisa",
"Azra",
"Adeline",
"Fabiola",
"Agnieszka",
"Greta",
"Jane",
"Vincenza",
"Rosalia",
"Marie-Christine",
"Marijana",
"Jara",
"Gudrun",
"Edona",
"Gioia",
"Marcia",
"Myrtha",
"Ekaterina",
"Lucette",
"Gertrude",
"Ljubica",
"Adrienne",
"Malika",
"Ava",
"Yael",
"Lola",
"Marinette",
"Teuta",
"Joelle",
"Beata",
"Line",
"Priscilla",
"Rosalie",
"Mariette",
"Ada",
"Marielle",
"Juliane",
"Emina",
"Arta",
"Margarida",
"Claire-Lise",
"Gaia",
"Antje",
"Raffaela",
"Mercedes",
"Vlora",
"Arlinda",
"Nicoletta",
"Alison",
"Ottilia",
"Clémence",
"Lisbeth",
"Shqipe",
"Adele",
"Maryline",
"Sónia",
"Ewa",
"Drita",
"Gladys",
"Dilara",
"Malgorzata",
"Eleni",
"Sandy",
"Marika",
"Marthe",
"Norma",
"Carolin",
"Ina",
"Agathe",
"Alea",
"Anke",
"Zora",
"Cristiana",
"Marie-José",
"Liridona",
"Romane",
"Noa",
"Shpresa",
"Esma",
"Assunta",
"Vittoria",
"Blerta",
"Ema",
"Elma",
"Anika",
"Marie-France",
"Samanta",
"Mariella",
"Meryem",
"Tânia",
"Ghislaine",
"Marica",
"Desirée",
"Britta",
"Joséphine",
"Moira",
"Maud",
"Gemma",
"Silja",
"Sladjana",
"Sanela",
"Iva",
"Ann",
"Nadège",
"Corine",
"Frida",
"Cheyenne",
"Theres",
"Lilia",
"Matilda",
"Geraldine",
"Lisette",
"Margaret",
"Eloïse",
"Felicia",
"Hulda",
"Kathleen",
"Erina",
"Jovana",
"Timea",
"Sofie",
"Wanda",
"Anne-Sophie",
"Zahra",
"Florentina",
"Alexa",
"Ruzica",
"Ganimete",
"Herta",
"Agata",
"Yasemin",
"Frédérique",
"Nicola",
"Norah",
"Lorenza",
"Ilenia",
"Khadija",
"Elda",
"Felicitas",
"Charline",
"Ela",
"Eliza",
"Katalin",
"Rafaela",
"Tanya",
"Theresa",
"Floriane",
"Katherine",
"Asia",
"Mathilda",
"Fabia",
"Fatmire",
"Imelda",
"Susi",
"Zuzana",
"Cassandra",
"Donatella",
"Antonina",
"Luz",
"Yasmina",
"Eleonore",
"Bluette",
"Malea",
"Danica",
"Dunja",
"Kirsten",
"Eileen",
"Mirela",
"Vanesa",
"Filipa",
"Léna",
"Jaqueline",
"Evelin",
"Violette",
"Vjollca",
"Mariam",
"Maryam",
"Amela",
"Luigia",
"Noëmi",
"Joyce",
"Pierina",
"Aferdita",
"Cátia",
"Mandy",
"Regine",
"Branka",
"Radmila",
"Vreneli",
"Marcella",
"Grace",
"Ludivine",
"Natasha",
"Olena",
"Elea",
"Jil",
"Anne-Laure",
"Eléonore",
"Ayla",
"Mégane",
"Maddalena",
"Sereina",
"Tenzin",
"Dafina",
"Eve",
"Leslie",
"Alix",
"Kiara",
"Ardita",
"Aisha",
"Margit",
"Janet",
"Kira",
"Margreth",
"Amra",
"Marcela",
"Solène",
"Kristin",
"Fitore",
"Rosalba",
"Edina",
"Mariangela",
"Agnese",
"Albulena",
"Joanne",
"Ylenia",
"Clarissa",
"Magda",
"Marie-Laure",
"Anna-Maria",
"Luljeta",
"Marjorie",
"Annalisa",
"Lidija",
"Ajla",
"Sanije",
"Wendy",
"Wilma",
"Layla",
"Thea",
"Esra",
"Jaël",
"Fernande",
"Vania",
"Lindita",
"Tessa",
"Mimoza",
"Kata",
"Maryse",
"Dalia",
"Käthe",
"Blanka",
"Katerina",
"Ophélie",
"Leni",
"Egzona",
"Eugenia",
"Lavinia",
"Léane",
"Bukurije",
"Cordula",
"Teodora",
"Nikolina",
"Özlem",
"Lauriane",
"Milka",
"Patrícia",
"Aloisia",
"Lya",
"Derya",
"Margret",
"Juana",
"Vilma",
"Annabelle",
"Besarta",
"Norina",
"Cláudia",
"Nives",
"Hanife",
"Blerina",
"Lydie",
"Gerlinde",
"Déborah",
"Mirlinda",
"Vivian",
"María",
"Shania",
"Romaine",
"Tuana",
"Berthe",
"Friederike",
"Susann",
"Rosetta",
"Hava",
"Kaltrina",
"Marie-Jeanne",
"Iryna",
"Mihaela",
)
# Full pool of first names used by the provider (male first, then female).
first_names = first_names_male + first_names_female
# Common Swiss(-German) surnames used when generating fake last names.
last_names = (
    "Ackermann",
    "Aebi",
    "Albrecht",
    "Ammann",
    "Amrein",
    "Arnold",
    "Bachmann",
    "Bader",
    "Bär",
    "Bättig",
    "Bauer",
    "Baumann",
    "Baumgartner",
    "Baur",
    "Beck",
    "Benz",
    "Berger",
    "Bernasconi",
    "Betschart",
    "Bianchi",
    "Bieri",
    "Blaser",
    "Blum",
    "Bolliger",
    "Bosshard",
    "Braun",
    "Brun",
    "Brunner",
    "Bucher",
    "Bühler",
    "Bühlmann",
    "Burri",
    "Christen",
    "Egger",
    "Egli",
    "Eichenberger",
    "Erni",
    "Ernst",
    "Eugster",
    "Fankhauser",
    "Favre",
    "Fehr",
    "Felber",
    "Felder",
    "Ferrari",
    "Fischer",
    "Flückiger",
    "Forster",
    "Frei",
    "Frey",
    "Frick",
    "Friedli",
    "Fuchs",
    "Furrer",
    "Gasser",
    "Geiger",
    "Gerber",
    "Gfeller",
    "Giger",
    "Gloor",
    "Graf",
    "Grob",
    "Gross",
    "Gut",
    "Haas",
    "Häfliger",
    "Hafner",
    "Hartmann",
    "Hasler",
    "Hauser",
    "Hermann",
    "Herzog",
    "Hess",
    "Hirt",
    "Hodel",
    "Hofer",
    "Hoffmann",
    "Hofmann",
    "Hofstetter",
    "Hotz",
    "Huber",
    "Hug",
    "Hunziker",
    "Hürlimann",
    "Imhof",
    "Isler",
    "Iten",
    "Jäggi",
    "Jenni",
    "Jost",
    "Kägi",
    "Kaiser",
    "Kälin",
    "Käser",
    "Kaufmann",
    "Keller",
    "Kern",
    "Kessler",
    "Knecht",
    "Koch",
    "Kohler",
    "Kuhn",
    "Küng",
    "Kunz",
    "Lang",
    "Lanz",
    "Lehmann",
    "Leu",
    "Leunberger",
    "Lüscher",
    "Lustenberger",
    "Lüthi",
    "Lutz",
    "Mäder",
    "Maier",
    "Marti",
    "Martin",
    "Maurer",
    "Mayer",
    "Meier",
    "Meili",
    "Meister",
    "Merz",
    "Mettler",
    "Meyer",
    "Michel",
    "Moser",
    "Müller",
    "Näf",
    "Ott",
    "Peter",
    "Pfister",
    "Portmann",
    "Probst",
    "Rey",
    "Ritter",
    "Roos",
    "Roth",
    "Rüegg",
    "Schäfer",
    "Schaller",
    "Schär",
    "Schärer",
    "Schaub",
    "Scheidegger",
    "Schenk",
    "Scherrer",
    "Schlatter",
    "Schmid",
    "Schmidt",
    "Schneider",
    "Schnyder",
    "Schoch",
    "Schuler",
    "Schumacher",
    "Schürch",
    "Schwab",
    "Schwarz",
    "Schweizer",
    "Seiler",
    "Senn",
    "Sidler",
    "Siegrist",
    "Sigrist",
    "Spörri",
    "Stadelmann",
    "Stalder",
    "Staub",
    "Stauffer",
    "Steffen",
    "Steiger",
    "Steiner",
    "Steinmann",
    "Stettler",
    "Stocker",
    "Stöckli",
    "Stucki",
    "Studer",
    "Stutz",
    "Suter",
    "Sutter",
    "Tanner",
    "Thommen",
    "Tobler",
    "Vogel",
    "Vogt",
    "Wagner",
    "Walder",
    "Walter",
    "Weber",
    "Wegmann",
    "Wehrli",
    "Weibel",
    "Wenger",
    "Wettstein",
    "Widmer",
    "Winkler",
    "Wirth",
    "Wirz",
    "Wolf",
    "Wüthrich",
    "Wyss",
    "Zbinden",
    "Zehnder",
    "Ziegler",
    "Zimmermann",
    "Zingg",
    "Zollinger",
    "Zürcher",
)
# Honorific prefixes that may be prepended to generated names.
prefixes = ("Dr.", "Prof.")
|
joke2k/faker
|
faker/providers/person/de_CH/__init__.py
|
Python
|
mit
| 41,031
|
[
"Brian"
] |
f1d5620d20ad316d9e8ae6298877be4a8e99529c3f4aa3c83ecbf44fbc67c138
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import, division
import numpy as np
import MDAnalysis
from pmda.hbond_analysis import HydrogenBondAnalysis
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_almost_equal, assert_array_equal
from MDAnalysisTests.datafiles import waterPSF, waterDCD, GRO
class TestHydrogenBondAnalysisTIP3P(object):
    """Check parallel PMDA hydrogen-bond results against reference values
    for a small TIP3P water system, across several block decompositions.
    """
    @staticmethod
    @pytest.fixture(scope='class')
    def universe():
        # Small TIP3P water trajectory shipped with MDAnalysisTests.
        return MDAnalysis.Universe(waterPSF, waterDCD)
    # Explicit donor/hydrogen/acceptor selections plus the geometric
    # (distance/angle) criteria that define a hydrogen bond.
    kwargs = {
        'donors_sel': 'name OH2',
        'hydrogens_sel': 'name H1 H2',
        'acceptors_sel': 'name OH2',
        'd_h_cutoff': 1.2,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
    @pytest.fixture(scope='class')
    def h(self, universe):
        # One analysis instance reused by every test in this class.
        h = HydrogenBondAnalysis(universe, **self.kwargs)
        return h
    @pytest.mark.parametrize("n_blocks", [1, 2, 3, 4, 8])
    def test_hbond_analysis(self, h, n_blocks):
        """Totals and bond statistics must not depend on the block split."""
        h.run(n_jobs=n_blocks, n_blocks=n_blocks)
        assert len(np.unique(h.hbonds[:, 0])) == 10
        assert len(h.hbonds) == 32
        # Reference values from the serial MDAnalysis implementation.
        reference = {
            'distance': {'mean': 2.7627309, 'std': 0.0905052},
            'angle': {'mean': 158.9038039, 'std': 12.0362826},
        }
        assert_allclose(np.mean(h.hbonds[:, 4]),
                        reference['distance']['mean'])
        assert_allclose(np.std(h.hbonds[:, 4]), reference['distance']['std'])
        assert_allclose(np.mean(h.hbonds[:, 5]), reference['angle']['mean'])
        assert_allclose(np.std(h.hbonds[:, 5]), reference['angle']['std'])
    @pytest.mark.parametrize("n_blocks", [1, 2, 3, 4, 8])
    def test_count_by_time(self, h, n_blocks):
        """Per-frame bond counts and timesteps match the reference."""
        h.run(n_jobs=n_blocks, n_blocks=n_blocks)
        ref_times = np.arange(0.02, 0.21, 0.02)
        ref_counts = np.array([3, 2, 4, 4, 4, 4, 3, 2, 3, 3])
        counts = h.count_by_time()
        assert_array_almost_equal(h.timesteps, ref_times)
        assert_array_equal(counts, ref_counts)
    def test_count_by_type(self, h):
        """All 32 bonds belong to the single O-H...O type in pure water."""
        h.run(n_jobs=4, n_blocks=4)
        # Only one type of hydrogen bond in this system
        ref_count = 32
        counts = h.count_by_type()
        assert int(counts[0, 2]) == ref_count
    def test_count_by_ids(self, h):
        """Occupancy fractions of the unique donor/acceptor pairs."""
        h.run(n_jobs=4, n_blocks=4)
        ref_counts = [1.0, 1.0, 0.5, 0.4, 0.2, 0.1]
        unique_hbonds = h.count_by_ids()
        # count_by_ids() returns raw counts
        # convert to fraction of time that bond was observed
        counts = unique_hbonds[:, 3] / len(h.timesteps)
        assert_array_equal(counts, ref_counts)
    def test_universe(self, h, universe):
        """The rebuilt universe reproduces the original coordinates."""
        ref = universe.atoms.positions
        h.run(n_jobs=4, n_blocks=4)
        u = h._universe()
        assert_array_almost_equal(u.atoms.positions, ref)
class TestGuess_UseTopology(TestHydrogenBondAnalysisTIP3P):
    """Uses the same distance and cutoff hydrogen bond criteria as
    :class:`TestHydrogenBondAnalysisTIP3P`, so the results are identical,
    but the hydrogens and acceptors are guessed whilst the donor-hydrogen
    pairs are determined via the topology.
    """
    # All selections None: donors come from topology bonds, the rest are
    # guessed, exercising a different code path with identical results.
    kwargs = {
        'donors_sel': None,
        'hydrogens_sel': None,
        'acceptors_sel': None,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
class TestNoUpdating(TestHydrogenBondAnalysisTIP3P):
    """Uses the same distance and cutoff hydrogen bond criteria as
    :class:`TestHydrogenBondAnalysisTIP3P`, but we set `update_selections` to
    be False. The results are identical because the selections are the same
    for each frame for this system.
    """
    # Static selections: evaluated once rather than re-evaluated per frame.
    kwargs = {
        'donors_sel': None,
        'hydrogens_sel': 'name H1 H2',
        'acceptors_sel': 'name OH2',
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0,
        'update_selections': False
    }
class TestGuessDonors_NoTopology(object):
    """Guess the donor atoms involved in hydrogen bonds using the partial
    charges of the atoms.
    """
    @staticmethod
    @pytest.fixture(scope='class')
    def universe():
        # Same TIP3P water trajectory used by the other test classes.
        return MDAnalysis.Universe(waterPSF, waterDCD)
    # No selections supplied, so donors must be guessed from charges.
    kwargs = {
        'donors_sel': None,
        'hydrogens_sel': None,
        'acceptors_sel': None,
        'd_h_cutoff': 1.2,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
    @pytest.fixture(scope='class')
    def h(self, universe):
        # Analysis instance shared by the tests below.
        h = HydrogenBondAnalysis(universe, **self.kwargs)
        return h
    def test_guess_donors(self, h):
        """Charge-based guessing identifies the water oxygens as donors."""
        ref_donors = "(resname TIP3 and name OH2)"
        donors = h.guess_donors(selection='all', max_charge=-0.5)
        assert donors == ref_donors
class TestGuessDonors_GivenHydrogen(TestGuessDonors_NoTopology):
    """Guess the donor atoms involved in hydrogen bonds using the partial
    charges of the atoms, given hydrogen selections.
    """
    # Hydrogens and acceptors are selected explicitly; only the donors
    # are guessed from partial charges.
    kwargs = {
        'donors_sel': None,
        'hydrogens_sel': 'name H1 H2',
        'acceptors_sel': 'name OH2',
        'd_h_cutoff': 1.2,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
class TestHydrogenBondAnalysisTIP3PStartStep(object):
    """Uses the same distance and cutoff hydrogen bond criteria as
    :class:`TestHydrogenBondAnalysisTIP3P` but starting with the second
    frame and using every other frame in the analysis.
    """
    @staticmethod
    @pytest.fixture(scope='class')
    def universe():
        # Same TIP3P water trajectory as the full-trajectory tests.
        return MDAnalysis.Universe(waterPSF, waterDCD)
    # Explicit selections plus distance/angle hydrogen-bond criteria.
    kwargs = {
        'donors_sel': 'name OH2',
        'hydrogens_sel': 'name H1 H2',
        'acceptors_sel': 'name OH2',
        'd_h_cutoff': 1.2,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
    @pytest.fixture(scope='class')
    def h(self, universe):
        # Analysis instance shared by the tests in this class.
        h = HydrogenBondAnalysis(universe, **self.kwargs)
        return h
    @pytest.mark.parametrize("n_blocks", [1, 2, 3, 4])
    def test_hbond_analysis(self, h, n_blocks):
        """Sliced-run results must not depend on the block split."""
        h.run(start=1, step=2, n_jobs=n_blocks, n_blocks=n_blocks)
        assert len(np.unique(h.hbonds[:, 0])) == 5
        assert len(h.hbonds) == 15
        # Reference values from the serial MDAnalysis implementation.
        reference = {
            'distance': {'mean': 2.73942464, 'std': 0.05867924},
            'angle': {'mean': 157.07768079, 'std': 9.72636682},
        }
        assert_allclose(np.mean(h.hbonds[:, 4]),
                        reference['distance']['mean'])
        assert_allclose(np.std(h.hbonds[:, 4]), reference['distance']['std'])
        assert_allclose(np.mean(h.hbonds[:, 5]), reference['angle']['mean'])
        assert_allclose(np.std(h.hbonds[:, 5]), reference['angle']['std'])
    @pytest.mark.parametrize("n_blocks", [1, 2, 3, 4])
    def test_count_by_time(self, h, n_blocks):
        """Per-frame counts for the sliced (every other frame) trajectory."""
        h.run(start=1, step=2, n_jobs=n_blocks, n_blocks=n_blocks)
        ref_times = np.array([0.04, 0.08, 0.12, 0.16, 0.20, ])
        ref_counts = np.array([2, 4, 4, 2, 3])
        counts = h.count_by_time()
        assert_array_almost_equal(h.timesteps, ref_times)
        assert_array_equal(counts, ref_counts)
    def test_count_by_type(self, h):
        """All 15 bonds found in the sliced run share the one water type."""
        h.run(start=1, step=2, n_jobs=4, n_blocks=4)
        # Only one type of hydrogen bond in this system
        ref_count = 15
        counts = h.count_by_type()
        assert int(counts[0, 2]) == ref_count
class TestNoBond_Topology(object):
    """Use the topology file that has positions information, but doesn't have
    bonds information.
    """
    # All selections guessed; donor-hydrogen pairing would need bonds.
    kwargs = {
        'donors_sel': None,
        'hydrogens_sel': None,
        'acceptors_sel': None,
        'd_h_cutoff': 1.2,
        'd_a_cutoff': 3.0,
        'd_h_a_angle_cutoff': 120.0
    }
    def test_nobond(self):
        """Pairing donors with hydrogens must fail without bond information."""
        u = MDAnalysis.Universe(GRO)
        h = HydrogenBondAnalysis(u, **self.kwargs)
        with pytest.raises(ValueError):
            h._get_dh_pairs(u)
    def test_universe(self):
        """The rebuilt universe reproduces the original coordinates."""
        universe = MDAnalysis.Universe(GRO)
        ref = universe.atoms.positions
        h = HydrogenBondAnalysis(universe, **self.kwargs)
        u = h._universe()
        assert_array_almost_equal(u.atoms.positions, ref)
|
MDAnalysis/pmda
|
pmda/test/test_hydrogenbonds_analysis.py
|
Python
|
gpl-2.0
| 9,140
|
[
"MDAnalysis"
] |
2ff63901619194eefa7d1a471724355c43f237b34dda5bbab606b447ab964a37
|
##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NCL, implemented as an easyblock
"""
import fileinput
import os
import re
import sys
from distutils.version import LooseVersion
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root, get_software_version
class EB_NCL(EasyBlock):
    """Support for building/installing NCL."""
    def configure_step(self):
        """Configure build:
        - create Makefile.ini using make and run ymake script to create config file
        - patch config file with correct settings, and add missing config entries
        - create config/Site.local file to avoid interactive install
        - generate Makefile using config/ymkmf script
        """
        try:
            os.chdir('config')
        except OSError, err:
            self.log.error("Failed to change to the 'config' dir: %s" % err)
        # bootstrap NCL's ymake-based configuration machinery
        cmd = "make -f Makefile.ini"
        run_cmd(cmd, log_all=True, simple=True)
        cmd = "./ymake -config $PWD"
        run_cmd(cmd, log_all=True, simple=True)
        # figure out name of config file
        cfg_regexp = re.compile('^\s*SYSTEM_INCLUDE\s*=\s*"(.*)"\s*$', re.M)
        f = open("Makefile", "r")
        txt = f.read()
        f.close()
        cfg_filename = cfg_regexp.search(txt).group(1)
        # adjust config file as needed
        # Fortran-to-C support libraries depend on the compiler toolchain;
        # the ifort runtime libraries moved directories as of version 2011.4.
        ctof_libs = ''
        ifort = get_software_root('ifort')
        if ifort:
            if LooseVersion(get_software_version('ifort')) < LooseVersion('2011.4'):
                ctof_libs = '-lm -L%s/lib/intel64 -lifcore -lifport' % ifort
            else:
                ctof_libs = '-lm -L%s/compiler/lib/intel64 -lifcore -lifport' % ifort
        elif get_software_root('GCC'):
            ctof_libs = '-lgfortran -lm'
        # config-file macros to set, taken from the build environment
        macrodict = {
            'CCompiler': os.getenv('CC'),
            'FCompiler': os.getenv('F77'),
            'CcOptions': '-ansi %s' % os.getenv('CFLAGS'),
            'FcOptions': os.getenv('FFLAGS'),
            'COptimizeFlag': os.getenv('CFLAGS'),
            'FOptimizeFlag': os.getenv('FFLAGS'),
            'ExtraSysLibraries': os.getenv('LDFLAGS'),
            'CtoFLibraries': ctof_libs
        }
        # replace config entries that are already there
        # (fileinput with inplace=1 redirects stdout into the file)
        for line in fileinput.input(cfg_filename, inplace=1, backup='%s.orig' % cfg_filename):
            for (key, val) in macrodict.items():
                regexp = re.compile("(#define %s\s*).*" % key)
                match = regexp.search(line)
                if match:
                    line = "#define %s %s\n" % (key, val)
                    macrodict.pop(key)
            sys.stdout.write(line)
        # add remaining config entries
        f = open(cfg_filename, "a")
        for (key, val) in macrodict.items():
            f.write("#define %s %s\n" % (key, val))
        f.close()
        # log the final config file for debugging
        f = open(cfg_filename, "r")
        self.log.debug("Contents of %s: %s" % (cfg_filename, f.read()))
        f.close()
        # configure
        try:
            os.chdir(self.cfg['start_dir'])
        except OSError, err:
            self.log.error("Failed to change to the build dir %s: %s" % (self.cfg['start_dir'], err))
        # instead of running the Configure script that asks a zillion questions,
        # let's just generate the config/Site.local file ourselves...
        # order of deps is important
        # HDF needs to go after netCDF, because both have a netcdf.h include file
        deps = ["HDF5", "JasPer", "netCDF", "HDF", "g2lib", "g2clib", "Szip"]
        libs = ''
        includes = ''
        for dep in deps:
            root = get_software_root(dep)
            if not root:
                self.log.error('%s not available' % dep)
            libs += ' -L%s/lib ' % root
            includes += ' -I%s/include ' % root
        cfgtxt="""#ifdef FirstSite
#endif /* FirstSite */
#ifdef SecondSite
#define YmakeRoot %(installdir)s
#define LibSearch %(libs)s
#define IncSearch %(includes)s
#define BuildNCL 1
#define HDFlib
#define HDFEOSlib
#define UdUnitslib
#define BuildGRIB2 1
#define BuildRasterHDF 0
#define BuildHDF4 0
#define BuildTRIANGLE 0
#define BuildUdunits 0
#define BuildHDFEOS 0
#define BuildHDFEOS5 0
#endif /* SecondSite */
""" % {
            'installdir': self.installdir,
            'libs': libs,
            'includes': includes
        }
        f = open("config/Site.local", "w")
        f.write(cfgtxt)
        f.close()
        # generate Makefile
        cmd = "./config/ymkmf"
        run_cmd(cmd, log_all=True, simple=True)
    def build_step(self):
        """Building is done in install_step."""
        pass
    def install_step(self):
        """Build in install dir using build_step."""
        # NCL builds directly into the installation directory
        cmd = "make Everything"
        run_cmd(cmd, log_all=True, simple=True)
    def sanity_check_step(self):
        """
        Custom sanity check for NCL
        """
        custom_paths = {
            'files': ["bin/ncl", "lib/libncl.a", "lib/libncarg.a"],
            'dirs': ["include/ncarg"]
        }
        super(EB_NCL, self).sanity_check_step(custom_paths=custom_paths)
    def make_module_extra(self):
        """Set NCARG_ROOT environment variable in module."""
        txt = super(EB_NCL, self).make_module_extra()
        txt += "setenv\tNCARG_ROOT\t$root\n"
        return txt
|
JensTimmerman/easybuild-easyblocks
|
easybuild/easyblocks/n/ncl.py
|
Python
|
gpl-2.0
| 6,733
|
[
"NetCDF"
] |
423672ec83af27928b6ed53ac62dcfa42636df1e3dcf25e639bf794ea8bbc61a
|
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Example of plotting meshed surfaces with pyvisi
"""
import sys
# Choose the rendering backend from the first command-line argument;
# default to gnuplot when none is given.
numArgs = len(sys.argv)
if numArgs == 1:
    ren_mod = "gnuplot"
else:
    ren_mod = sys.argv[1]
# set up some data to plot
from numpy import *
# the x and y axes
# NOTE(review): `floating` comes from the numpy star import above; it is an
# abstract scalar type that only old numpy releases accept as a dtype --
# confirm against the numpy version this (Python 2) example targets.
x = arange(-2,2,0.2, dtype=floating)
y = arange(-2,3,0.2, dtype=floating)
# pick some interesting function to generate the data in the third dimension
# this is the one used in the matlab docs: z = x*exp(-x^2-y^2)
z = zeros((len(x),len(y)), dtype=floating)
# boy do *I* feel old fashioned writing it this way
# surely there's another way to do it: - something to do later
for i in range(len(x)):
    for j in range(len(y)):
        z[i,j] = x[i]*exp(-x[i]*x[i] - y[j]*y[j])
# import the general pyvisi stuff
from pyvisi import *
# then the renderer backend requested on the command line
if ren_mod == "gnuplot":
    from pyvisi.renderers.gnuplot import *
elif ren_mod == "vtk":
    from pyvisi.renderers.vtk import *
else:
    raise ValueError, "Unknown renderer module"
# define a scene object
# a Scene is a container for all of the kinds of things you want to put
# into your plot, for instance, images, meshes, arrow/vector/quiver
# plots, contour plots, spheres etc.
scene = Scene()
# create a MeshPlot object
plot = MeshPlot(scene)
# add some helpful info to the plot
plot.title = 'Example mesh plot'
plot.xlabel = 'x'
plot.ylabel = 'y'
plot.zlabel = 'z'
# assign the data to the plot
# this version assumes that we have x, then y, then z and that z is 2D
# and that x and y are 1D arrays
plot.setData(x,y,z)
# alternative syntax
#plot.setData(xData=x, yData=y, zData=z)
# or (but more confusing depending upon one's naming conventions)
#plot.setData(x=x, y=y, z=z)
# render the scene to screen
scene.render(pause=True, interactive=True)
# save the scene to file
scene.save(fname="meshPlot.png", format=PngImage())
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
examples/meshPlot.py
|
Python
|
gpl-2.0
| 2,578
|
[
"VTK"
] |
892eb107f0d856c7dd8eb4cc6b21a46ece0252683488fbdfc8c507278d7c73f8
|
#!/usr/bin/python
from avocado import Test
from avocado.utils import process
import os, re, vagrant, platform
class VagrantSshfs(Test):
    """Avocado tests for sharing the user home directory into a Vagrant box
    via sshfs: checks the mount, then creates/modifies/deletes files on
    both sides of the mount and verifies the contents match.
    NOTE(review): `pexpect` is used in vagrant_up_with_subscription() but is
    never imported in this module (imports are os, re, vagrant, platform) --
    this will raise NameError at runtime; confirm the intended
    `import pexpect`.  The `print` statement in tearDown also makes this
    module Python 2 only.
    """
    def setUp(self):
        # Test parameters: Vagrantfile location, provider and credentials.
        self.vagrant_VAGRANTFILE_DIR = self.params.get('vagrant_VAGRANTFILE_DIR')
        self.vagrant_PROVIDER = self.params.get('vagrant_PROVIDER', default='')
        self.vagrant_RHN_USERNAME = self.params.get('vagrant_RHN_USERNAME')
        self.vagrant_RHN_PASSWORD = self.params.get('vagrant_RHN_PASSWORD')
        self.sudo_PASSWORD = self.params.get('sudo_PASSWORD')
        self.platform = platform.system()
        if "CYGWIN" in self.platform:
            # On Windows/Cygwin, derive the path as seen inside the VM
            # (/c/Users/...) and on the host (/cygdrive/c/Users/...)
            # from %USERPROFILE% (e.g. C:\Users\name).
            self.mountpoint = os.getenv("USERPROFILE")
            self.mountpoint_vm = '/' + self.mountpoint[:1].lower() + self.mountpoint[2:].replace("\\", "/")
            self.mountpoint_host = "/cygdrive" + self.mountpoint_vm
        else:
            # On POSIX hosts the path is the same inside and outside the VM.
            self.mountpoint = self.mountpoint_host = self.mountpoint_vm = os.getenv("HOME")
        # Scratch files addressed both by their VM path and their host path.
        self.dummy_file1_vm = os.path.join(self.mountpoint_vm, "dummy_file1.txt")
        self.dummy_file2_vm = os.path.join(self.mountpoint_vm, "dummy_file2.txt")
        self.dummy_file3_vm = os.path.join(self.mountpoint_vm, "dummy_file3.txt")
        self.dummy_file1_host = os.path.join(self.mountpoint_host, "dummy_file1.txt")
        self.dummy_file2_host = os.path.join(self.mountpoint_host, "dummy_file2.txt")
        self.dummy_file3_host = os.path.join(self.mountpoint_host, "dummy_file3.txt")
        self.dummy_contents1 = "Dumping dummy contents into file"
        self.dummy_contents2 = "This is a dummy file"
        # Drive vagrant from the directory that contains the Vagrantfile.
        self.v = vagrant.Vagrant(self.vagrant_VAGRANTFILE_DIR)
        os.chdir(self.vagrant_VAGRANTFILE_DIR)
    def vagrant_up_with_subscription(self):
        ''' vagrant up with registration to RHN '''
        #self.vagrant_destroy()
        self.log.info("Brining up the vagrant box and registering to RHN...")
        # Credentials are passed to the registration plugin via environment.
        os.environ["SUB_USERNAME"] = self.vagrant_RHN_USERNAME
        os.environ["SUB_PASSWORD"] = self.vagrant_RHN_PASSWORD
        cmd = "vagrant up --provider %s" %(self.vagrant_PROVIDER)
        # NOTE(review): pexpect is not imported at module level (see class
        # docstring); used here to answer an eventual sudo password prompt.
        child = pexpect.spawn (cmd)
        index = child.expect (['.*assword.*', pexpect.EOF, pexpect.TIMEOUT], timeout=300)
        if index == 0:
            child.sendline (self.sudo_PASSWORD)
        self.log.info(child.after)
        rc = child.expect(pexpect.EOF, timeout=None)
        self.assertEqual(0, rc)
        # Confirm the box reports a running state.
        out = self.v.status()
        state = re.search(r"state='(.*)',", str(out) ).group(1)
        self.assertEqual("running", state, "The vagrant box is not up")
    def remove_vm(self):
        # Tear the box down from the Vagrantfile directory.
        self.log.info("Destroying the vagrant box...")
        os.chdir(self.vagrant_VAGRANTFILE_DIR)
        self.v.destroy()
    def test_check_mount(self):
        # Bring the box up, then verify the home dir is visible on both sides.
        self.vagrant_up_with_subscription()
        self.log.info("Checking if the user home dir is mounted fine inside the VM...")
        self.log.info("Checking the user home dir...")
        self.assertTrue(os.path.isdir(self.mountpoint_host))
        self.log.info("Checking the mount point in the CDK box...")
        cmd = "vagrant ssh -c 'ls -d %s'" %(self.mountpoint_vm)
        out = process.run(cmd, shell=True)
        self.assertEqual(self.mountpoint_vm, out.stdout.strip("\r\n"), "User home dir is not mounted inside the VM")
    def test_create_file(self):
        # Create a file on the host; its contents must be readable in the VM.
        self.log.info("Creating a file under user home dir...")
        try:
            open(self.dummy_file1_host, 'a').write(self.dummy_contents1)
        except Exception as e:
            self.log.error("Error while creating file")
            raise e
        cmd = "vagrant ssh -c 'cat %s'" %(self.dummy_file1_vm)
        out = process.run(cmd, shell=True)
        self.assertEqual(self.dummy_contents1, out.stdout.strip("\r\n"), "File contents do not match")
    def test_create_file_inside_vm(self):
        # Create files from inside the VM; contents must appear on the host.
        self.log.info("Creating a file under mount point inside the VM...")
        try:
            cmd = "vagrant ssh -c 'touch %s'" %(self.dummy_file2_vm)
            out = process.run(cmd, shell=True)
            cmd = "vagrant ssh -c 'echo \"%s\" > %s'" %(self.dummy_contents2, self.dummy_file3_vm)
            out = process.run(cmd, shell=True)
            with open(self.dummy_file3_host, 'r') as myfile:
                data = myfile.read()
                self.assertEqual(data.strip("\n"), self.dummy_contents2, "File contents do not match")
        except Exception as e:
            self.log.error("Error creating files")
            raise e
    def test_modify_files(self):
        # Append from both sides and check the merged contents match.
        self.log.info("Editing the created files...")
        try:
            open(self.dummy_file3_host, 'a').write(self.dummy_contents1)
            cmd = "vagrant ssh -c 'echo \"%s\" >> %s'" %(self.dummy_contents2, self.dummy_file1_vm)
            out = process.run(cmd, shell=True)
            with open(self.dummy_file1_host, 'r') as myfile:
                data = myfile.read()
                self.assertEqual(data.strip("\n"), self.dummy_contents1 + self.dummy_contents2, "File contents do not match")
            with open(self.dummy_file3_host, 'r') as myfile:
                data = myfile.read()
                self.assertEqual(data.strip("\n"), self.dummy_contents2 + '\n' + self.dummy_contents1, "File contents do not match")
        except Exception as e:
            self.log.error("Error while modifying files")
            raise e
    def test_delete_files(self):
        # Remove the scratch files from both sides, then destroy the box.
        self.log.info("Deleting the files already created...")
        try:
            cmd = "vagrant ssh -c 'rm -f %s %s'" %(self.dummy_file1_vm, self.dummy_file2_vm)
            out = process.run(cmd, shell=True)
            os.remove(self.dummy_file3_host)
        except Exception as e:
            self.log.error("Error while removing file...")
            raise e
        self.remove_vm()
    def tearDown(self):
        print "End of test.........."
|
projectatomic/adb-tests
|
cdk-testsuite/tests/vagrant_sshfs.py
|
Python
|
gpl-2.0
| 5,438
|
[
"CDK"
] |
7eb86f9bf370884bc83277a29452a7cd5e56beb806e30dae34f4621eff6702a9
|
# This file is part of the pyGTM module.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) Mathieu Jeannin 2019 2020 <math.jeannin@free.fr>.
"""
Example file for the pyGTM package
It reproduces the main result of section IV in Passler, Jeannin and Paarman
https://arxiv.org/abs/2002.03832
fig 2
It also demonstrates how to use the euler angles for the layers
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import GTMcore as GTM
import Permittivities as mat
from matplotlib import rcParams, rcParamsDefault
c_const = 299792458.0  # speed of light, m/s

# --- Simulation setup -------------------------------------------------------
# Frequency axis: wavenumbers (cm-1) converted to Hz via f = 100 * c * nu.
f_cm = np.linspace(100.0, 400.0, 100)  # frequency range (cm-1)
f_sp = 100.0 * c_const * f_cm  # frequency range (cm-1 -> Hz)
# **careful** angle of incidence is theta_in,
# it is **not** the euler angle theta for the layers
theta_in = np.deg2rad(80.0)
# Note that Phi in the text is actually Euler angle psi
psi_v = np.deg2rad(0.0)  # to get the last value
phi = 0.0
# the principal vertical axis of the layer is still z so theta=0
theta = 0.0

# Physical dimensions (m) and isotropic test permittivities; the crystal
# layer is slightly lossy (imaginary part 0.1).
region1_thickness = 1000e-6
region2_thickness = 1000e-6
crystal_thickness = 10.0e-6
crystal_dielectric_function = lambda x: np.array([[2.0 + 0.1j, 0.0, 0.0],
                                                  [0.0, 2.0 + 0.1j, 0.0],
                                                  [0.0, 0.0, 2.0 + 0.1j]])
region1_dielectric_function = lambda x: np.array([[1.1, 0.0, 0.0],
                                                  [0.0, 1.1, 0.0],
                                                  [0.0, 0.0, 1.1]])
region2_dielectric_function = lambda x: np.array([[1.5, 0.0, 0.0],
                                                  [0.0, 1.5, 0.0],
                                                  [0.0, 0.0, 1.5]])

# Substrate, superstrate and layers; euler angles are set further below.
region1 = GTM.Layer(thickness=region1_thickness, epsilon=region1_dielectric_function)
crystal = GTM.Layer(thickness=crystal_thickness, epsilon=crystal_dielectric_function)
region2 = GTM.Layer(thickness=region2_thickness, epsilon=region2_dielectric_function)

# Assemble the layered system: region1 / crystal / region2.
S = GTM.System()
S.set_superstrate(region1)
S.set_substrate(region2)
S.add_layer(crystal)

# Reflectivity/transmittance spectra for s- and p-polarisation.
R_s = np.zeros((len(f_sp)))
R_p = np.zeros((len(f_sp)))
T_s = np.zeros((len(f_sp)))
T_p = np.zeros((len(f_sp)))

# Set the layers' orientation.
S.substrate.set_euler(theta=theta, phi=phi, psi=psi_v)
S.superstrate.set_euler(theta=theta, phi=phi, psi=0.0)  # this layer stands still
for L in S.layers:
    L.set_euler(theta=theta, phi=phi, psi=psi_v)

# Loop over frequency.
for ii, fi in enumerate(f_sp):
    S.initialize_sys(fi)  # sets the epsilons w/ correct euler rotation
    # In-plane wavevector fixed by the angle of incidence in the superstrate.
    zeta_sys = np.sin(theta_in) * np.sqrt(S.superstrate.epsilon[0, 0])
    S.calculate_GammaStar(fi, zeta_sys)  # main computation (discard output)
    r, R_loc, t, T_loc = S.calculate_r_t(zeta_sys)  # calculate reflectivity
    R_p[ii] = R_loc[0]  # p-pol only
    R_s[ii] = R_loc[1]  # s-pol only
    T_p[ii] = T_loc[0]  # p-pol only
    T_s[ii] = T_loc[1]  # s-pol only
    # Field profile vs depth (the original evaluated this twice in a row;
    # the first result was discarded, so a single call suffices).
    zplot, E_out, H_out, zn_plot = S.calculate_Efield(fi, zeta_sys, magnetic=True)
    # Poynting vector and absorption profile (computed for demonstration).
    S_loc, A_loc = S.calculate_Poynting_Absorption_vs_z(zplot, E_out, H_out, R_loc)

# Print the spectra as columns: wavenumber, R_s, R_p, T_s, T_p.
for v, rp, rs, tp, ts in zip(f_cm, R_p, R_s, T_p, T_s):
    print(v, rs, rp, ts, tp)
|
JohnKendrick/PDielec
|
PDielec/test_gtm.py
|
Python
|
mit
| 3,751
|
[
"CRYSTAL"
] |
0e23da7bc09e4435ab200331243736fe61c8b63fee9ff931497c0c4aef4ccdbb
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Parent classes for quantum chemistry program input and output file
formats.
"""
import re
class InputFormat(object):
    """Specification for a quantum-chemistry input file.

    Bundles the job memory, computational method, basis set, molecule and
    database index needed to emit an input file for a QC program.
    """

    # Pre-compiled pattern for Dunning correlation-consistent basis names,
    # e.g. ``cc-pvdz``, ``aug-cc-pwcv5z``: optional prefix, family
    # (valence/core-valence/weighted core-valence) and zeta level.
    # Compiled once here instead of on every corresponding_aux_basis() call.
    _DUNNING_RE = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$')

    def __init__(self, mem, mtd, bas, mol, sys, cast):
        # total job memory in MB
        self.memory = mem
        # computational method (stored lowercase)
        self.method = mtd.lower()
        # qcdb.Molecule object
        self.molecule = mol
        # database member index
        self.index = sys
        # orbital basis set (stored lowercase)
        self.basis = bas.lower()
        # do cast up from sto-3g basis?
        self.castup = cast

    def corresponding_aux_basis(self):
        """For Dunning basis sets, returns strings from which auxiliary
        basis sets and heavy-aug can be constructed. Note that
        valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
        not, since this is the usual aux basis pattern.

        *augbasis* is round up to the nearest aug-cc-pVXZ
        *rootbasis* is round down to the nearest cc-pVXZ
        *auxbasis* is round up to the nearest cc-pVXZ or aug-cc-pVXZ

        Returns ``[rootbasis, augbasis, auxbasis]``, or
        ``[None, None, None]`` for non-Dunning basis sets.
        """
        match = self._DUNNING_RE.match(self.basis)
        if not match:
            return [None, None, None]
        prefix, family, zeta = match.groups()
        rootbas = 'cc-' + family + zeta + 'z'
        augbas = 'aug-cc-' + family + zeta + 'z'
        # A plain 'cc-' basis keeps an unaugmented aux basis; any
        # decoration (aug-, heavy-aug-, ...) rounds up to the aug aux.
        auxbas = rootbas if prefix == 'cc-' else augbas
        return [rootbas, augbas, auxbas]
class InputFormat2(object):
    """Specification of a quantum chemistry job driven by an options dict.

    Unlike :class:`InputFormat`, the basis and cast-up settings are read
    out of the supplied options dictionary rather than passed directly.
    """

    def __init__(self, mem, mol, mtd, der, opt):
        # total job memory in MB
        self.memory = mem
        # qcdb.Molecule object
        self.molecule = mol
        # computational method (stored lowercased)
        self.method = mtd.lower()
        # computational derivative level
        self.dertype = der
        # options dictionary
        self.options = opt
        # orbital basis set, read from the global options (stored lowercased)
        self.basis = opt['GLOBALS']['BASIS']['value'].lower()
        # whether to cast up from the sto-3g basis, read from SCF options
        self.castup = opt['SCF']['BASIS_GUESS']['value']

    def corresponding_aux_basis(self):
        """For Dunning basis sets, return strings from which auxiliary
        basis sets and heavy-aug can be constructed, as the list
        ``[rootbasis, augbasis, auxbasis]``. Note that
        valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
        not, since this is the usual aux basis pattern.

        *augbasis* is rounded up to the nearest aug-cc-pVXZ.
        *rootbasis* is rounded down to the nearest cc-pVXZ.
        *auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ.

        All three entries are ``None`` for non-Dunning basis sets.
        """
        match = re.match(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$', self.basis)
        if match is None:
            return [None, None, None]
        stem = match.group(2) + match.group(3) + 'z'
        rootbas = 'cc-' + stem
        augbas = 'aug-cc-' + stem
        # only a bare cc- prefix keeps the root; otherwise round up to aug
        auxbas = augbas if match.group(1) != 'cc-' else rootbas
        return [rootbas, augbas, auxbas]
|
ashutoshvt/psi4
|
psi4/driver/qcdb/qcformat.py
|
Python
|
lgpl-3.0
| 4,080
|
[
"Psi4"
] |
3fac9222a62f3d9724b5518c1192af54bd896a5a2acfb6373662e8e19aa1c285
|
import sys
import gzip
from glob import glob
from io import BytesIO
from .. import backends, conventions
from ..core.alignment import auto_combine
from ..core.utils import close_on_error, is_remote_uri
from ..core.pycompat import basestring, OrderedDict, range
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path): # pragma: no cover
try:
import netCDF4
engine = 'netcdf4'
except ImportError:
try:
import pydap
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
else:
try:
import netCDF4
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
'netCDF4-python or scipy installed')
return engine
def open_dataset(filename_or_obj, group=None, decode_cf=True,
                 mask_and_scale=True, decode_times=True,
                 concat_characters=True, decode_coords=True, engine=None,
                 chunks=None):
    """Load and decode a dataset from a file or file-like object.

    Parameters
    ----------
    filename_or_obj : str, file or xray.backends.*DataStore
        Strings are interpreted as a path to a netCDF file or an OpenDAP URL
        and opened with python-netCDF4, unless the filename ends with .gz, in
        which case the file is gunzipped and opened with scipy.io.netcdf (only
        netCDF3 supported). File-like objects are opened with scipy.io.netcdf
        (only netCDF3 supported).
    group : str, optional
        Path to the netCDF4 group in the given file to open (only works for
        netCDF4 files).
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf'}, optional
        Engine to use when reading netCDF files. If not provided, the default
        engine is chosen based on available dependencies, with a preference for
        'netcdf4'.
    chunks : dict, optional
        If chunks is provided, it used to load the new dataset into dask
        arrays. This is an experimental feature; see the documentation for more
        details.

    Returns
    -------
    dataset : Dataset
        The newly created dataset.

    See Also
    --------
    open_mfdataset
    """
    if not decode_cf:
        # decode_cf=False switches off every individual decoding option
        mask_and_scale = False
        decode_times = False
        concat_characters = False
        decode_coords = False

    def maybe_decode_store(store):
        # apply CF decoding (and optional dask chunking) to a raw store
        ds = conventions.decode_cf(
            store, mask_and_scale=mask_and_scale, decode_times=decode_times,
            concat_characters=concat_characters, decode_coords=decode_coords)
        if chunks is not None:
            ds = ds.chunk(chunks)
        return ds

    if isinstance(filename_or_obj, backends.AbstractDataStore):
        store = filename_or_obj
    elif isinstance(filename_or_obj, basestring):
        if filename_or_obj.endswith('.gz'):
            if engine is not None and engine != 'scipy':
                raise ValueError('can only read gzipped netCDF files with '
                                 "default engine or engine='scipy'")
            # if the string ends with .gz, then gunzip and open as netcdf file
            if sys.version_info[:2] < (2, 7):
                raise ValueError('reading a gzipped netCDF not '
                                 'supported on Python 2.6')
            try:
                store = backends.ScipyDataStore(gzip.open(filename_or_obj))
            except TypeError as e:
                # TODO: gzipped loading only works with NetCDF3 files.
                # BUGFIX: use str(e) rather than e.message -- exception
                # objects have no .message attribute on Python 3, so the
                # old code raised AttributeError and masked the real error.
                if 'is not a valid NetCDF 3 file' in str(e):
                    raise ValueError('gzipped file loading only supports '
                                     'NetCDF 3 files.')
                else:
                    raise
        else:
            # TODO: automatically fall back to using pydap if given a URL and
            # netCDF4 is not available
            if engine is None:
                engine = _get_default_engine(filename_or_obj,
                                             allow_remote=True)
            if engine == 'netcdf4':
                store = backends.NetCDF4DataStore(filename_or_obj, group=group)
            elif engine == 'scipy':
                store = backends.ScipyDataStore(filename_or_obj)
            elif engine == 'pydap':
                store = backends.PydapDataStore(filename_or_obj)
            elif engine == 'h5netcdf':
                store = backends.H5NetCDFStore(filename_or_obj, group=group)
            else:
                raise ValueError('unrecognized engine for open_dataset: %r'
                                 % engine)
        # ensure the underlying file is closed if decoding fails
        with close_on_error(store):
            return maybe_decode_store(store)
    else:
        if engine is not None and engine != 'scipy':
            raise ValueError('can only read file-like objects with '
                             "default engine or engine='scipy'")
        # assume filename_or_obj is a file-like object
        store = backends.ScipyDataStore(filename_or_obj)
    return maybe_decode_store(store)
class _MultiFileCloser(object):
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
def open_mfdataset(paths, chunks=None, concat_dim=None, **kwargs):
    """Open multiple files as a single dataset.

    Experimental. Requires dask to be installed.

    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form "path/to/my/files/*.nc" or an
        explicit list of files to open.
    chunks : dict, optional
        Dictionary with keys given by dimension names and values given by
        chunk sizes. In general, these should divide the dimensions of each
        dataset. By default, chunks will be chosen to load entire input files
        into memory at once. This has a major impact on performance: please
        see the full documentation for more details.
    concat_dim : str or DataArray or Index, optional
        Dimension to concatenate files along. This argument is passed on to
        :py:func:`xray.auto_combine` along with the dataset objects. You only
        need to provide this argument if the dimension along which you want to
        concatenate is not a dimension in the original datasets, e.g., if you
        want to stack a collection of 2D arrays along a third dimension.
    **kwargs : optional
        Additional arguments passed on to :py:func:`xray.open_dataset`.

    Returns
    -------
    xray.Dataset

    See Also
    --------
    auto_combine
    open_dataset
    """
    # a glob string is expanded to a sorted, explicit file list
    if isinstance(paths, basestring):
        paths = sorted(glob(paths))
    if not paths:
        raise IOError('no files to open')
    opened = [open_dataset(p, **kwargs) for p in paths]
    # keep the raw file objects so the combined dataset can close them all
    file_objs = [ds._file_obj for ds in opened]
    chunked = [ds.chunk(chunks) for ds in opened]
    combined = auto_combine(chunked, concat_dim=concat_dim)
    combined._file_obj = _MultiFileCloser(file_objs)
    return combined
def to_netcdf(dataset, path=None, mode='w', format=None, group=None,
              engine=None):
    """Write *dataset* to a netCDF file, or return the file as bytes.

    When *path* is None the dataset is serialized into an in-memory
    buffer via the scipy backend and the resulting bytes are returned.
    Otherwise the engine is chosen from *engine* or inferred from the
    available libraries, and the appropriate backend writer is invoked.
    Raises ValueError for unrecognized or unsupported engines.
    """
    if path is None:
        # writing to bytes is only possible through the scipy backend
        path = BytesIO()
        if engine is None:
            engine = 'scipy'
        elif engine != 'scipy':
            # BUGFIX: previously this raised for ANY non-None engine,
            # including engine='scipy', contradicting the message below.
            raise ValueError('invalid engine for creating bytes with '
                             'to_netcdf: %r. Only the default engine '
                             "or engine='scipy' is supported" % engine)
    elif engine is None:
        engine = _get_default_engine(path)
    write_funcs = {'netcdf4': _to_netcdf4,
                   'scipy': _to_scipy_netcdf,
                   'h5netcdf': _to_h5netcdf}
    try:
        to_netcdf_func = write_funcs[engine]
    except KeyError:
        raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
    if format is not None:
        # backend writers expect the upper-case canonical format name
        format = format.upper()
    return to_netcdf_func(dataset, path, mode, format, group)
def _to_netcdf4(dataset, path, mode, format, group):
    """Write *dataset* via the netCDF4-python backend."""
    # default to the full netCDF4 format when none was requested
    fmt = 'NETCDF4' if format is None else format
    with backends.NetCDF4DataStore(path, mode=mode, format=fmt,
                                   group=group) as store:
        dataset.dump_to_store(store)
def _to_h5netcdf(dataset, path, mode, format, group):
    """Write *dataset* via the h5netcdf backend (NETCDF4 format only)."""
    if format not in (None, 'NETCDF4'):
        raise ValueError('invalid format for h5netcdf backend')
    with backends.H5NetCDFStore(path, mode=mode, group=group) as store:
        dataset.dump_to_store(store)
def _to_scipy_netcdf(dataset, path, mode, format, group):
    """Write *dataset* via scipy.io.netcdf; return bytes for BytesIO paths."""
    if group is not None:
        raise ValueError('cannot save to a group with the '
                         'scipy.io.netcdf backend')
    # map the requested format name onto scipy's netCDF version number
    if format == 'NETCDF3_CLASSIC':
        version = 1
    elif format is None or format == 'NETCDF3_64BIT':
        version = 2
    else:
        raise ValueError('invalid format for scipy.io.netcdf backend: %r'
                         % format)
    with backends.ScipyDataStore(path, mode='w', version=version) as store:
        dataset.dump_to_store(store)
    if isinstance(path, BytesIO):
        return path.getvalue()
|
clarkfitzg/xray
|
xray/backends/api.py
|
Python
|
apache-2.0
| 10,589
|
[
"NetCDF"
] |
801533c86164ee61f169b3a0c8a1697f45767817146b95bb5be0800e6e785c07
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import abc
import os
import re
import glob
import logging
import fnmatch
import json
import six
from six.moves import zip
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
logger = logging.getLogger(__name__)
class AbstractDrone(six.with_metaclass(abc.ABCMeta, MSONable)):
    """
    Abstract drone class defining the methods every drone must implement.
    Because of the quirky nature of Python's multiprocessing, intermediate
    data representations have to be python primitives, so all objects that
    drones work with must be MSONable. All drones must also implement the
    standard MSONable as_dict() and from_dict API.
    """

    @abc.abstractmethod
    def assimilate(self, path):
        """
        Assimilate data in a directory path into a pymatgen object. Because
        of the quirky nature of Python's multiprocessing, the object must
        support pymatgen's as_dict() for parallel processing.

        Args:
            path: directory path

        Returns:
            An assimilated object
        """
        return None

    @abc.abstractmethod
    def get_valid_paths(self, path):
        """
        Check whether *path* contains valid data for assimilation, and
        return the valid paths. The returned paths may be directories or
        files, depending on the kind of data being assimilated: for VASP
        runs only directories containing vasprun.xml matter, whereas for
        converting all POSCARs in a tree the individual file paths matter.

        Args:
            path: input path as a tuple generated from os.walk, i.e.,
                (parent, subdirs, files).

        Returns:
            List of valid dir/file paths for assimilation
        """
        return None
class VaspToComputedEntryDrone(AbstractDrone):
    """
    VaspToEntryDrone assimilates directories containing vasp output to
    ComputedEntry/ComputedStructureEntry objects. There are some restrictions
    on the valid directory structures:
    1. There can be only one vasp run in each directory.
    2. Directories designated "relax1", "relax2" are considered to be 2 parts
       of an aflow style run, and only "relax2" is parsed.
    3. The drone parses only the vasprun.xml file.
    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries.
        parameters (list): Input parameters to include. It has to be one of
            the properties supported by the Vasprun object. See
            :class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
            a default set of parameters that are necessary for typical
            post-processing will be set.
        data (list): Output data to include. Has to be one of the properties
            supported by the Vasprun object.
    """
    def __init__(self, inc_structure=False, parameters=None, data=None):
        self._inc_structure = inc_structure
        # baseline parameters always extracted; user-supplied ones are added
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "potcar_symbols", "run_type"}
        if parameters:
            self._parameters.update(parameters)
        self._data = data if data else []
    def assimilate(self, path):
        """Parse the vasprun.xml under *path* into an entry; None on error."""
        files = os.listdir(path)
        if "relax1" in files and "relax2" in files:
            # aflow-style double relaxation: only the final (relax2) run counts
            filepath = glob.glob(os.path.join(path, "relax2",
                                              "vasprun.xml*"))[0]
        else:
            vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
            filepath = None
            if len(vasprun_files) == 1:
                filepath = vasprun_files[0]
            elif len(vasprun_files) > 1:
                """
                This is a bit confusing, since there maybe be multi-steps. By
                default, assimilate will try to find a file simply named
                vasprun.xml, vasprun.xml.bz2, or vasprun.xml.gz. Failing which
                it will try to get a relax2 from an aflow style run if
                possible. Or else, a randomly chosen file containing
                vasprun.xml is chosen.
                """
                for fname in vasprun_files:
                    # exact (possibly compressed) vasprun.xml wins outright
                    if os.path.basename(fname) in ["vasprun.xml",
                                                   "vasprun.xml.gz",
                                                   "vasprun.xml.bz2"]:
                        filepath = fname
                        break
                    # otherwise prefer a relax2 file
                    if re.search(r"relax2", fname):
                        filepath = fname
                        break
                    # fallthrough: remember the last candidate seen
                    filepath = fname
        try:
            vasprun = Vasprun(filepath)
        except Exception as ex:
            # unparseable run: log at debug level and skip this directory
            logger.debug("error in {}: {}".format(filepath, ex))
            return None
        entry = vasprun.get_computed_entry(self._inc_structure,
                                           parameters=self._parameters,
                                           data=self._data)
        # attach any transformations.json history found alongside the run
        entry.parameters["history"] = _get_transformation_history(path)
        return entry
    def get_valid_paths(self, path):
        """Return [parent] when the os.walk tuple *path* looks like a VASP run."""
        (parent, subdirs, files) = path
        if "relax1" in subdirs and "relax2" in subdirs:
            return [parent]
        # accept a dir with a vasprun.xml, or with both POSCAR and OSZICAR,
        # but never the inside of a relax1/relax2 pair (handled via parent)
        if (not parent.endswith("/relax1")) and \
                (not parent.endswith("/relax2")) and (
                len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
                len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
                len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
                ):
            return [parent]
        return []
    def __str__(self):
        return " VaspToComputedEntryDrone"
    def as_dict(self):
        # standard MSONable serialization of the constructor arguments
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data},
                "version": __version__,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
    """
    A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
    parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
    smaller and faster to parse. However, much fewer properties are available
    compared to the standard VaspToComputedEntryDrone.
    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries. Structure will be parsed from the CONTCAR.
    """
    def __init__(self, inc_structure=False):
        self._inc_structure = inc_structure
        # fixed parameter set (no potcar_symbols, unlike the parent class)
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "run_type"}
    def assimilate(self, path):
        """Build an entry from INCAR/POTCAR/OSZICAR/etc.; None on any error."""
        files = os.listdir(path)
        try:
            files_to_parse = {}
            if "relax1" in files and "relax2" in files:
                # custodian-style double relaxation: inputs from relax1,
                # final structure/energy from relax2
                for filename in ("INCAR", "POTCAR", "POSCAR"):
                    search_str = os.path.join(path, "relax1", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[0]
                for filename in ("CONTCAR", "OSZICAR"):
                    search_str = os.path.join(path, "relax2", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[-1]
            else:
                for filename in (
                    "INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
                ):
                    # NOTE: rebinds 'files' (previously the directory listing)
                    files = glob.glob(os.path.join(path, filename + "*"))
                    if len(files) < 1:
                        continue
                    if len(files) == 1 or filename == "INCAR" or \
                       filename == "POTCAR" or filename == "DYNMAT":
                        files_to_parse[filename] = files[-1]\
                            if filename == "POTCAR" else files[0]
                    elif len(files) > 1:
                        """
                        This is a bit confusing, since there maybe be
                        multiple steps. By default, assimilate will try to find
                        a file simply named filename, filename.bz2, or
                        filename.gz. Failing which it will try to get a relax2
                        from a custodian double relaxation style run if
                        possible. Or else, a random file is chosen.
                        """
                        for fname in files:
                            # NOTE(review): this pattern uses regex syntax
                            # but fnmatch expects glob syntax, so the match
                            # appears to never succeed and the loop falls
                            # through to the last candidate -- confirm intent
                            if fnmatch.fnmatch(os.path.basename(fname),
                                               r"{}(\.gz|\.bz2)*"
                                               .format(filename)):
                                files_to_parse[filename] = fname
                                break
                            # NOTE(review): fname is a full path from glob,
                            # so equality with the bare name "POSCAR" looks
                            # unreachable -- confirm (filename was likely
                            # intended here instead of fname)
                            if fname == "POSCAR" and \
                                    re.search(r"relax1", fname):
                                files_to_parse[filename] = fname
                                break
                            if (fname in ("CONTCAR", "OSZICAR") and
                                    re.search(r"relax2", fname)):
                                files_to_parse[filename] = fname
                                break
                            # fallthrough: remember the last candidate seen
                            files_to_parse[filename] = fname
            # parse whichever files were located; missing ones stay None
            poscar, contcar, incar, potcar, oszicar, dynmat = [None]*6
            if 'POSCAR' in files_to_parse:
                poscar = Poscar.from_file(files_to_parse["POSCAR"])
            if 'CONTCAR' in files_to_parse:
                contcar = Poscar.from_file(files_to_parse["CONTCAR"])
            if 'INCAR' in files_to_parse:
                incar = Incar.from_file(files_to_parse["INCAR"])
            if 'POTCAR' in files_to_parse:
                potcar = Potcar.from_file(files_to_parse["POTCAR"])
            if 'OSZICAR' in files_to_parse:
                oszicar = Oszicar(files_to_parse["OSZICAR"])
            if 'DYNMAT' in files_to_parse:
                dynmat = Dynmat(files_to_parse["DYNMAT"])
            param = {"hubbards":{}}
            # map per-site LDAUU values onto element symbols when present
            if poscar is not None and incar is not None and "LDAUU" in incar:
                param["hubbards"] = dict(zip(poscar.site_symbols,
                                             incar["LDAUU"]))
            param["is_hubbard"] = (
                incar.get("LDAU", False) and sum(param["hubbards"].values()) > 0
            ) if incar is not None else False
            param["run_type"] = None
            if incar is not None:
                param["run_type"] = "GGA+U" if param["is_hubbard"] else "GGA"
            param["history"] = _get_transformation_history(path)
            param["potcar_spec"] = potcar.spec if potcar is not None else None
            # sentinel energy 1e10 marks a run with no OSZICAR
            energy = oszicar.final_energy if oszicar is not None else 1e10
            # prefer the relaxed structure (CONTCAR) over the input (POSCAR)
            structure = contcar.structure if contcar is not None\
                else poscar.structure
            initial_vol = poscar.structure.volume if poscar is not None else \
                None
            final_vol = contcar.structure.volume if contcar is not None else \
                None
            delta_volume = None
            if initial_vol is not None and final_vol is not None:
                delta_volume = (final_vol / initial_vol - 1)
            data = {"filename": path, "delta_volume": delta_volume}
            if dynmat is not None:
                data['phonon_frequencies'] = dynmat.get_phonon_frequencies()
            if self._inc_structure:
                entry = ComputedStructureEntry(
                    structure, energy, parameters=param, data=data
                )
            else:
                entry = ComputedEntry(
                    structure.composition, energy, parameters=param, data=data
                )
            return entry
        except Exception as ex:
            # any parse failure skips this directory with a debug log
            logger.debug("error in {}: {}".format(path, ex))
            return None
    def __str__(self):
        return "SimpleVaspToComputedEntryDrone"
    def as_dict(self):
        # standard MSONable serialization of the constructor arguments
        return {"init_args": {"inc_structure": self._inc_structure},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
    """
    GaussianToEntryDrone assimilates directories containing Gaussian output to
    ComputedEntry/ComputedStructureEntry objects. By default, it is assumed
    that Gaussian output files have a ".log" extension.
    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries.
        parameters (list): Input parameters to include. It has to be one of
            the properties supported by the GaussianOutput object. See
            :class:`pymatgen.io.gaussianio GaussianOutput`. The parameters
            have to be one of python's primitive types, i.e., list, dict of
            strings and integers. If parameters is None, a default set of
            parameters will be set.
        data (list): Output data to include. Has to be one of the properties
            supported by the GaussianOutput object. The parameters have to
            be one of python's primitive types, i.e. list, dict of strings
            and integers. If data is None, a default set will be set.
        file_extensions (list):
            File extensions to be considered as Gaussian output files.
            Defaults to just the typical "log" extension.
    .. note::
        Like the GaussianOutput class, this is still in early beta.
    """
    def __init__(self, inc_structure=False, parameters=None, data=None,
                 file_extensions=(".log",)):
        self._inc_structure = inc_structure
        # baseline parameters always extracted; user-supplied ones are added
        self._parameters = {"functional", "basis_set", "charge", "spin_mult",
                            "route"}
        if parameters:
            self._parameters.update(parameters)
        # baseline output data always extracted; user-supplied keys are added
        self._data = {"stationary_type", "properly_terminated"}
        if data:
            self._data.update(data)
        self._file_extensions = file_extensions

    def assimilate(self, path):
        """Parse the Gaussian output file at *path*; return None on error."""
        try:
            gaurun = GaussianOutput(path)
        except Exception as ex:
            logger.debug("error in {}: {}".format(path, ex))
            return None
        param = {name: getattr(gaurun, name) for name in self._parameters}
        data = {name: getattr(gaurun, name) for name in self._data}
        if self._inc_structure:
            return ComputedStructureEntry(gaurun.final_structure,
                                          gaurun.final_energy,
                                          parameters=param,
                                          data=data)
        return ComputedEntry(gaurun.final_structure.composition,
                             gaurun.final_energy, parameters=param,
                             data=data)

    def get_valid_paths(self, path):
        """Return every file in the os.walk tuple with a matching extension."""
        parent, subdirs, files = path
        return [os.path.join(parent, name) for name in files
                if os.path.splitext(name)[1] in self._file_extensions]

    def __str__(self):
        return " GaussianToComputedEntryDrone"

    def as_dict(self):
        # standard MSONable serialization of the constructor arguments
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data,
                              "file_extensions": self._file_extensions},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        return cls(**d["init_args"])
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
except:
return None
return None
|
xhqu1981/pymatgen
|
pymatgen/apps/borg/hive.py
|
Python
|
mit
| 17,218
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
97a9babd78f286f5801ee1a946794112df2190b0d237e55531fc6dc239c48910
|
#!/usr/bin/env python2
from sepath_core import *
from itertools import chain
import re
import optparse
import sys
import pysam
import HTSeq
def count(cargo, seb, split):
    """Increment the tally for *split* inside the *seb* bucket of *cargo*."""
    cargo[seb][split] = cargo[seb][split] + 1
def write_bed(se_ga, fn):
    """Dump the sub-exon genomic array *se_ga* to the BED file *fn*."""
    with open(fn, "wb") as fh:
        for iv, ses in se_ga.steps():
            for gene_id, i, j, n in ses:
                # segment id encodes the gene plus the sub-exon coordinates
                seg_id = "%s_%s:%s:%s" % (gene_id, i, j, n)
                fields = [iv.chrom, iv.start, iv.end, seg_id, 0, iv.strand]
                fh.write("\t".join(map(str, fields)) + "\n")
def write_sep(seps_counts, seps_lengths, se_gl, fh):
    """Write a tab-separated table of sub-exon-path counts/lengths to *fh*."""
    header = ["gene_id", "total", "unique", "sep_length", "sep1", "sep2"]
    fh.write("\t".join(header) + "\n")
    for gene_id, ses_counts in seps_counts.iteritems():
        ses_lengths = seps_lengths[gene_id]
        for ses, (total, unique) in ses_counts.iteritems():
            sep_length = ses_lengths[ses][1]
            # render each mate's sub-exon chain as dash-joined i:j:n triples
            left = "-".join(["%s:%s:%s" % se for se in ses[0]])
            right = "-".join(["%s:%s:%s" % se for se in ses[1]])
            row = [gene_id, str(total), str(unique), str(sep_length),
                   left, right]
            fh.write("\t".join(row) + "\n")
def count_subexonpaths(seps):
    """Tally total and unique fragment counts per sub-exon path per gene."""
    counts = {}
    for gene_id, ses_cargo in seps.iteritems():
        gene_counts = {}
        for ses, cargo in ses_cargo.iteritems():
            # total = all fragments seen; unique = number of distinct entries
            gene_counts[ses] = (sum(cargo.values()), len(cargo.values()))
        counts[gene_id] = gene_counts
    return counts
def measure_subexonpaths(seps, se_gm):
    """Compute (contiguous, sep_length) for each sub-exon path per gene.

    A path is contiguous when the first sub-exon of the right mate starts
    at most one index after the last sub-exon of the left mate; only then
    is the summed length of the distinct sub-exons reported, otherwise NaN.
    An empty mate makes the path non-contiguous.
    """
    lengths = {}
    for gene_id, ses_cargo in seps.iteritems():
        lengths[gene_id] = {}
        for ses, cargo in ses_cargo.iteritems():
            ses_lengths = [[(se_gm[(gene_id,) + se]).length for se in sei]
                           for sei in ses]
            try:
                last_left = ses[0][-1]
                first_right = ses[1][0]
                # compare the final index fields of the boundary sub-exons
                contiguous = (first_right[-1] - last_left[-1] <= 1)
            except IndexError:
                # an empty mate cannot form a contiguous path
                contiguous = False
            if contiguous:
                # distinct lengths only, so shared sub-exons count once
                sep_length = sum(set(chain.from_iterable(ses_lengths)))
            else:
                sep_length = float("nan")
            lengths[gene_id][ses] = (contiguous, sep_length)
    return lengths
if __name__ == "__main__":
    # Command-line interface. NOTE: this script is Python 2 (see the
    # `python2` shebang and `except Exception, e` syntax below).
    optParser = optparse.OptionParser(
        usage = "%prog [options] alignment_file annotation_file",
        description = \
        "This script takes a paired-end 'alignment_file' in BAM/SAM format and an" +
        "'annotation_file' in GTF/GFF format and counts how many times a fragment was" +
        "mapped to a specific order of sub-exons a.k.a the reads sub-exon path",
        epilog = \
        "Written by Marcin Cieslik (mcieslik@med.umich.edu) " +
        "Michigan Center for Translational Pathology (c) 2014 " +
        "Built using 'HTSeq' (%s)." % HTSeq.__version__
    )
    optParser.add_option("--stranded", action="store_true", dest="stranded",
                         default=False, help="turn on strand-specific analysis (fr-firststrand)")
    optParser.add_option("--mmcut", type="int", dest="mmcut", default=1,
                         help="Do not count fragments that map to more than 'mmcut' locations")
    optParser.add_option("--unique", action="store_true", dest="unique",
                         default=False, help="count unique fragments per sub-exon path")
    optParser.add_option("--qc", type="string", dest="qc",
                         default="strict", help="read QC filtering 'strict' or 'loose'")
    optParser.add_option("--out", type="string", dest="out",
                         help="sub-exon path output file (cnt)"),
    optParser.add_option("--bed", type="string", dest="se_bed",
                         help="derived sub-exon annotation (bed)"),
    optParser.add_option("--bag", type="string", dest="seb_json",
                         help="full sub-exon bag output file (json)"),
    optParser.add_option("--path", type="string", dest="sep_json",
                         help="full sub-exon path output file (json)"),
    optParser.add_option("--eattr", type="string", dest="eattr",
                         default="exon_id", help="GFF attribute to be used as exon id (default, " +
                         "suitable for Ensembl GTF files: exon_id)"),
    optParser.add_option("--gattr", type="string", dest="gattr",
                         default="gene_id", help="GFF attribute to be used as gene id (default, " +
                         "suitable for Ensembl GTF files: gene_id)"),
    optParser.add_option("--verbose", action="store_true", dest="verbose",
                         help="run-time messages printed to stderr")
    optParser.add_option("--progress", type="int", dest="progress", default=100000,
                         help="progress on BAM processing printed every n lines")
    # with no arguments at all, print help instead of failing
    if len(sys.argv) == 1:
        optParser.print_help()
        sys.exit(1)
    (opts, args) = optParser.parse_args()
    # exactly two positional arguments: alignment file and annotation file
    if len(args) != 2:
        optParser.print_help()
        sys.exit(1)
    with pysam.Samfile(args[0]) as sf:
        # check the SAM header sort-order flag; warn when it is missing
        try:
            order = re.search("SO:(.*)", sf.text).groups()[0]
        except Exception, e:
            order = None
        if not order in ("queryname", "coordinate"):
            sys.stderr.write("warning: missing SO SAM header flag. " +\
                 "Alignment_file should be sorted by queryname (better) or coordinate.\n")
        sys.stderr.write("info: parsing GTF file\n")
        se_ga, se_gm, se_gl, se_gs = parse_gtf(args[1], stranded=opts.stranded)
        if opts.se_bed:
            sys.stderr.write("info: writing sub-exon BED file\n")
            write_bed(se_ga, opts.se_bed)
        sys.stderr.write("info: finding unique sub-exons\n")
        se_unique = unique_subexons(se_ga)
        sys.stderr.write("info: processing BAM file\n")
        # cargo accumulates per-bag, per-split fragment counts
        cargo = defaultdict(lambda: defaultdict(int))
        seb_cargos = scanBAM(sf, se_ga, count, cargo, opts.progress, opts.qc,
                             "fragment" if opts.unique else None, opts.mmcut)
        sys.stderr.write("info: calculating sub-exon paths\n")
        sep_cargos = seb2sep(seb_cargos, se_unique)
        sys.stderr.write("info: counting sub-exon paths\n")
        sep_counts = count_subexonpaths(sep_cargos)
        sep_lengths = measure_subexonpaths(sep_cargos, se_gm)
        sys.stderr.write("info: writing sep file '%s'\n" % opts.out)
        # default to stdout when no output file was requested
        out = open(opts.out, "wb") if opts.out else sys.stdout
        write_sep(sep_counts, sep_lengths, se_gl, out)
|
mcieslik-mctp/sepath
|
sepath_count.py
|
Python
|
gpl-2.0
| 6,900
|
[
"HTSeq",
"pysam"
] |
5b40eb268f214a151ea90b1ddb5f765cacac4be052f7cc68386e76dc1699c7db
|
import pybullet as p
from time import sleep
import pybullet_data
# Connect to the GUI simulator and make the bundled pybullet assets findable.
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# Reset into a world that supports deformable (soft) bodies.
p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD)
p.resetDebugVisualizerCamera(3,-420,-30,[0.3,0.9,-2])
p.setGravity(0, 0, -10)
tex = p.loadTexture("uvmap.png")
# Static ground plane and a rigid cube for the soft bodies to interact with.
planeId = p.loadURDF("plane.urdf", [0,0,-2])
boxId = p.loadURDF("cube.urdf", [0,3,2],useMaximalCoordinates = True)
# Soft torus loaded directly as a Neo-Hookean deformable with self-collision.
bunnyId = p.loadSoftBody("torus/torus_textured.obj", simFileName="torus.vtk", mass = 3, useNeoHookean = 1, NeoHookeanMu = 180, NeoHookeanLambda = 600, NeoHookeanDamping = 0.01, collisionMargin = 0.006, useSelfCollision = 1, frictionCoeff = 0.5, repulsionStiffness = 800)
p.changeVisualShape(bunnyId, -1, rgbaColor=[1,1,1,1], textureUniqueId=tex, flags=0)
# Second torus loaded via a deformable URDF wrapper.
bunny2 = p.loadURDF("torus_deform.urdf", [0,1,0.2], flags=p.URDF_USE_SELF_COLLISION)
p.changeVisualShape(bunny2, -1, rgbaColor=[1,1,1,1], textureUniqueId=tex, flags=0)
p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25)
# Step the simulation manually (real-time stepping disabled).
p.setRealTimeSimulation(0)
while p.isConnected():
  p.stepSimulation()
  p.getCameraImage(320,200)
  p.setGravity(0,0,-10)
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/gym/pybullet_examples/deformable_torus.py
|
Python
|
agpl-3.0
| 1,136
|
[
"VTK"
] |
d387c83f5462da0162fd9685853a48aae6f383e89ce13aa127efa43f5b173154
|
from octopus.modules.es import dao
from datetime import datetime
from octopus.modules.account.exceptions import NonUniqueAccountException
def query_filter(q):
    """Restrict the account query *q* to safe, client-visible source fields.

    Used by the query endpoint so that sensitive fields such as password
    hashes and activation/reset tokens are never sent to the client.
    """
    # q is an esprit.models.Query object
    safe_fields = ["id", "email", "created_date", "last_updated", "role"]
    q.include_source(safe_fields)
class BasicAccountDAO(dao.ESDAO):
    """DAO for account records, with email- and token-based lookups."""
    __type__ = 'account'

    @classmethod
    def pull_by_email(cls, email):
        """Return the single account with *email*, or None; raise on duplicates."""
        matches = cls.object_query(q=AccountQuery(email=email).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the email {x}".format(x=email))
        if matches:
            return matches[0]
        return None

    @classmethod
    def get_by_reset_token(cls, reset_token, not_expired=True):
        """Return the account holding *reset_token*, or None.

        With not_expired=True (the default), an expired token also yields None.
        Raises NonUniqueAccountException when several accounts share the token.
        """
        matches = cls.object_query(q=AccountQuery(reset_token=reset_token).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the reset token {x}".format(x=reset_token))
        if not matches:
            return None
        acc = matches[0]
        if acc.is_reset_expired() and not_expired:
            return None
        return acc

    @classmethod
    def get_by_activation_token(cls, activation_token, not_expired=True):
        """Return the account holding *activation_token*, or None.

        With not_expired=True (the default), an expired token also yields None.
        Raises NonUniqueAccountException when several accounts share the token.
        """
        matches = cls.object_query(q=AccountQuery(activation_token=activation_token).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the activation token {x}".format(x=activation_token))
        if not matches:
            return None
        acc = matches[0]
        if acc.is_activation_expired() and not_expired:
            return None
        return acc
class AccountQuery(object):
    """Builds the ES query body used by the account DAO lookups.

    Any combination of email / reset_token / activation_token may be
    supplied; each non-None value contributes an exact-match term to the
    boolean "must" clause.
    """
    def __init__(self, email=None, reset_token=None, activation_token=None):
        self.email = email
        self.reset_token = reset_token
        self.activation_token = activation_token

    def query(self):
        """Return the Elasticsearch query body as a plain dict."""
        must = []
        if self.email is not None:
            must.append({"term": {"email.exact": self.email}})
        if self.reset_token is not None:
            must.append({"term": {"reset_token.exact": self.reset_token}})
        if self.activation_token is not None:
            must.append({"term": {"activation_token.exact": self.activation_token}})
        return {"query": {"bool": {"must": must}}}
|
JiscPER/magnificent-octopus
|
octopus/modules/account/dao.py
|
Python
|
apache-2.0
| 2,831
|
[
"Octopus"
] |
9b567bd638fa7c5f9979a464860747ce41bbe125887315b22b4ee4b25dd6abc1
|
import numpy as np
from ase import Atoms
from ase.units import Bohr
from gpaw.density import RealSpaceDensity
from gpaw.lfc import BasisFunctions
from gpaw.mixer import Mixer
from gpaw.setup import Setups
from gpaw.xc import XC
from gpaw.utilities.tools import coordinates
from gpaw.mpi import rank
class HirshfeldDensity(RealSpaceDensity):
    """Density as sum of atomic densities."""
    def __init__(self, calculator):
        # Wraps an existing GPAW calculator; reuses its coarse/fine grids
        # with a single spin channel and zero total charge.
        self.calculator = calculator
        density = calculator.density
        par = self.calculator.input_parameters
        RealSpaceDensity.__init__(self, density.gd, density.finegd, 1, 0,
                                  stencil=par.stencils[1])
    def get_density(self, atom_indicees=None):
        """Get sum of atomic densities from the given atom list.
        All atoms are taken if the list is not given."""
        all_atoms = self.calculator.get_atoms()
        if atom_indicees is None:
            atom_indicees = range(len(all_atoms))
        density = self.calculator.density
        density.set_positions(all_atoms.get_scaled_positions() % 1.0,
                              self.calculator.wfs.rank_a)
        # select atoms
        # Build the sub-Atoms object together with its atomic density
        # matrices (D_asp) and domain ranks, keeping only the requested
        # atoms and renumbering them from zero.
        atoms = []
        D_asp = {}
        rank_a = []
        all_D_asp = self.calculator.density.D_asp
        all_rank_a = self.calculator.density.rank_a
        for a in atom_indicees:
            if a in all_D_asp:
                D_asp[len(atoms)] = all_D_asp.get(a)
            atoms.append(all_atoms[a])
            rank_a.append(all_rank_a[a])
        atoms = Atoms(atoms, cell=all_atoms.get_cell())
        spos_ac = atoms.get_scaled_positions() % 1.0
        Z_a = atoms.get_atomic_numbers()
        par = self.calculator.input_parameters
        # Fresh setups for the selected subset, mirroring the parent
        # calculator's parameters.
        setups = Setups(Z_a, par.setups, par.basis, par.lmax,
                        XC(par.xc),
                        self.calculator.wfs.world)
        self.D_asp = D_asp
        # initialize
        self.initialize(setups,
                        self.calculator.timer,
                        np.zeros((len(atoms), 3)), False)
        self.set_mixer(None)
        self.set_positions(spos_ac, rank_a)
        basis_functions = BasisFunctions(self.gd,
                                         [setup.phit_j
                                          for setup in self.setups],
                                         cut=True)
        basis_functions.set_positions(spos_ac)
        # Superpose free-atom densities, then interpolate onto the fine
        # grid (gridrefinement=2) including the all-electron corrections.
        self.initialize_from_atomic_densities(basis_functions)
        aed_sg, gd = self.get_all_electron_density(atoms,
                                                   gridrefinement=2)
        # Single spin channel: return the density array and its grid.
        return aed_sg[0], gd
class HirshfeldPartitioning:
    """Partion space according to the Hirshfeld method.
    After: F. L. Hirshfeld Theoret. Chim.Acta 44 (1977) 129-138
    """
    def __init__(self, calculator, density_cutoff=1.e-12):
        self.calculator = calculator
        self.density_cutoff = density_cutoff
    def initialize(self):
        """Build the inverse promolecular (sum-of-free-atoms) density."""
        self.atoms = self.calculator.get_atoms()
        self.hdensity = HirshfeldDensity(self.calculator)
        density_g, gd = self.hdensity.get_density()
        # Invert only where numerically safe; points below density_cutoff
        # keep an inverse weight of 0 to avoid division blow-up.
        self.invweight_g = 0. * density_g
        density_ok = np.where(density_g > self.density_cutoff)
        self.invweight_g[density_ok] = 1.0 / density_g[density_ok]
    def get_calculator(self):
        return self.calculator
    def get_effective_volume_ratio(self, atom_index):
        """Effective volume to free volume ratio.
        After: Tkatchenko and Scheffler PRL 102 (2009) 073005, eq. (7)
        """
        atoms = self.atoms
        finegd = self.calculator.density.finegd
        den_g, gd = self.calculator.density.get_all_electron_density(atoms)
        den_g = den_g.sum(axis=0)
        assert(gd == finegd)
        denfree_g, gd = self.hdensity.get_density([atom_index])
        assert(gd == finegd)
        # the atoms r^3 grid
        position = self.atoms[atom_index].position / Bohr
        r_vg, r2_g = coordinates(finegd, origin=position)
        r3_g = r2_g * np.sqrt(r2_g)
        weight_g = denfree_g * self.invweight_g
        nom = finegd.integrate(r3_g * den_g * weight_g)
        denom = finegd.integrate(r3_g * denfree_g)
        return nom / denom
    def get_weight(self, atom_index):
        """Hirshfeld weight function of one atom on the fine grid."""
        denfree_g, gd = self.hdensity.get_density([atom_index])
        weight_g = denfree_g * self.invweight_g
        return weight_g
    def get_charges(self):
        """Charge on the atom according to the Hirshfeld partitioning"""
        self.initialize()
        finegd = self.calculator.density.finegd
        den_g, gd = self.calculator.density.get_all_electron_density(self.atoms)
        den_g = den_g.sum(axis=0)
        charges = []
        for ia, atom in enumerate(self.atoms):
            weight_g = self.get_weight(ia)
            # Integrate each atom's share of the density exactly once
            # (the original computed this integral twice per atom, once
            # into an unused local).
            charges.append(atom.number - finegd.integrate(weight_g * den_g))
        return charges
    def get_effective_volume_ratios(self):
        """Return the list of effective volume to free volume ratios."""
        self.initialize()
        ratios = []
        for a, atom in enumerate(self.atoms):
            ratios.append(self.get_effective_volume_ratio(a))
        return np.array(ratios)
|
ajylee/gpaw-rtxs
|
gpaw/analyse/hirshfeld.py
|
Python
|
gpl-3.0
| 5,296
|
[
"ASE",
"GPAW"
] |
41846b76c058a276518fa61e315853b2af507c08e452710460448bd773e70efb
|
""" A set of tokens and convienence functions for input/output files.
author: Brian Schrader
since: 2015-12-28
"""
from __future__ import print_function
from collections import namedtuple
import glob, re
# Filename template for intermediate command output: mp.<alias>.output<ext>
file_pattern = 'mp.{}.output{}'
# Alias template naming a specific output of a command.
alias_pattern = '{command}-{output_number}'
class PathToken(object):
    """A named alias bound to a filesystem path."""

    def __init__(self, alias, path):
        self.alias = alias
        self.path = path

    def __repr__(self):
        return '<Path {}: {}>'.format(self.alias, self.path)

    def __eq__(self, other):
        # Two path tokens match when either the alias or the path agrees;
        # anything without those attributes compares unequal.
        try:
            if self.alias == other.alias:
                return True
            return self.path == other.path
        except AttributeError:
            return False

    def eval(self):
        """Return the concrete path this token stands for."""
        return self.path
class CommentToken(object):
    """A comment line, stored as a sequence of string fragments."""

    def __init__(self, parts):
        self.parts = parts

    def __repr__(self):
        return '<Comment: {}>'.format(''.join(self.parts))

    def __eq__(self, other):
        # Comments compare by their joined text, not fragment boundaries.
        return ''.join(self.parts) == ''.join(other.parts)

    def eval(self):
        """Render the comment as a single newline-terminated line."""
        joined = ''.join(self.parts)
        return '{}\n'.format(joined)
class FileToken(object):
    """Base class for input/output file tokens, holding the shared
    alias / filename / working-directory state.

    Warning: not meant to be used directly; see Input and Output.
    """

    def __init__(self, alias, filename='', cwd=''):
        self.alias = alias
        self.filename = filename
        # Normalise the working directory so that `path` can simply
        # concatenate: a non-empty cwd always ends with a slash.
        if cwd and not cwd.endswith('/'):
            cwd += '/'
        self.cwd = cwd

    def __eq__(self, other):
        # Tokens match on either alias or filename; non-token values
        # compare unequal.
        try:
            if self.alias == other.alias:
                return True
            return self.filename == other.filename
        except AttributeError:
            return False

    def __hash__(self):
        # Hash follows the alias, consistent with alias-based equality.
        return hash(self.alias)

    @property
    def path(self):
        """Full path: the normalised cwd followed by the filename."""
        return '%s%s' % (self.cwd, self.filename)
class Input(FileToken):
    """ A model of a single input to a given command. Input tokens can be
    evaluated to obtain their actual filename(s).
    """
    def __init__(self, alias, filename='', cwd='', and_or=''):
        # and_or: '' for a plain input, 'or' when the token expands to
        # several independent alternatives, 'and' for one member of such
        # an expansion (see eval()).
        super(Input, self).__init__(alias, filename, cwd)
        self.and_or = and_or
    def __repr__(self):
        # eval() raises ValueError when no files match; show '?' instead
        # of propagating out of repr.
        try:
            eval = self.eval()
        except Exception:
            eval = '?'
        return '<Input: {}->[{}]{}>'.format(self.alias, eval,
                ' _{}_'.format(self.and_or) if self.and_or else '')
    def fuzzy_match(self, other):
        """ Given another token, see if either the major alias identifier
        matches the other alias, or if magic matches the alias.
        """
        magic, fuzzy = False, False
        # Only Output tokens carry a .magic attribute; anything else
        # falls through to the fuzzy check.
        try:
            magic = self.alias == other.magic
        except AttributeError:
            pass
        if '.' in self.alias:
            # Major identifier is the part before the first '.'.
            major = self.alias.split('.')[0]
            fuzzy = major == other.alias
        return magic or fuzzy
    def eval(self):
        """ Evaluates the given input and returns a string containing the
        actual filenames represented. If the input token represents multiple
        independent files, then eval will return a list of all the input files
        needed, otherwise it returns the filenames in a string.
        """
        if self.and_or == 'or':
            # Expand into one 'and' Input per matching file.
            return [Input(self.alias, file, self.cwd, 'and')
                    for file in self.files]
        return ' '.join(self.files)
    @property
    def command_alias(self):
        """ Returns the command alias for a given input. In most cases this
        is just the input's alias but if the input is one of many, then
        `command_alias` returns just the beginning of the alias cooresponding to
        the command's alias.
        """
        # NOTE(review): the guard tests for '.' but the split uses '-' —
        # these look inconsistent (fuzzy_match splits on '.').  Confirm
        # the intended delimiter before relying on this property.
        if '.' in self.alias:
            return self.alias.split('-')[0]
        return None
    @property
    def is_magic(self):
        # files raises ValueError when nothing matches; eval() returning
        # a list marks an 'or' (multi-file) input.
        try:
            return isinstance(self.eval(), list)
        except ValueError:
            return False
    @property
    def is_glob(self):
        # True when the filename contains a shell wildcard.
        return '*' in self.filename
    @property
    def magic_path(self):
        # Path built from the intermediate-output filename template.
        match = file_pattern.format(self.alias, '*')
        return '{}{}'.format(self.cwd, match)
    @property
    def files(self):
        """ Returns a list of all the files that match the given
        input token.

        Tries, in order: the literal path, the templated magic path (for
        glob inputs), and finally the bare alias.  Raises ValueError when
        nothing matches.
        """
        res = None
        if not res:
            res = glob.glob(self.path)
        if not res and self.is_glob:
            res = glob.glob(self.magic_path)
        if not res:
            res = glob.glob(self.alias)
        if not res:
            raise ValueError('No files match. %s' % self)
        return res
    @staticmethod
    def from_string(string, _or=''):
        """ Parse a given string and turn it into an input token. """
        if _or:
            and_or = 'or'
        else:
            and_or = ''
        return Input(string, and_or=and_or)
class Output(FileToken):
    """A single output of a command.

    Output tokens can be evaluated to obtain their actual filename(s);
    the optional *magic* string ('o', 'o:name', 'o.ext') configures how
    the filename is produced.
    """

    def __init__(self, alias, filename='', cwd='', magic=''):
        super(Output, self).__init__(alias, filename, cwd)
        self.ext = ''
        self.magic = ''
        self._clean(magic)

    def __repr__(self):
        magic_part = (' ' + self.magic) if self.magic else ''
        return '<Output: {}->[{}]{} {}>'.format(self.alias, self.eval(),
                magic_part, self.ext)

    def __eq__(self, other):
        """Allow magic-vs-alias comparison for magic outputs; otherwise
        fall back to the FileToken equality rules.
        """
        try:
            if self.magic == other.alias:
                return True
            return super(Output, self).__eq__(other)
        except AttributeError:
            return False

    def eval(self):
        """Return the filename to be used for script output."""
        # Precedence: explicit magic name, then the templated default,
        # then the literal path.
        if self.magic:
            return self.magic
        if not self.filename:
            return file_pattern.format(self.alias, self.ext)
        return self.path

    def as_input(self):
        """Return an Input token referring to this output's file."""
        return Input(self.alias, self.eval())

    def _clean(self, magic):
        """Strip the output tag designator from *magic*.

        'o'      -> plain output marker (no magic name)
        'o:name' -> magic output named *name*
        'o.ext'  -> output carrying the file extension '.ext'
        """
        prefix = magic[:2].lower()
        if magic.lower() == 'o':
            self.magic = ''
        elif prefix == 'o:':
            self.magic = magic[2:]
        elif prefix == 'o.':
            self.ext = magic[1:]

    @staticmethod
    def from_string(string):
        """Parse a given string and turn it into an output token."""
        return Output('', magic=string)
|
TorkamaniLab/metapipe
|
metapipe/models/tokens.py
|
Python
|
mit
| 6,534
|
[
"Brian"
] |
158172b84c46c426d998a1b24b20ccd4756ef4a3da4fd09aa3d2e6a0b3214ebb
|
#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import sys
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.cc.kccsd_rhf import vector_to_nested, nested_to_vector
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.cc import eom_kccsd_ghf as eom_kgccsd
from pyscf.pbc.cc import kintermediates_rhf as imdk
from pyscf.pbc.cc.kccsd_rhf import _get_epq
from pyscf.pbc.cc.kccsd_t_rhf import _get_epqr
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.mp.kmp2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
einsum = lib.einsum
########################################
# EOM-IP-CCSD
########################################
def ipccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2ph operators are of the form s_{ij}^{ b}, i.e. 'jb' indices are coupled.

    Applies the EOM-IP-CCSD effective Hamiltonian to the (r1, r2)
    amplitudes packed in *vector* for k-shift *kshift*, returning the
    packed result with frozen-orbital entries masked to zero.
    '''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    if imds is None: imds = eom.make_imds()
    t2 = imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv
    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)
    # 1h-1h block
    Hr1 = -einsum('ki,k->i', imds.Loo[kshift], r1)
    # 1h-2h1p block
    for kl in range(nkpts):
        Hr1 += 2. * einsum('ld,ild->i', imds.Fov[kl], r2[kshift, kl])
        Hr1 += -einsum('ld,lid->i', imds.Fov[kl], r2[kl, kshift])
        for kk in range(nkpts):
            kd = kconserv[kk, kshift, kl]
            Hr1 += -2. * einsum('klid,kld->i', imds.Wooov[kk, kl, kshift], r2[kk, kl])
            Hr1 += einsum('lkid,kld->i', imds.Wooov[kl, kk, kshift], r2[kk, kl])
    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    # 2h1p-1h block
    for ki in range(nkpts):
        for kj in range(nkpts):
            kb = kconserv[ki, kshift, kj]
            Hr2[ki, kj] -= einsum('kbij,k->ijb', imds.Wovoo[kshift, kb, ki], r1)
    # 2h1p-2h1p block
    if eom.partition == 'mp':
        fock = imds.eris.fock
        foo = fock[:, :nocc, :nocc]
        fvv = fock[:, nocc:, nocc:]
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] += einsum('bd,ijd->ijb', fvv[kb], r2[ki, kj])
                Hr2[ki, kj] -= einsum('li,ljb->ijb', foo[ki], r2[ki, kj])
                Hr2[ki, kj] -= einsum('lj,ilb->ijb', foo[kj], r2[ki, kj])
    elif eom.partition == 'full':
        # Compute the diagonal only when the caller did not supply one.
        # (The previous code inverted this test, called get_diag without
        # the required kshift, and passed extra arguments to
        # vector_to_amplitudes — crashing whenever 'full' partitioning
        # was used.)
        if diag is None:
            diag = eom.get_diag(kshift, imds=imds)
        diag_matrix2 = eom.vector_to_amplitudes(diag)[1]
        Hr2 += diag_matrix2 * r2
    else:
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] += einsum('bd,ijd->ijb', imds.Lvv[kb], r2[ki, kj])
                Hr2[ki, kj] -= einsum('li,ljb->ijb', imds.Loo[ki], r2[ki, kj])
                Hr2[ki, kj] -= einsum('lj,ilb->ijb', imds.Loo[kj], r2[ki, kj])
                for kl in range(nkpts):
                    kk = kconserv[ki, kl, kj]
                    Hr2[ki, kj] += einsum('klij,klb->ijb', imds.Woooo[kk, kl, ki], r2[kk, kl])
                    kd = kconserv[kl, kj, kb]
                    Hr2[ki, kj] += 2. * einsum('lbdj,ild->ijb', imds.Wovvo[kl, kb, kd], r2[ki, kl])
                    Hr2[ki, kj] += -einsum('lbdj,lid->ijb', imds.Wovvo[kl, kb, kd], r2[kl, ki])
                    Hr2[ki, kj] += -einsum('lbjd,ild->ijb', imds.Wovov[kl, kb, kj], r2[ki, kl])  # typo in Ref
                    kd = kconserv[kl, ki, kb]
                    Hr2[ki, kj] += -einsum('lbid,ljd->ijb', imds.Wovov[kl, kb, ki], r2[kl, kj])
        # Disconnected-like term folding r2 into t2 through Woovv.
        tmp = (2. * einsum('xyklcd,xykld->c', imds.Woovv[:, :, kshift], r2[:, :])
               - einsum('yxlkcd,xykld->c', imds.Woovv[:, :, kshift], r2[:, :]))
        Hr2[:, :] += -einsum('c,xyijcb->xyijb', tmp, t2[:, :, kshift])
    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def lipccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2hp operators are of the form s_{kl}^{ d}, i.e. 'ld' indices are coupled.'''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    # Left-eigenvector counterpart of ipccsd_matvec: applies the transpose
    # of the EOM-IP effective Hamiltonian to the packed (r1, r2).
    assert(eom.partition is None)
    if imds is None: imds = eom.make_imds()
    t2 = imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv
    # Zero out frozen-orbital entries before and after the contraction.
    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)
    # 1h block
    Hr1 = -einsum('ki,i->k',imds.Loo[kshift],r1)
    for ki, kb in itertools.product(range(nkpts), repeat=2):
        kj = kconserv[kshift,ki,kb]
        Hr1 -= einsum('kbij,ijb->k',imds.Wovoo[kshift,kb,ki],r2[ki,kj])
    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    # 2h1p <- 1h contributions (spin-summed SWooov combination)
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        SWooov = (2. * imds.Wooov[kk,kl,kshift] -
                  imds.Wooov[kl,kk,kshift].transpose(1, 0, 2, 3))
        Hr2[kk,kl] -= einsum('klid,i->kld',SWooov,r1)
        # The (kk==kd) / (kl==kd) factors restrict the Fov terms to the
        # momentum-conserving blocks.
        Hr2[kk,kshift] -= (kk==kd)*einsum('kd,l->kld',imds.Fov[kk],r1)
        Hr2[kshift,kl] += (kl==kd)*2.*einsum('ld,k->kld',imds.Fov[kl],r1)
    # 2h1p <- 2h1p contributions
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        Hr2[kk,kl] -= einsum('ki,ild->kld',imds.Loo[kk],r2[kk,kl])
        Hr2[kk,kl] -= einsum('lj,kjd->kld',imds.Loo[kl],r2[kk,kl])
        Hr2[kk,kl] += einsum('bd,klb->kld',imds.Lvv[kd],r2[kk,kl])
        for kj in range(nkpts):
            kb = kconserv[kd, kl, kj]
            SWovvo = (2. * imds.Wovvo[kl,kb,kd] -
                      imds.Wovov[kl,kb,kj].transpose(0, 1, 3, 2))
            Hr2[kk,kl] += einsum('lbdj,kjb->kld',SWovvo,r2[kk,kj])
            kb = kconserv[kd, kk, kj]
            Hr2[kk,kl] -= einsum('kbdj,ljb->kld',imds.Wovvo[kk,kb,kd],r2[kl,kj])
            Hr2[kk,kl] -= einsum('kbjd,jlb->kld',imds.Wovov[kk,kb,kj],r2[kj,kl])
            ki = kconserv[kk,kj,kl]
            Hr2[kk,kl] += einsum('klji,jid->kld',imds.Woooo[kk,kl,kj],r2[kj,ki])
    # Disconnected-like term: fold r2 into t2 through Woovv.
    tmp = np.zeros(nvir, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kc = kshift
        tmp += einsum('ijcb,ijb->c',t2[ki, kj, kc],r2[ki, kj])
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        SWoovv = (2. * imds.Woovv[kl, kk, kd] -
                  imds.Woovv[kk, kl, kd].transpose(1, 0, 2, 3))
        Hr2[kk, kl] -= einsum('lkdc,c->kld',SWoovv, tmp)
    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def ipccsd_diag(eom, kshift, imds=None, diag=None):
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    # Returns the diagonal of the EOM-IP effective Hamiltonian, packed
    # with amplitudes_to_vector; used as a Davidson preconditioner.
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = t1.shape
    kconserv = imds.kconserv
    # 1h diagonal
    Hr1 = -np.diag(imds.Loo[kshift])
    Hr2 = np.zeros((nkpts, nkpts, nocc, nocc, nvir), dtype=t1.dtype)
    if eom.partition == 'mp':
        # MP partitioning: bare Fock diagonals only.
        foo = eom.eris.fock[:, :nocc, :nocc]
        fvv = eom.eris.fock[:, nocc:, nocc:]
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] = fvv[kb].diagonal()
                Hr2[ki, kj] -= foo[ki].diagonal()[:, None, None]
                Hr2[ki, kj] -= foo[kj].diagonal()[:, None]
    else:
        idx = np.arange(nocc)
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                # One-body (L) diagonals
                Hr2[ki, kj] = imds.Lvv[kb].diagonal()
                Hr2[ki, kj] -= imds.Loo[ki].diagonal()[:, None, None]
                Hr2[ki, kj] -= imds.Loo[kj].diagonal()[:, None]
                # Two-body W diagonals
                if ki == kconserv[ki, kj, kj]:
                    Hr2[ki, kj] += np.einsum('ijij->ij', imds.Woooo[ki, kj, ki])[:, :, None]
                Hr2[ki, kj] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])
                Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])
                Hr2[ki, kj] += 2. * Wovvo
                if ki == kj:  # and i == j
                    Hr2[ki, ki, idx, idx] -= Wovvo
                Hr2[ki, kj] -= np.einsum('ibib->ib', imds.Wovov[ki, kb, ki])[:, None, :]
                kd = kconserv[kj, kshift, ki]
                Hr2[ki, kj] -= 2. * np.einsum('ijcb,jibc->ijb', t2[ki, kj, kshift], imds.Woovv[kj, ki, kd])
                Hr2[ki, kj] += np.einsum('ijcb,ijbc->ijb', t2[ki, kj, kshift], imds.Woovv[ki, kj, kd])
    return eom.amplitudes_to_vector(Hr1, Hr2)
def ipccsd_star_contract(eom, ipccsd_evals, ipccsd_evecs, lipccsd_evecs, kshift, imds=None):
    '''For description of arguments, see `ipccsd_star_contract` in `kccsd_ghf.py`.'''
    # Computes the perturbative IP-CCSD* triples correction for each
    # (eval, right evec, left evec) triple and returns the corrected
    # energies as a list.
    assert (eom.partition is None)
    if imds is None:
        imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    eris = imds.eris
    nkpts, nocc, nvir = t1.shape
    dtype = np.result_type(t1, t2)
    kconserv = eom.kconserv
    # Padded occupied/virtual MO energies per k-point.
    mo_energy_occ = np.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
    mo_energy_vir = np.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
    mo_e_o = mo_energy_occ
    mo_e_v = mo_energy_vir
    def contract_l3p(l1,l2,kptvec):
        '''Create perturbed left 3p2h amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        ki, kj, kk, ka, kb = kptvec
        out = np.zeros((nocc,)*3 + (nvir,)*2, dtype=dtype)
        if kk == kshift and kj == kconserv[ka,ki,kb]:
            out += 0.5*np.einsum('ijab,k->ijkab', eris.oovv[ki,kj,ka], l1)
        ke = kconserv[kb,ki,ka]
        out += lib.einsum('eiba,jke->ijkab', eris.vovv[ke,ki,kb], l2[kj,kk])
        km = kconserv[kshift,ki,ka]
        out += -lib.einsum('kjmb,ima->ijkab', eris.ooov[kk,kj,km], l2[ki,km])
        km = kconserv[ki,kb,kj]
        out += -lib.einsum('ijmb,mka->ijkab', eris.ooov[ki,kj,km], l2[km,kk])
        return out
    def contract_pl3p(l1,l2,kptvec):
        '''Create P(ia|jb) of perturbed left 3p2h amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_l3p(l1,l2,kptvec)
        out += contract_l3p(l1,l2,kptvec[[1,0,2,4,3]]).transpose(1,0,2,4,3)  # P(ia|jb)
        return out
    def contract_r3p(r1,r2,kptvec):
        '''Create perturbed right 3p2h amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        ki, kj, kk, ka, kb = kptvec
        out = np.zeros((nocc,)*3 + (nvir,)*2, dtype=dtype)
        tmp = np.einsum('mbke,m->bke', eris.ovov[kshift,kb,kk], r1)
        out += -lib.einsum('bke,ijae->ijkab', tmp, t2[ki,kj,ka])
        ke = kconserv[kb,kshift,kj]
        tmp = np.einsum('bmje,m->bej', eris.voov[kb,kshift,kj], r1)
        out += -lib.einsum('bej,ikae->ijkab', tmp, t2[ki,kk,ka])
        km = kconserv[ka,ki,kb]
        tmp = np.einsum('mnjk,n->mjk', eris.oooo[km,kshift,kj], r1)
        out += lib.einsum('mjk,imab->ijkab', tmp, t2[ki,km,ka])
        ke = kconserv[kk,kshift,kj]
        out += lib.einsum('eiba,kje->ijkab', eris.vovv[ke,ki,kb].conj(), r2[kk,kj])
        km = kconserv[kk,kb,kj]
        out += -lib.einsum('kjmb,mia->ijkab', eris.ooov[kk,kj,km].conj(), r2[km,ki])
        km = kconserv[ki,kb,kj]
        out += -lib.einsum('ijmb,kma->ijkab', eris.ooov[ki,kj,km].conj(), r2[kk,km])
        return out
    def contract_pr3p(r1,r2,kptvec):
        '''Create P(ia|jb) of perturbed right 3p2h amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_r3p(r1,r2,kptvec)
        out += contract_r3p(r1,r2,kptvec[[1,0,2,4,3]]).transpose(1,0,2,4,3)  # P(ia|jb)
        return out
    ipccsd_evecs = np.array(ipccsd_evecs)
    lipccsd_evecs = np.array(lipccsd_evecs)
    e_star = []
    ipccsd_evecs, lipccsd_evecs = [np.atleast_2d(x) for x in [ipccsd_evecs, lipccsd_evecs]]
    ipccsd_evals = np.atleast_1d(ipccsd_evals)
    for ip_eval, ip_evec, ip_levec in zip(ipccsd_evals, ipccsd_evecs, lipccsd_evecs):
        # Enforcing <L|R> = 1
        l1, l2 = eom.vector_to_amplitudes(ip_levec, kshift)
        r1, r2 = eom.vector_to_amplitudes(ip_evec, kshift)
        ldotr = np.dot(l1, r1) + np.dot(l2.ravel(), r2.ravel())
        # Transposing the l2 operator
        l2T = np.zeros_like(l2)
        for ki in range(nkpts):
            for kj in range(nkpts):
                ka = kconserv[ki,kshift,kj]
                l2T[ki,kj] = l2[kj,ki].transpose(1,0,2)
        l2 = (l2 + 2.*l2T)/3.
        logger.info(eom, 'Left-right amplitude overlap : %14.8e + 1j %14.8e',
                    ldotr.real, ldotr.imag)
        if abs(ldotr) < 1e-7:
            logger.warn(eom, 'Small %s left-right amplitude overlap. Results '
                             'may be inaccurate.', ldotr)
        l1 /= ldotr
        l2 /= ldotr
        deltaE = 0.0 + 1j*0.0
        #eij = (mo_e_o[:, None, :, None, None] + mo_e_o[None, :, None, :, None])
        #       #mo_e_o[None, None, :, None, None, :])
        for ka, kb in itertools.product(range(nkpts), repeat=2):
            # Build, for this (ka, kb), the perturbed left and right
            # amplitudes over all (ki, kj) and divide by the energy
            # denominator; the kk index is fixed by momentum conservation.
            lijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            Plijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            rijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            eijk = np.zeros((nkpts,nkpts,nocc,nocc,nocc),dtype=mo_e_o.dtype)
            kklist = kpts_helper.get_kconserv3(eom._cc._scf.cell, eom._cc.kpts,
                                               [ka,kb,kshift,range(nkpts),range(nkpts)])
            for ki, kj in itertools.product(range(nkpts), repeat=2):
                kk = kklist[ki,kj]
                kptvec = [ki,kj,kk,ka,kb]
                lijkab[ki,kj] = contract_pl3p(l1,l2,kptvec)
                rijkab[ki,kj] = contract_pr3p(r1,r2,kptvec)
            for ki, kj in itertools.product(range(nkpts), repeat=2):
                kk = kklist[ki,kj]
                # Spin-adapted permutation combination of the left amplitude.
                Plijkab[ki,kj] = (4.*lijkab[ki,kj] +
                                  1.*lijkab[kj,kk].transpose(2,0,1,3,4) +
                                  1.*lijkab[kk,ki].transpose(1,2,0,3,4) -
                                  2.*lijkab[ki,kk].transpose(0,2,1,3,4) -
                                  2.*lijkab[kk,kj].transpose(2,1,0,3,4) -
                                  2.*lijkab[kj,ki].transpose(1,0,2,3,4))
                eijk[ki,kj] = _get_epqr([0,nocc,ki,mo_e_o,eom.nonzero_opadding],
                                        [0,nocc,kj,mo_e_o,eom.nonzero_opadding],
                                        [0,nocc,kk,mo_e_o,eom.nonzero_opadding])
            # Creating denominator
            eab = _get_epq([0,nvir,ka,mo_e_v,eom.nonzero_vpadding],
                           [0,nvir,kb,mo_e_v,eom.nonzero_vpadding],
                           fac=[-1.,-1.])
            # Creating denominator
            eijkab = (eijk[:, :, :, :, :, None, None] +
                      eab[None, None, None, None, None, :, :])
            denom = eijkab + ip_eval
            denom = 1. / denom
            deltaE += lib.einsum('xyijkab,xyijkab,xyijkab', Plijkab, rijkab, denom)
        deltaE *= 0.5
        deltaE = deltaE.real
        logger.info(eom, "ipccsd energy, star energy, delta energy = %16.12f, %16.12f, %16.12f",
                    ip_eval, ip_eval + deltaE, deltaE)
        e_star.append(ip_eval + deltaE)
    return e_star
class EOMIP(eom_kgccsd.EOMIP):
    """EOM-IP-CCSD for k-point RHF-based CCSD."""
    # Bind the module-level kernels as this class's matvec/diag hooks.
    matvec = ipccsd_matvec
    l_matvec = lipccsd_matvec
    get_diag = ipccsd_diag
    ccsd_star_contract = ipccsd_star_contract
    @property
    def nkpts(self):
        # Number of k-points.
        return len(self.kpts)
    @property
    def ip_vector_desc(self):
        """Description of the IP vector."""
        # r1 block shape, then r2 block shape (nvir = nmo - nocc).
        return [(self.nocc,), (self.nkpts, self.nkpts, self.nocc, self.nocc, self.nmo - self.nocc)]
    def ip_amplitudes_to_vector(self, t1, t2):
        """Ground state amplitudes to a vector."""
        return nested_to_vector((t1, t2))[0]
    def ip_vector_to_amplitudes(self, vec):
        """Ground state vector to amplitudes."""
        return vector_to_nested(vec, self.ip_vector_desc)
    def vector_to_amplitudes(self, vector, kshift=None):
        # kshift accepted for interface compatibility; unused here.
        return self.ip_vector_to_amplitudes(vector)
    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        # kshift/kconserv accepted for interface compatibility; unused here.
        return self.ip_amplitudes_to_vector(r1, r2)
    def vector_size(self):
        # Total length: r1 (nocc) plus r2 (nkpts**2 * nocc**2 * nvir).
        nocc = self.nocc
        nvir = self.nmo - nocc
        nkpts = self.nkpts
        return nocc + nkpts**2*nocc*nocc*nvir
    def make_imds(self, eris=None):
        # Build the IP intermediates from the underlying CC object.
        imds = _IMDS(self._cc, eris)
        imds.make_ip()
        return imds
class EOMIP_Ta(EOMIP):
    '''Class for EOM IPCCSD(T)*(a) method by Matthews and Stanton.'''
    def make_imds(self, eris=None):
        # Same as EOMIP.make_imds, but folds the perturbative T3[2]
        # correction into the IP intermediates.
        imds = _IMDS(self._cc, eris=eris)
        imds.make_t3p2_ip(self._cc)
        return imds
########################################
# EOM-EA-CCSD
########################################
def eaccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    """Apply the EOM-EA-CCSD effective Hamiltonian to the (r1, r2)
    amplitudes packed in *vector* for k-shift *kshift*, returning the
    packed result with frozen-orbital entries masked to zero."""
    # Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
    if imds is None: imds = eom.make_imds()
    t2 = imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv
    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)
    # Eq. (30)
    # 1p-1p block
    Hr1 = einsum('ac,c->a', imds.Lvv[kshift], r1)
    # 1p-2p1h block
    for kl in range(nkpts):
        Hr1 += 2. * einsum('ld,lad->a', imds.Fov[kl], r2[kl, kshift])
        Hr1 += -einsum('ld,lda->a', imds.Fov[kl], r2[kl, kl])
        for kc in range(nkpts):
            kd = kconserv[kshift, kc, kl]
            Hr1 += 2. * einsum('alcd,lcd->a', imds.Wvovv[kshift, kl, kc], r2[kl, kc])
            Hr1 += -einsum('aldc,lcd->a', imds.Wvovv[kshift, kl, kd], r2[kl, kc])
    # Eq. (31)
    # 2p1h-1p block
    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wvvvo.dtype, r1.dtype))
    for kj in range(nkpts):
        for ka in range(nkpts):
            kb = kconserv[kshift,ka,kj]
            Hr2[kj,ka] += einsum('abcj,c->jab',imds.Wvvvo[ka,kb,kshift],r1)
    # 2p1h-2p1h block
    if eom.partition == 'mp':
        fock = eom.eris.fock
        foo = fock[:, :nocc, :nocc]
        fvv = fock[:, nocc:, nocc:]
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= einsum('lj,lab->jab', foo[kj], r2[kj, ka])
                Hr2[kj, ka] += einsum('ac,jcb->jab', fvv[ka], r2[kj, ka])
                Hr2[kj, ka] += einsum('bd,jad->jab', fvv[kb], r2[kj, ka])
    elif eom.partition == 'full':
        # Compute the diagonal only when the caller did not supply one.
        # (The previous code inverted this test, called get_diag without
        # the required kshift, and passed extra arguments to
        # vector_to_amplitudes — crashing whenever 'full' partitioning
        # was used.)
        if diag is None:
            diag = eom.get_diag(kshift, imds=imds)
        diag_matrix2 = eom.vector_to_amplitudes(diag)[1]
        Hr2 += diag_matrix2 * r2
    else:
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= einsum('lj,lab->jab', imds.Loo[kj], r2[kj, ka])
                Hr2[kj, ka] += einsum('ac,jcb->jab', imds.Lvv[ka], r2[kj, ka])
                Hr2[kj, ka] += einsum('bd,jad->jab', imds.Lvv[kb], r2[kj, ka])
                for kd in range(nkpts):
                    kc = kconserv[ka, kd, kb]
                    Wvvvv = imds.get_Wvvvv(ka, kb, kc)
                    Hr2[kj, ka] += einsum('abcd,jcd->jab', Wvvvv, r2[kj, kc])
                    kl = kconserv[kd, kb, kj]
                    Hr2[kj, ka] += 2. * einsum('lbdj,lad->jab', imds.Wovvo[kl, kb, kd], r2[kl, ka])
                    # imds.Wvovo[kb,kl,kd,kj] <= imds.Wovov[kl,kb,kj,kd].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('bldj,lad->jab', imds.Wovov[kl, kb, kj].transpose(1, 0, 3, 2),
                                           r2[kl, ka])
                    # imds.Wvoov[kb,kl,kj,kd] <= imds.Wovvo[kl,kb,kd,kj].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('bljd,lda->jab', imds.Wovvo[kl, kb, kd].transpose(1, 0, 3, 2),
                                           r2[kl, kd])
                    kl = kconserv[kd, ka, kj]
                    # imds.Wvovo[ka,kl,kd,kj] <= imds.Wovov[kl,ka,kj,kd].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('aldj,ldb->jab', imds.Wovov[kl, ka, kj].transpose(1, 0, 3, 2),
                                           r2[kl, kd])
        # Disconnected-like term folding r2 into t2 through Woovv.
        tmp = (2. * einsum('xyklcd,xylcd->k', imds.Woovv[kshift, :, :], r2[:, :])
               - einsum('xylkcd,xylcd->k', imds.Woovv[:, kshift, :], r2[:, :]))
        Hr2[:, :] += -einsum('k,xykjab->xyjab', tmp, t2[kshift, :, :])
    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2, kshift), kshift, const=0.0)
def leaccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2hp operators are of the form s_{ l}^{cd}, i.e. 'ld' indices are coupled.'''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    # Left-eigenvector counterpart of eaccsd_matvec: applies the transpose
    # of the EOM-EA effective Hamiltonian to the packed (r1, r2).
    assert(eom.partition is None)
    if imds is None: imds = eom.make_imds()
    t1 = imds.t1
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv
    # Zero out frozen-orbital entries before and after the contraction.
    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)
    # 1p-1p block
    Hr1 = np.einsum('ac,a->c', imds.Lvv[kshift], r1)
    # 1p-2p1h block
    for kj, ka in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[kj, ka, kshift]
        Hr1 += np.einsum('abcj,jab->c', imds.Wvvvo[ka, kb, kshift], r2[kj, ka])
    # 2p1h-1p block
    Hr2 = np.zeros((nkpts, nkpts, nocc, nvir, nvir), dtype=np.complex128)
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        # The (kl==kd) / (kl==kc) factors restrict the Fov terms to the
        # momentum-conserving blocks.
        Hr2[kl, kc] += 2. * (kl==kd) * np.einsum('c,ld->lcd', r1, imds.Fov[kd])
        Hr2[kl, kc] += - (kl==kc) * np.einsum('d,lc->lcd', r1, imds.Fov[kl])
        SWvovv = (2. * imds.Wvovv[kshift, kl, kc] -
                  imds.Wvovv[kshift, kl, kd].transpose(0, 1, 3, 2))
        Hr2[kl, kc] += np.einsum('a,alcd->lcd', r1, SWvovv)
    # 2p1h-2p1h block
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        Hr2[kl, kc] += lib.einsum('lad,ac->lcd', r2[kl, kc], imds.Lvv[kc])
        Hr2[kl, kc] += lib.einsum('lcb,bd->lcd', r2[kl, kc], imds.Lvv[kd])
        Hr2[kl, kc] += -lib.einsum('jcd,lj->lcd', r2[kl, kc], imds.Loo[kl])
        for kb in range(nkpts):
            kj = kconserv[kl, kd, kb]
            SWovvo = (2. * imds.Wovvo[kl, kb, kd] -
                      imds.Wovov[kl, kb, kj].transpose(0, 1, 3, 2))
            Hr2[kl, kc] += lib.einsum('jcb,lbdj->lcd', r2[kj, kc], SWovvo)
            kj = kconserv[kl, kc, kb]
            Hr2[kl, kc] += -lib.einsum('lbjc,jbd->lcd', imds.Wovov[kl, kb, kj], r2[kj, kb])
            Hr2[kl, kc] += -lib.einsum('lbcj,jdb->lcd', imds.Wovvo[kl, kb, kc], r2[kj, kd])
            ka = kconserv[kc, kb, kd]
            Wvvvv = imds.get_Wvvvv(ka, kb, kc)
            Hr2[kl, kc] += lib.einsum('lab,abcd->lcd', r2[kl, ka], Wvvvv)
    # Disconnected-like term: fold r2 into t2 through Woovv.
    tmp = np.zeros((nocc),dtype=t1.dtype)
    for ki, kc in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kc, kshift]
        tmp += np.einsum('ijcb,ibc->j', imds.t2[ki, kshift, kc], r2[ki, kb])
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        SWoovv = (2. * imds.Woovv[kl, kshift, kd] -
                  imds.Woovv[kl, kshift, kc].transpose(0, 1, 3, 2))
        Hr2[kl,kc] += -np.einsum('ljdc,j->lcd', SWoovv, tmp)
    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def eaccsd_diag(eom, kshift, imds=None, diag=None):
    # Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
    # Returns the diagonal of the EOM-EA effective Hamiltonian, packed
    # with amplitudes_to_vector; used as a Davidson preconditioner.
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = t1.shape
    kconserv = imds.kconserv
    # 1p diagonal
    Hr1 = np.diag(imds.Lvv[kshift])
    Hr2 = np.zeros((nkpts, nkpts, nocc, nvir, nvir), dtype=t2.dtype)
    if eom.partition == 'mp':
        # MP partitioning: bare Fock diagonals only.
        foo = imds.eris.fock[:, :nocc, :nocc]
        fvv = imds.eris.fock[:, nocc:, nocc:]
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= foo[kj].diagonal()[:, None, None]
                Hr2[kj, ka] += fvv[ka].diagonal()[None, :, None]
                Hr2[kj, ka] += fvv[kb].diagonal()
    else:
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                # One-body (L) diagonals
                Hr2[kj, ka] -= imds.Loo[kj].diagonal()[:, None, None]
                Hr2[kj, ka] += imds.Lvv[ka].diagonal()[None, :, None]
                Hr2[kj, ka] += imds.Lvv[kb].diagonal()
                # Two-body W diagonals
                Wvvvv = imds.get_Wvvvv(ka, kb, ka)
                Hr2[kj, ka] += np.einsum('abab->ab', Wvvvv)
                Hr2[kj, ka] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])[:, None, :]
                Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])
                Hr2[kj, ka] += 2. * Wovvo[:, None, :]
                if ka == kb:
                    for a in range(nvir):
                        Hr2[kj, ka, :, a, a] -= Wovvo[:, a]
                Hr2[kj, ka] -= np.einsum('jaja->ja', imds.Wovov[kj, ka, kj])[:, :, None]
                Hr2[kj, ka] -= 2 * np.einsum('ijab,ijab->jab', t2[kshift, kj, ka], imds.Woovv[kshift, kj, ka])
                Hr2[kj, ka] += np.einsum('ijab,ijba->jab', t2[kshift, kj, ka], imds.Woovv[kshift, kj, kb])
    return eom.amplitudes_to_vector(Hr1, Hr2, kshift)
def eaccsd_star_contract(eom, eaccsd_evals, eaccsd_evecs, leaccsd_evecs, kshift, imds=None):
    '''Compute the EA-CCSD* perturbative correction for each supplied root.

    For description of arguments, see `eaccsd_star_contract` in `kccsd_ghf.py`.
    '''
    assert (eom.partition is None)
    if imds is None:
        imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    eris = imds.eris
    nkpts, nocc, nvir = t1.shape
    dtype = np.result_type(t1, t2)
    kconserv = eom.kconserv
    mo_energy_occ = np.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
    mo_energy_vir = np.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
    mo_e_o = mo_energy_occ
    mo_e_v = mo_energy_vir
    def contract_l3p(l1,l2,kptvec):
        '''Create perturbed left 3h2p amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        ki, kj, ka, kb, kc = kptvec
        out = np.zeros((nocc,)*2 + (nvir,)*3, dtype=dtype)
        # l1 term only contributes when momentum conservation is satisfied
        if kc == kshift and kb == kconserv[ki,ka,kj]:
            out -= 0.5*lib.einsum('ijab,c->ijabc', eris.oovv[ki,kj,ka], l1)
        km = kconserv[ki,ka,kj]
        out += lib.einsum('jima,mbc->ijabc', eris.ooov[kj,ki,km], l2[km,kb])
        ke = kconserv[kshift,ka,ki]
        out -= lib.einsum('ejcb,iae->ijabc', eris.vovv[ke,kj,kc], l2[ki,ka])
        ke = kconserv[kshift,kc,ki]
        out -= lib.einsum('ejab,iec->ijabc', eris.vovv[ke,kj,ka], l2[ki,ke])
        return out
    def contract_pl3p(l1,l2,kptvec):
        '''Create P(ia|jb) of perturbed left 3h2p amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_l3p(l1,l2,kptvec)
        out += contract_l3p(l1,l2,kptvec[[1,0,3,2,4]]).transpose(1,0,3,2,4)  # P(ia|jb)
        return out
    def contract_r3p(r1,r2,kptvec):
        '''Create perturbed right 3h2p amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        ki, kj, ka, kb, kc = kptvec
        out = np.zeros((nocc,)*2 + (nvir,)*3, dtype=dtype)
        ke = kconserv[ki,ka,kj]
        tmp = lib.einsum('bcef,f->bce', eris.vvvv[kb,kc,ke], r1)
        out -= lib.einsum('bce,ijae->ijabc', tmp, t2[ki,kj,ka])
        km = kconserv[kshift,kc,kj]
        tmp = einsum('mcje,e->mcj',eris.ovov[km,kc,kj],r1)
        out += einsum('mcj,imab->ijabc',tmp,t2[ki,km,ka])
        km = kconserv[kc,ki,ka]
        tmp = einsum('bmje,e->mbj',eris.voov[kb,km,kj],r1)
        out += einsum('mbj,imac->ijabc',tmp,t2[ki,km,ka])
        km = kconserv[ki,ka,kj]
        out += einsum('jima,mcb->ijabc',eris.ooov[kj,ki,km].conj(),r2[km,kc])
        ke = kconserv[kshift,ka,ki]
        out += -einsum('ejcb,iea->ijabc',eris.vovv[ke,kj,kc].conj(),r2[ki,ke])
        ke = kconserv[kshift,kc,kj]
        out += -einsum('eiba,jce->ijabc',eris.vovv[ke,ki,kb].conj(),r2[kj,kc])
        return out
    def contract_pr3p(r1,r2,kptvec):
        '''Create P(ia|jb) of perturbed right 3h2p amplitude.
        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_r3p(r1,r2,kptvec)
        out += contract_r3p(r1,r2,kptvec[[1,0,3,2,4]]).transpose(1,0,3,2,4)  # P(ia|jb)
        return out
    eaccsd_evecs = np.array(eaccsd_evecs)
    leaccsd_evecs = np.array(leaccsd_evecs)
    e_star = []
    eaccsd_evecs, leaccsd_evecs = [np.atleast_2d(x) for x in [eaccsd_evecs, leaccsd_evecs]]
    eaccsd_evals = np.atleast_1d(eaccsd_evals)
    for ea_eval, ea_evec, ea_levec in zip(eaccsd_evals, eaccsd_evecs, leaccsd_evecs):
        # Enforcing <L|R> = 1
        l1, l2 = eom.vector_to_amplitudes(ea_levec, kshift)
        r1, r2 = eom.vector_to_amplitudes(ea_evec, kshift)
        ldotr = np.dot(l1, r1) + np.dot(l2.ravel(), r2.ravel())
        # Transposing the l2 operator
        l2T = np.zeros_like(l2)
        for kj, ka in itertools.product(range(nkpts), repeat=2):
            kb = kconserv[kj,ka,kshift]
            l2T[kj,kb] = l2[kj,ka].transpose(0,2,1)
        l2 = (l2 + 2.*l2T)/3.
        logger.info(eom, 'Left-right amplitude overlap : %14.8e + 1j %14.8e',
                    ldotr.real, ldotr.imag)
        if abs(ldotr) < 1e-7:
            logger.warn(eom, 'Small %s left-right amplitude overlap. Results '
                             'may be inaccurate.', ldotr)
        l1 /= ldotr
        l2 /= ldotr
        deltaE = 0.0 + 1j*0.0
        for ki, kj in itertools.product(range(nkpts), repeat=2):
            lijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            Plijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            rijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            eabc = np.zeros((nkpts,nkpts,nvir,nvir,nvir),dtype=dtype)
            kclist = kpts_helper.get_kconserv3(eom._cc._scf.cell, eom._cc.kpts,
                                               [ki,kj,kshift,range(nkpts),range(nkpts)])
            for ka, kb in itertools.product(range(nkpts), repeat=2):
                kc = kclist[ka,kb]
                kptvec = [ki,kj,ka,kb,kc]
                lijabc[ka,kb] = contract_pl3p(l1,l2,kptvec)
                rijabc[ka,kb] = contract_pr3p(r1,r2,kptvec)
            for ka, kb in itertools.product(range(nkpts), repeat=2):
                kc = kclist[ka,kb]
                # Spin-adapted combination of the six virtual-index permutations
                Plijabc[ka,kb] = (4.*lijabc[ka,kb] +
                                  1.*lijabc[kb,kc].transpose(0,1,4,2,3) +
                                  1.*lijabc[kc,ka].transpose(0,1,3,4,2) -
                                  2.*lijabc[ka,kc].transpose(0,1,2,4,3) -
                                  2.*lijabc[kc,kb].transpose(0,1,4,3,2) -
                                  2.*lijabc[kb,ka].transpose(0,1,3,2,4))
                eabc[ka,kb] = _get_epqr([0,nvir,ka,mo_e_v,eom.nonzero_vpadding],
                                        [0,nvir,kb,mo_e_v,eom.nonzero_vpadding],
                                        [0,nvir,kc,mo_e_v,eom.nonzero_vpadding],
                                        fac=[-1.,-1.,-1.])
            # Creating denominator
            eij = _get_epq([0,nocc,ki,mo_e_o,eom.nonzero_opadding],
                           [0,nocc,kj,mo_e_o,eom.nonzero_opadding])
            eijabc = (eij[None, None, :, :, None, None, None] +
                      eabc[:, :, None, None, :, :, :])
            denom = eijabc + ea_eval
            denom = 1. / denom
            deltaE += lib.einsum('xyijabc,xyijabc,xyijabc', Plijabc, rijabc, denom)
        deltaE *= 0.5
        deltaE = deltaE.real
        logger.info(eom, "eaccsd energy, star energy, delta energy = %16.12f, %16.12f, %16.12f",
                    ea_eval, ea_eval + deltaE, deltaE)
        e_star.append(ea_eval + deltaE)
    return e_star
class EOMEA(eom_kgccsd.EOMEA):
    """k-point EOM-EA-CCSD for spin-restricted (RHF) references."""
    matvec = eaccsd_matvec
    l_matvec = leaccsd_matvec
    get_diag = eaccsd_diag
    ccsd_star_contract = eaccsd_star_contract
    @property
    def nkpts(self):
        """Number of k-points."""
        return len(self.kpts)
    @property
    def ea_vector_desc(self):
        """Description of the EA vector."""
        nvir = self.nmo - self.nocc
        # [r1 shape, r2 shape] used by nested_to_vector/vector_to_nested
        return [(nvir,), (self.nkpts, self.nkpts, self.nocc, nvir, nvir)]
    def ea_amplitudes_to_vector(self, t1, t2, kshift=None, kconserv=None):
        """Ground state amplitudes to a vector."""
        return nested_to_vector((t1, t2))[0]
    def ea_vector_to_amplitudes(self, vec):
        """Ground state vector to amplitudes."""
        return vector_to_nested(vec, self.ea_vector_desc)
    def vector_to_amplitudes(self, vector, kshift=None):
        # kshift accepted for interface compatibility; packing does not use it
        return self.ea_vector_to_amplitudes(vector)
    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        return self.ea_amplitudes_to_vector(r1, r2)
    def vector_size(self):
        """Total length of the packed (r1, r2) EA vector."""
        nocc = self.nocc
        nvir = self.nmo - nocc
        nkpts = self.nkpts
        return nvir + nkpts**2*nocc*nvir*nvir
    def make_imds(self, eris=None):
        """Build the EA intermediates container."""
        imds = _IMDS(self._cc, eris)
        imds.make_ea()
        return imds
class EOMEA_Ta(EOMEA):
    '''Class for EOM EACCSD(T)*(a) method by Matthews and Stanton.'''
    def make_imds(self, eris=None):
        """Build EA intermediates including the T3[2] correction."""
        imds = _IMDS(self._cc, eris=eris)
        # make_t3p2_ea both updates t1/t2 and adds the perturbative Wvvvo term
        imds.make_t3p2_ea(self._cc)
        return imds
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, left=False,
           eris=None, imds=None, partition=None, kptlist=None,
           dtype=None):
    '''See `kernel_ee()` for a description of arguments.'''
    # General (mixed spin-block) EE kernel is not implemented for the RHF
    # k-point code; use the spin-block specific drivers, e.g.
    # eomee_ccsd_singlet().
    raise NotImplementedError
def eomee_ccsd_singlet(eom, nroots=1, koopmans=False, guess=None, left=False,
                       eris=None, imds=None, diag=None, partition=None,
                       kptlist=None, dtype=None):
    '''See `eom_kgccsd.kernel()` for a description of arguments.'''
    # Thin wrapper around the generic EE kernel; stores convergence flags,
    # eigenvalues and eigenvectors on the eom object as a side effect.
    eom.converged, eom.e, eom.v \
            = eom_kgccsd.kernel_ee(eom, nroots, koopmans, guess, left, eris=eris,
                                   imds=imds, diag=diag, partition=partition,
                                   kptlist=kptlist, dtype=dtype)
    return eom.e, eom.v
def vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv):
    '''Transform 1-dimensional array to 3- and 7-dimensional arrays, r1 and r2.
    For example:
        vector: a 1-d array with all r1 elements, and r2 elements whose indices
        satisfy (i k_i a k_a) >= (j k_j b k_b)
    return: [r1, r2], where
        r1 = r_{i k_i}^{a k_a} is a 3-d array whose elements can be accessed via
            r1[k_i, i, a].
        r2 = r_{i k_i, j k_j}^{a k_a, b k_b} is a 7-d array whose elements can
            be accessed via r2[k_i, k_j, k_a, i, j, a, b]
    '''
    nvir = nmo - nocc
    nov = nocc*nvir
    # Leading nkpts*nov entries are the r1 amplitudes
    r1 = vector[:nkpts*nov].copy().reshape(nkpts, nocc, nvir)
    # Work array indexed by the composite (k_i, k_a) pair and k_j
    r2 = np.zeros((nkpts**2, nkpts, nov, nov), dtype=vector.dtype)
    idx, idy = np.tril_indices(nov)
    nov2_tril = nov * (nov + 1) // 2
    nov2 = nov * nov
    r2_tril = vector[nkpts*nov:].copy()
    offset = 0
    for ki, ka, kj in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        kika = ki * nkpts + ka
        kjkb = kj * nkpts + kb
        if kika == kjkb:
            # Diagonal k-block: only the lower triangle was stored;
            # restore both halves by symmetry.
            tmp = r2_tril[offset:offset+nov2_tril]
            r2[kika, kj, idx, idy] = tmp
            r2[kjkb, ki, idy, idx] = tmp
            offset += nov2_tril
        elif kika > kjkb:
            # Off-diagonal k-block: stored once; mirror into its transpose.
            tmp = r2_tril[offset:offset+nov2].reshape(nov, nov)
            r2[kika, kj] = tmp
            r2[kjkb, ki] = tmp.transpose()
            offset += nov2
    # r2 indices (old): (k_i, k_a), (k_J), (i, a), (J, B)
    # r2 indices (new): k_i, k_J, k_a, i, J, a, B
    r2 = r2.reshape(nkpts, nkpts, nkpts, nocc, nvir, nocc, nvir).transpose(0,2,1,3,5,4,6)
    return [r1, r2]
def amplitudes_to_vector_singlet(r1, r2, kconserv):
    '''Transform 3- and 7-dimensional arrays, r1 and r2, to a 1-dimensional
    array with unique indices.
    For example:
        r1: t_{i k_i}^{a k_a}
        r2: t_{i k_i, j k_j}^{a k_a, b k_b}
    return: a vector with all r1 elements, and r2 elements whose indices
        satisfy (i k_i a k_a) >= (j k_j b k_b)
    '''
    # r1 indices: k_i, i, a
    nkpts, nocc, nvir = np.asarray(r1.shape)[[0, 1, 2]]
    nov = nocc * nvir
    # r2 indices (old): k_i, k_J, k_a, i, J, a, B
    # r2 indices (new): (k_i, k_a), (k_J), (i, a), (J, B)
    r2 = r2.transpose(0,2,1,3,5,4,6).reshape(nkpts**2, nkpts, nov, nov)
    idx, idy = np.tril_indices(nov)
    nov2_tril = nov * (nov + 1) // 2
    nov2 = nov * nov
    # Over-allocated; trimmed to the used prefix before the final hstack
    vector = np.empty(r2.size, dtype=r2.dtype)
    offset = 0
    for ki, ka, kj in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        kika = ki * nkpts + ka
        kjkb = kj * nkpts + kb
        r2ovov = r2[kika, kj]
        if kika == kjkb:
            # Diagonal k-block: keep only the lower triangle
            vector[offset:offset+nov2_tril] = r2ovov[idx, idy]
            offset += nov2_tril
        elif kika > kjkb:
            # Off-diagonal k-block: keep the full block once
            vector[offset:offset+nov2] = r2ovov.ravel()
            offset += nov2
    vector = np.hstack((r1.ravel(), vector[:offset]))
    return vector
def join_indices(indices, struct):
    '''Returns a joined (flattened, row-major) index for an array of indices.

    Args:
        indices (np.ndarray): an array of indices, one per dimension
        struct (np.ndarray): an array of index ranges (the extent of each
            dimension)

    Returns:
        The composite index, i.e. sum_d indices[d] * prod(struct[d+1:]).

    Raises:
        TypeError: if either argument is not a numpy array.
        ValueError: if the two arrays differ in size, or if any index is
            out of its range.

    Example:
        indices = np.array((3, 4, 5))
        struct = np.array((10, 10, 10))
        join_indices(indices, struct): 345
    '''
    if not isinstance(indices, np.ndarray) or not isinstance(struct, np.ndarray):
        raise TypeError("Arguments %s and %s should both be numpy.ndarray" %
                        (repr(indices), repr(struct)))
    if indices.size != struct.size:
        raise ValueError("Structure shape mismatch: expected dimension = %d, found %d" %
                         (struct.size, indices.size))
    # Bug fix: a single out-of-range index must raise; the original used
    # `.all()`, which only raised when *every* index was out of range.
    if (indices >= struct).any():
        raise ValueError("Indices are out of range")
    result = 0
    for dim in range(struct.size):
        # Row-major stride of dimension `dim` is the product of all
        # trailing extents (empty product == 1 for the last dimension).
        result += indices[dim] * np.prod(struct[dim+1:])
    return result
def eeccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    # Generic EE matvec (all spin blocks) is not implemented; use the
    # spin-block specific routine eeccsd_matvec_singlet().
    raise NotImplementedError
def eeccsd_matvec_singlet(eom, vector, kshift, imds=None, diag=None):
    """Spin-restricted, k-point EOM-EE-CCSD equations for singlet excitation only.
    This implementation can be checked against the spin-orbital version in
    `eom_kccsd_ghf.eeccsd_matvec()`.

    Args:
        vector: packed (r1, r2) singlet amplitudes; see
            `vector_to_amplitudes_singlet` for the layout.
        kshift: index of the momentum transfer vector.

    Returns:
        Packed (Hr1, Hr2) = Hbar . (r1, r2) in the same layout.
    """
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None: imds = eom.make_imds()
    nocc = eom.nocc
    nmo = eom.nmo
    nvir = nmo - nocc
    nkpts = eom.nkpts
    kconserv = imds.kconserv
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    kconserv_r2 = eom.get_kconserv_ee_r2(kshift)
    cput1 = (logger.process_clock(), logger.perf_counter())
    r1, r2 = vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv_r2)
    cput1 = log.timer_debug1("vector_to_amplitudes_singlet", *cput1)
    # Build antisymmetrized tensors that will be used later
    # antisymmetrized r2   : rbar_ijab = 2 r_ijab - r_ijba
    # antisymmetrized woOoV: wbar_nmie = 2 W_nmie - W_nmei
    # antisymmetrized wvOvV: wbar_amfe = 2 W_amfe - W_amef
    # antisymmetrized woVvO: wbar_mbej = 2 W_mbej - W_mbje
    r2bar = np.zeros_like(r2)
    woOoV_bar = np.zeros_like(imds.woOoV)
    wvOvV_bar = np.zeros_like(imds.wvOvV)
    woVvO_bar = np.zeros_like(imds.woVvO)
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # rbar_ijab = 2 r_ijab - r_ijba
        # ki - ka + kj - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        r2bar[ki, kj, ka] = 2. * r2[ki, kj, ka] - r2[ki, kj, kb].transpose(0,1,3,2)
        # wbar_nmie = 2 W_nmie - W_nmei = 2 W_nmie - W_mnie
        # ki->kn, kj->km, ka->ki
        wkn = ki
        wkm = kj
        wki = ka
        # kn + km - ki - ke = G
        # NOTE(review): wke is not used in this branch; it documents the
        # conservation relation only.
        wke = kconserv[wkn, wki, wkm]
        woOoV_bar[wkn, wkm, wki] = 2. * imds.woOoV[wkn, wkm, wki] - imds.woOoV[wkm, wkn, wki].transpose(1,0,2,3)
        # wbar_amfe = 2 W_amfe - W_amef
        # ki->ka, kj->km, ka->kf, kb->ke
        wka = ki
        wkm = kj
        wkf = ka
        # ka + km - kf - ke = G
        wke = kconserv[wka, wkf, wkm]
        wvOvV_bar[wka, wkm, wkf] = 2. * imds.wvOvV[wka, wkm, wkf] - imds.wvOvV[wka, wkm, wke].transpose(0,1,3,2)
        # wbar_mbej = 2 W_mbej - W_mbje
        # ki->km, kj->kb, ka->ke
        wkm = ki
        wkb = kj
        wke = ka
        # km + kb - ke - kj = G
        wkj = kconserv[wkm, wke, wkb]
        woVvO_bar[wkm, wkb, wke] = 2. * imds.woVvO[wkm, wkb, wke] - imds.woVoV[wkm, wkb, wkj].transpose(0,1,3,2)
    Hr1 = np.zeros_like(r1)
    for ki in range(nkpts):
        # ki - ka = kshift
        ka = kconserv_r1[ki]
        # r_ia <- - F_mi r_ma
        # km = ki
        Hr1[ki] -= einsum('mi,ma->ia', imds.Foo[ki], r1[ki])
        # r_ia <- F_ac r_ic
        Hr1[ki] += einsum('ac,ic->ia', imds.Fvv[ka], r1[ki])
        for km in range(nkpts):
            # r_ia <- (2 W_amie - W_maie) r_me
            # km - ke = kshift
            ke = kconserv_r1[km]
            Hr1[ki] += 2. * einsum('maei,me->ia', imds.woVvO[km, ka, ke], r1[km])
            Hr1[ki] -= einsum('maie,me->ia', imds.woVoV[km, ka, ki], r1[km])
            # r_ia <- F_me (2 r_imae - r_miae)
            Hr1[ki] += 2. * einsum('me,imae->ia', imds.Fov[km], r2[ki, km, ka])
            Hr1[ki] -= einsum('me,miae->ia', imds.Fov[km], r2[km, ki, ka])
            for ke in range(nkpts):
                # r_ia <- (2 W_amef - W_amfe) r_imef
                Hr1[ki] += 2. * einsum('amef,imef->ia', imds.wvOvV[ka, km, ke], r2[ki, km, ke])
                # ka + km - ke - kf = G
                kf = kconserv[ka, ke, km]
                Hr1[ki] -= einsum('amfe,imef->ia', imds.wvOvV[ka, km, kf], r2[ki, km, ke])
                # r_ia <- -W_mnie (2 r_mnae - r_nmae)
                # Rename dummy index ke -> kn
                kn = ke
                Hr1[ki] -= 2. * np.einsum('mnie,mnae->ia', imds.woOoV[km, kn, ki], r2[km, kn, ka])
                Hr1[ki] += np.einsum('mnie,nmae->ia', imds.woOoV[km, kn, ki], r2[kn, km, ka])
    Hr2 = np.zeros_like(r2)
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # ki + kj - ka - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        # r_ijab <= - F_mj r_imab
        # km = kj
        Hr2[ki, kj, ka] -= einsum('mj,imab->ijab', imds.Foo[kj], r2[ki, kj, ka])
        # r_ijab <= - F_mi r_jmba
        # km = ki
        Hr2[ki, kj, ka] -= einsum('mi,jmba->ijab', imds.Foo[ki], r2[kj, ki, kb])
        # r_ijab <= F_be r_ijae
        Hr2[ki, kj, ka] += einsum('be,ijae->ijab', imds.Fvv[kb], r2[ki, kj, ka])
        # r_ijab <= F_ae r_jibe
        Hr2[ki, kj, ka] += einsum('ae,jibe->ijab', imds.Fvv[ka], r2[kj, ki, kb])
        # r_ijab <= W_abej r_ie
        # ki - ke = kshift
        ke = kconserv_r1[ki]
        Hr2[ki, kj, ka] += einsum('abej,ie->ijab', imds.wvVvO[ka, kb, ke], r1[ki])
        # r_ijab <= W_baei r_je
        # kj - ke = kshift
        ke = kconserv_r1[kj]
        Hr2[ki, kj, ka] += einsum('baei,je->ijab', imds.wvVvO[kb, ka, ke], r1[kj])
        # r_ijab <= - W_mbij r_ma
        # km + kb - ki - kj = G
        # => ki - kb + kj - km = G
        km = kconserv[ki, kb, kj]
        Hr2[ki, kj, ka] -= einsum('mbij,ma->ijab', imds.woVoO[km, kb, ki], r1[km])
        # r_ijab <= - W_maji r_mb
        # km + ka - kj - ki = G
        # => ki -ka + kj - km = G
        km = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] -= einsum('maji,mb->ijab', imds.woVoO[km, ka, kj], r1[km])
        # `tmp` accumulates the km-summed ring terms; it is reused (transposed)
        # for the P(ia|jb)-related contribution below.
        tmp = np.zeros((nocc, nocc, nvir, nvir), dtype=r2.dtype)
        for km in range(nkpts):
            # r_ijab <= (2 W_mbej - W_mbje) r_imae - W_mbej r_imea
            # km + kb - ke - kj = G
            ke = kconserv[km, kj, kb]
            tmp += einsum('mbej,imae->ijab', woVvO_bar[km, kb, ke], r2[ki, km, ka])
            tmp -= einsum('mbej,imea->ijab', imds.woVvO[km, kb, ke], r2[ki, km, ke])
            # r_ijab <= - W_maje r_imeb
            # km + ka - kj - ke = G
            ke = kconserv[km, kj, ka]
            tmp -= einsum('maje,imeb->ijab', imds.woVoV[km, ka, kj], r2[ki, km, ke])
        Hr2[ki, kj, ka] += tmp
        # The following two lines can be obtained by simply transposing tmp:
        #     r_ijab <= (2 W_maei - W_maie) r_jmbe - W_maei r_jmeb
        #     r_ijab <= - W_mbie r_jmea
        Hr2[kj, ki, kb] += tmp.transpose(1,0,3,2)
        tmp = None
        for km in range(nkpts):
            # r_ijab <= W_abef r_ijef
            # Rename dummy index km -> ke
            ke = km
            Hr2[ki, kj, ka] += einsum('abef,ijef->ijab', imds.wvVvV[ka, kb, ke], r2[ki, kj, ke])
            # r_ijab <= W_mnij r_mnab
            # km + kn - ki - kj = G
            # => ki - km + kj - kn = G
            kn = kconserv[ki, km, kj]
            Hr2[ki, kj, ka] += einsum('mnij,mnab->ijab', imds.woOoO[km, kn, ki], r2[km, kn, ka])
    #
    # r_ijab <= - W_mnef t_imab (2 r_jnef - r_jnfe)
    # r_ijab <= - W_mnef t_jmba (2 r_inef - r_infe)
    # r_ijab <= - W_mnef t_ijae (2 r_mnbf - r_mnfb)
    # r_ijab <= - W_mnef t_jibe (2 r_mnaf - r_mnfa)
    #
    # r_ijab <= - (2 W_nmie - W_nmei) t_jnba r_me
    # r_ijab <= - (2 W_nmje - W_nmej) t_inab r_me
    # r_ijab <= + (2 W_amfe - W_amef) t_jibf r_me
    # r_ijab <= + (2 W_bmfe - W_bmef) t_ijaf r_me
    #
    # First, build intermediates M = W.r
    #
    wr2_oo = np.zeros((nkpts, nocc, nocc), dtype=r2.dtype)
    wr2_vv = np.zeros((nkpts, nvir, nvir), dtype=r2.dtype)
    wr1_oo = np.zeros_like(wr2_oo)
    wr1_vv = np.zeros_like(wr2_vv)
    for kj in range(nkpts):
        # Wr2_jm = W_mnef (2 r_jnef - r_jnfe) = W_mnef rbar_jnef
        # km + kn - ke - kf = G
        # kj + kn - ke - kf = kshift
        # => kj - km = kshift
        km = kconserv_r1[kj]
        # x: kn, y: ke
        wr2_oo[kj] += einsum('xymnef,xyjnef->jm', imds.woOvV[km], r2bar[kj])
        # Wr2_eb = W_mnef (2 r_mnbf - r_mnfb) = W_mnef rbar_mnbf
        ke = kj
        # km + kn - ke - kf = G
        # km + kn - kb - kf = kshift
        # => ke - kb = kshift
        kb = kconserv_r1[ke]
        # x: km, y: kn
        wr2_vv[ke] += einsum('xymnef,xymnbf->eb', imds.woOvV[:, :, ke], r2bar[:, :, kb])
        # Wr1_in = (2 W_nmie - W_nmei) r_me = wbar_nmie r_me
        ki = kj
        # kn + km - ki - ke = G
        # km - ke = kshift
        # => ki - kn = kshift
        kn = kconserv_r1[ki]
        # x: km
        wr1_oo[ki] += einsum('xnmie,xme->in', woOoV_bar[kn, :, ki], r1)
        # Wr1_fa = (2 W_amfe - W_amef) r_me = wbar_amfe r_me
        kf = kj
        # ka + km - kf - ke = G
        # km - ke = kshift
        # => kf - ka = kshift
        ka = kconserv_r1[kf]
        # x: km
        wr1_vv[kf] += einsum('xamfe,xme->fa', wvOvV_bar[ka, :, kf], r1)
    #
    # Second, compute the whole contraction
    #
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # ki + kj - ka - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        # r_ijab <= - Wr2_jm t_imab
        # kj - km = kshift
        km = kconserv_r1[kj]
        Hr2[ki, kj, ka] -= einsum('jm,imab->ijab', wr2_oo[kj], imds.t2[ki, km, ka])
        # r_ijab <= - Wr2_im t_jmba
        # ki - km = kshift
        km = kconserv_r1[ki]
        Hr2[ki, kj, ka] -= einsum('im,jmba->ijab', wr2_oo[ki], imds.t2[kj, km, kb])
        # r_ijab <= - Wr2_eb t_ijae
        # ki + kj - ka - ke = G
        ke = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] -= einsum('eb,ijae->ijab', wr2_vv[ke], imds.t2[ki, kj, ka])
        # r_ijab <= - Wr2_ea t_jibe
        # kj + ki - kb - ke = G
        ke = kconserv[kj, kb, ki]
        Hr2[ki, kj, ka] -= einsum('ea,jibe->ijab', wr2_vv[ke], imds.t2[kj, ki, kb])
        # r_ijab <= - Wr1_in t_jnba
        # ki - kn = kshift
        kn = kconserv_r1[ki]
        Hr2[ki, kj, ka] -= einsum('in,jnba->ijab', wr1_oo[ki], imds.t2[kj, kn, kb])
        # r_ijab <= - Wr1_jn t_inab
        # kj - kn = kshift
        kn = kconserv_r1[kj]
        Hr2[ki, kj, ka] -= einsum('jn,inab->ijab', wr1_oo[kj], imds.t2[ki, kn, ka])
        # r_ijab <= Wr1_fa t_jibf
        # kj + ki - kb - kf = G
        kf = kconserv[kj, kb, ki]
        Hr2[ki, kj, ka] += einsum('fa,jibf->ijab', wr1_vv[kf], imds.t2[kj, ki, kb])
        # r_ijab <= Wr1_fb t_ijaf
        # ki + kj - ka - kf = G
        kf = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] += einsum('fb,ijaf->ijab', wr1_vv[kf], imds.t2[ki, kj, ka])
    cput1 = log.timer_debug1("contraction", *cput1)
    vector = amplitudes_to_vector_singlet(Hr1, Hr2, kconserv_r2)
    log.timer_debug1("amplitudes_to_vector_singlet", *cput1)
    log.timer("matvec EOMEE Singlet", *cput0)
    return vector
def eeccsd_diag(eom, kshift=0, imds=None):
    '''Diagonal elements of similarity-transformed Hamiltonian

    Returns the diagonal packed as a singlet (r1, r2) vector; used as a
    Davidson preconditioner.
    '''
    if imds is None: imds = eom.make_imds()
    t1 = imds.t1
    nkpts, nocc, nvir = t1.shape
    kconserv = eom.kconserv
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    kconserv_r2 = eom.get_kconserv_ee_r2(kshift)
    Hr1 = np.zeros((nkpts, nocc, nvir), dtype=t1.dtype)
    for ki in range(nkpts):
        ka = kconserv_r1[ki]
        Hr1[ki] -= imds.Foo[ki].diagonal()[:,None]
        Hr1[ki] += imds.Fvv[ka].diagonal()[None,:]
        Hr1[ki] += np.einsum('iaai->ia', imds.woVvO[ki, ka, ka])
        Hr1[ki] -= np.einsum('iaia->ia', imds.woVoV[ki, ka, ki])
    Hr2 = np.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=t1.dtype)
    # TODO Allow partition='mp'
    if eom.partition == "mp":
        raise NotImplementedError
    else:
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv_r2[ki, ka, kj]
            # One-particle (Fock-like) contributions
            Hr2[ki, kj, ka] -= imds.Foo[ki].diagonal()[:, None, None, None]
            Hr2[ki, kj, ka] -= imds.Foo[kj].diagonal()[None, :, None, None]
            Hr2[ki, kj, ka] += imds.Fvv[ka].diagonal()[None, None, :, None]
            Hr2[ki, kj, ka] += imds.Fvv[kb].diagonal()[None, None, None, :]
            # Ring (ov|vo)- and (ov|ov)-type diagonal contributions
            Hr2[ki, kj, ka] += np.einsum('jbbj->jb', imds.woVvO[kj, kb, kb])[None, :, None, :]
            Hr2[ki, kj, ka] -= np.einsum('jbjb->jb', imds.woVoV[kj, kb, kj])[None, :, None, :]
            Hr2[ki, kj, ka] -= np.einsum('jaja->ja', imds.woVoV[kj, ka, kj])[None, :, :, None]
            Hr2[ki, kj, ka] -= np.einsum('ibib->ib', imds.woVoV[ki, kb, ki])[:, None, None, :]
            Hr2[ki, kj, ka] += np.einsum('iaai->ia', imds.woVvO[ki, ka, ka])[:, None, :, None]
            Hr2[ki, kj, ka] -= np.einsum('iaia->ia', imds.woVoV[ki, ka, ki])[:, None, :, None]
            # Ladder contributions
            Hr2[ki, kj, ka] += np.einsum('abab->ab', imds.wvVvV[ka, kb, ka])[None, None, :, :]
            Hr2[ki, kj, ka] += np.einsum('ijij->ij', imds.woOoO[ki, kj, ki])[:, :, None, None]
            # ki - ka + km - kb = G
            # => ka - ki + kb - km = G
            km = kconserv[ka, ki, kb]
            Hr2[ki, kj, ka] -= np.einsum('imab,imab->iab', imds.woOvV[ki, km, ka], imds.t2[ki, km, ka])[:, None, :, :]
            # km - ka + kj - kb = G
            # => ka - kj + kb - km = G
            km = kconserv[ka, kj, kb]
            Hr2[ki, kj, ka] -= np.einsum('mjab,mjab->jab', imds.woOvV[km, kj, ka], imds.t2[km, kj, ka])[None, :, :, :]
            # ki - ka + kj - ke = G
            Hr2[ki, kj, ka] -= np.einsum('ijae,ijae->ija', imds.woOvV[ki, kj, ka], imds.t2[ki, kj, ka])[:, :, :, None]
            # ki - ke + kj - kb = G
            ke = kconserv[ki, kb, kj]
            Hr2[ki, kj, ka] -= np.einsum('ijeb,ijeb->ijb', imds.woOvV[ki, kj, ke], imds.t2[ki, kj, ke])[:, :, None, :]
    vector = amplitudes_to_vector_singlet(Hr1, Hr2, kconserv_r2)
    return vector
def eeccsd_matvec_singlet_Hr1(eom, vector, kshift, imds=None):
    '''A mini version of eeccsd_matvec_singlet(), in the sense that
    only Hbar.r1 is performed.

    Args:
        vector: flattened r1 amplitudes of length nkpts*nocc*nvir.

    Returns:
        Flattened Hbar.r1 of the same length.

    Raises:
        ValueError: if `vector` does not have the expected r1 length.
    '''
    if imds is None: imds = eom.make_imds()
    nkpts = eom.nkpts
    nocc = eom.nocc
    nvir = eom.nmo - nocc
    r1_size = nkpts * nocc * nvir
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    if len(vector) != r1_size:
        raise ValueError("vector length mismatch: expected {0}, "
                         "found {1}".format(r1_size, len(vector)))
    r1 = vector.reshape(nkpts, nocc, nvir)
    Hr1 = np.zeros_like(r1)
    for ki in range(nkpts):
        # ki - ka = kshift
        ka = kconserv_r1[ki]
        # r_ia <- - F_mi r_ma
        # km = ki
        Hr1[ki] -= einsum('mi,ma->ia', imds.Foo[ki], r1[ki])
        # r_ia <- F_ac r_ic
        Hr1[ki] += einsum('ac,ic->ia', imds.Fvv[ka], r1[ki])
        for km in range(nkpts):
            # r_ia <- (2 W_amie - W_maie) r_me
            # km - ke = kshift
            ke = kconserv_r1[km]
            Hr1[ki] += 2. * einsum('maei,me->ia', imds.woVvO[km, ka, ke], r1[km])
            Hr1[ki] -= einsum('maie,me->ia', imds.woVoV[km, ka, ki], r1[km])
    return Hr1.ravel()
def eeccsd_cis_approx_slow(eom, kshift, nroots=1, imds=None, **kwargs):
    '''Build initial R vector through diagonalization of <r1|Hbar|r1>
    This method evaluates the matrix elements of Hbar in r1 space in the following way:
    - 1st col of Hbar = matvec(r1_col1) where r1_col1 = [1, 0, 0, 0, ...]
    - 2nd col of Hbar = matvec(r1_col2) where r1_col2 = [0, 1, 0, 0, ...]
    - and so on
    Note that such evaluation has N^3 cost, but error free (because matvec() has been proven correct).

    Returns:
        (eigval, eigvec): the `nroots` lowest eigenvalues (sorted by numpy's
        complex ordering: real part, then imaginary part) and the matching
        eigenvector columns of the r1-space Hamiltonian.
    '''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None: imds = eom.make_imds()
    nkpts, nocc, nvir = imds.t1.shape
    dtype = imds.t1.dtype
    r1_size = nkpts * nocc * nvir
    H1 = np.zeros([r1_size, r1_size], dtype=dtype)
    # Build Hbar column by column by applying matvec to unit vectors
    for col in range(r1_size):
        vec = np.zeros(r1_size, dtype=dtype)
        vec[col] = 1.0
        H1[:, col] = eeccsd_matvec_singlet_Hr1(eom, vec, kshift, imds=imds)
    # H1 is not Hermitian in general, hence the general eigensolver
    eigval, eigvec = np.linalg.eig(H1)
    idx = eigval.argsort()[:nroots]
    eigval = eigval[idx]
    eigvec = eigvec[:, idx]
    log.timer("EOMEE CIS approx", *cput0)
    return eigval, eigvec
def get_init_guess_cis(eom, kshift, nroots=1, imds=None, **kwargs):
    '''Construct initial R vectors from diagonalization of <r1|Hbar|r1>.

    Each guess is a full-length EOM vector whose r1 segment holds one
    CIS-like eigenvector and whose r2 segment is zero.
    Check eeccsd_cis_approx_slow() for details.
    '''
    if imds is None:
        imds = eom.make_imds()
    nkpts, nocc, nvir = imds.t1.shape
    dtype = imds.t1.dtype
    r1_size = nkpts * nocc * nvir
    full_size = int(eom.vector_size(kshift))
    _, cis_vecs = eeccsd_cis_approx_slow(eom, kshift, nroots, imds)

    def _embed(r1_col):
        # Place the CIS r1 coefficients at the head of a zero full vector.
        vec = np.zeros(full_size, dtype=dtype)
        vec[:r1_size] = r1_col
        return vec

    return [_embed(cis_vecs[:, root]) for root in range(nroots)]
def cis_easy(eom, nroots=1, kptlist=None, imds=None, **kwargs):
    '''An easy implementation of k-point CIS based on EOMCC infrastructure.

    Temporarily zeros the CC t1/t2 amplitudes so that the EOM intermediates
    reduce to the bare Hamiltonian, then diagonalizes the r1 block at each
    requested k-shift.

    Args:
        nroots: number of roots per k-point.
        kptlist: k-shift indices to solve; defaults to all k-points.
        imds: precomputed bare-Hamiltonian intermediates (optional).

    Returns:
        (evals, evecs): per-k-point lists of eigenvalues and eigenvectors.
    '''
    print("\n******** <function 'pyscf.pbc.cc.eom_kccsd_rhf.cis_easy'> ********")
    # Robustness fix: the default kptlist=None previously crashed at
    # len(kptlist); follow the EOM kernel convention of all k-points.
    if kptlist is None:
        kptlist = range(eom.nkpts)
    if imds is None:
        cc = eom._cc
        t1_old, t2_old = cc.t1.copy(), cc.t2.copy()
        # Zero t1, t2
        cc.t1 = np.zeros_like(t1_old)
        cc.t2 = np.zeros_like(t2_old)
        # Remake intermediates using zero t1, t2 => get bare Hamiltonian back
        imds = eom.make_imds()
        # Recover t1, t2 so that the following calculations based on `eom` are
        # not affected.  (The intermediate None assignment mirrors the original
        # code; presumably it resets cached state on the CC object — verify.)
        cc.t1, cc.t2 = None, None
        cc.t1, cc.t2 = t1_old, t2_old
    evals = [None]*len(kptlist)
    evecs = [None]*len(kptlist)
    for k, kshift in enumerate(kptlist):
        print("\nkshift =", kshift)
        eigval, eigvec = eeccsd_cis_approx_slow(eom, kshift, nroots, imds)
        evals[k] = eigval
        evecs[k] = eigvec
        for i in range(nroots):
            print('CIS root {:d} E = {:.16g}'.format(i, eigval[i].real))
    return evals, evecs
class EOMEE(eom_kgccsd.EOMEE):
    """Base class for k-point EOM-EE-CCSD on spin-restricted references.

    Spin-block specific subclasses (singlet/triplet/spin-flip) supply the
    actual matvec and vector layout.
    """
    kernel = eeccsd
    eeccsd = eeccsd
    matvec = eeccsd_matvec
    get_diag = eeccsd_diag
    @property
    def nkpts(self):
        """Number of k-points."""
        return len(self.kpts)
    def vector_size(self, kshift=0):
        # Defined per spin block in the subclasses
        raise NotImplementedError
    def make_imds(self, eris=None):
        """Build the EE intermediates container."""
        imds = _IMDS(self._cc, eris)
        imds.make_ee()
        return imds
class EOMEESinglet(EOMEE):
    """k-point EOM-EE-CCSD restricted to singlet excitations."""
    kernel = eomee_ccsd_singlet
    eomee_ccsd_singlet = eomee_ccsd_singlet
    matvec = eeccsd_matvec_singlet
    get_init_guess = get_init_guess_cis
    cis = cis_easy
    def vector_size(self, kshift=0):
        '''Size of the linear excitation operator R vector based on spatial
        orbital basis.
        r1 : r_{i k_i}${a k_a}
        r2 : r_{i k_i, J k_J}^{a k_a, B k_B}
        Only r1aa, r2abab spin blocks are considered.
        '''
        nocc = self.nocc
        nvir = self.nmo - nocc
        nov = nocc * nvir
        nkpts = self.nkpts
        size_r1 = nkpts*nov
        kconserv = self.get_kconserv_ee_r2(kshift)
        # Count unique r2 elements: diagonal k-blocks store the lower
        # triangle, off-diagonal k-blocks are stored once.
        size_r2 = 0
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv[ki, ka, kj]
            kika = ki * nkpts + ka
            kjkb = kj * nkpts + kb
            if kika == kjkb:
                size_r2 += nov * (nov + 1) // 2
            elif kika > kjkb:
                size_r2 += nov**2
        return size_r1 + size_r2
    def gen_matvec(self, kshift, imds=None, left=False, **kwargs):
        """Return (matvec closure, preconditioner diagonal) for the solver."""
        if imds is None: imds = self.make_imds()
        diag = self.get_diag(kshift, imds)
        if left:
            # TODO allow left vectors to be computed
            raise NotImplementedError
        else:
            matvec = lambda xs: [self.matvec(x, kshift, imds, diag) for x in xs]
        return matvec, diag
    def vector_to_amplitudes(self, vector, kshift=None, nkpts=None, nmo=None, nocc=None, kconserv=None):
        """Unpack a flat singlet vector into (r1, r2)."""
        if nmo is None: nmo = self.nmo
        if nocc is None: nocc = self.nocc
        if nkpts is None: nkpts = self.nkpts
        if kconserv is None: kconserv = self.get_kconserv_ee_r2(kshift)
        return vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv)
    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        """Pack (r1, r2) into a flat singlet vector."""
        if kconserv is None: kconserv = self.get_kconserv_ee_r2(kshift)
        return amplitudes_to_vector_singlet(r1, r2, kconserv)
class EOMEETriplet(EOMEE):
    """Placeholder for triplet EOM-EE-CCSD; not implemented yet."""
    def vector_size(self, kshift=0):
        # Not implemented: no triplet vector layout defined
        return None
class EOMEESpinFlip(EOMEE):
    """Placeholder for spin-flip EOM-EE-CCSD; not implemented yet."""
    def vector_size(self, kshift=0):
        # Not implemented: no spin-flip vector layout defined
        return None
imd = imdk
class _IMDS:
# Identical to molecular rccsd_slow
def __init__(self, cc, eris=None):
self.verbose = cc.verbose
self.stdout = cc.stdout
self.t1 = cc.t1
self.t2 = cc.t2
if eris is None:
eris = cc.ao2mo()
self.eris = eris
self.kconserv = cc.khelper.kconserv
self.made_ip_imds = False
self.made_ea_imds = False
self._made_shared_2e = False
# TODO: check whether to hold all stuff in memory
if getattr(self.eris, "feri1", None):
self._fimd = lib.H5TmpFile()
else:
self._fimd = None
def _make_shared_1e(self):
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1, t2, eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
self.Loo = imd.Loo(t1, t2, eris, kconserv)
self.Lvv = imd.Lvv(t1, t2, eris, kconserv)
self.Fov = imd.cc_Fov(t1, t2, eris, kconserv)
log.timer('EOM-CCSD shared one-electron intermediates', *cput0)
def _make_shared_2e(self):
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1, t2, eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
if self._fimd is not None:
nkpts, nocc, nvir = t1.shape
ovov_dest = self._fimd.create_dataset('ovov', (nkpts, nkpts, nkpts, nocc, nvir, nocc, nvir), t1.dtype.char)
ovvo_dest = self._fimd.create_dataset('ovvo', (nkpts, nkpts, nkpts, nocc, nvir, nvir, nocc), t1.dtype.char)
else:
ovov_dest = ovvo_dest = None
# 2 virtuals
self.Wovov = imd.Wovov(t1, t2, eris, kconserv, ovov_dest)
self.Wovvo = imd.Wovvo(t1, t2, eris, kconserv, ovvo_dest)
self.Woovv = eris.oovv
log.timer('EOM-CCSD shared two-electron intermediates', *cput0)
def make_ip(self, ip_partition=None):
self._make_shared_1e()
if self._made_shared_2e is False and ip_partition != 'mp':
self._make_shared_2e()
self._made_shared_2e = True
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1, t2, eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
if self._fimd is not None:
nkpts, nocc, nvir = t1.shape
oooo_dest = self._fimd.create_dataset('oooo', (nkpts, nkpts, nkpts, nocc, nocc, nocc, nocc), t1.dtype.char)
ooov_dest = self._fimd.create_dataset('ooov', (nkpts, nkpts, nkpts, nocc, nocc, nocc, nvir), t1.dtype.char)
ovoo_dest = self._fimd.create_dataset('ovoo', (nkpts, nkpts, nkpts, nocc, nvir, nocc, nocc), t1.dtype.char)
else:
oooo_dest = ooov_dest = ovoo_dest = None
# 0 or 1 virtuals
if ip_partition != 'mp':
self.Woooo = imd.Woooo(t1, t2, eris, kconserv, oooo_dest)
self.Wooov = imd.Wooov(t1, t2, eris, kconserv, ooov_dest)
self.Wovoo = imd.Wovoo(t1, t2, eris, kconserv, ovoo_dest)
self.made_ip_imds = True
log.timer('EOM-CCSD IP intermediates', *cput0)
def make_t3p2_ip(self, cc):
cput0 = (logger.process_clock(), logger.perf_counter())
t1, t2, eris = cc.t1, cc.t2, self.eris
delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
imd.get_t3p2_imds(cc, t1, t2, eris)
self.t1 = pt1
self.t2 = pt2
self._made_shared_2e = False # Force update
self.make_ip() # Make after t1/t2 updated
self.Wovoo = self.Wovoo + Wovoo
self.made_ip_imds = True
logger.timer_debug1(self, 'EOM-CCSD(T)a IP intermediates', *cput0)
return self
def make_ea(self, ea_partition=None):
self._make_shared_1e()
if self._made_shared_2e is False and ea_partition != 'mp':
self._make_shared_2e()
self._made_shared_2e = True
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1, t2, eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
if self._fimd is not None:
nkpts, nocc, nvir = t1.shape
vovv_dest = self._fimd.create_dataset('vovv', (nkpts, nkpts, nkpts, nvir, nocc, nvir, nvir), t1.dtype.char)
vvvo_dest = self._fimd.create_dataset('vvvo', (nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc), t1.dtype.char)
if eris.vvvv is not None:
vvvv_dest = self._fimd.create_dataset('vvvv', (nkpts, nkpts, nkpts, nvir, nvir, nvir, nvir), t1.dtype.char) # noqa: E501
else:
vovv_dest = vvvo_dest = vvvv_dest = None
# 3 or 4 virtuals
self.Wvovv = imd.Wvovv(t1, t2, eris, kconserv, vovv_dest)
if ea_partition == 'mp' and np.all(t1 == 0):
self.Wvvvo = imd.Wvvvo(t1, t2, eris, kconserv, vvvo_dest)
else:
if eris.vvvv is None:
self.Wvvvv = None
else:
self.Wvvvv = imd.Wvvvv(t1, t2, eris, kconserv, vvvv_dest)
self.Wvvvo = imd.Wvvvo(t1, t2, eris, kconserv, self.Wvvvv, vvvo_dest)
self.made_ea_imds = True
log.timer('EOM-CCSD EA intermediates', *cput0)
def make_t3p2_ea(self, cc):
cput0 = (logger.process_clock(), logger.perf_counter())
t1, t2, eris = cc.t1, cc.t2, self.eris
delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
imd.get_t3p2_imds(cc, t1, t2, eris)
self.t1 = pt1
self.t2 = pt2
self._made_shared_2e = False # Force update
self.make_ea() # Make after t1/t2 updated
self.Wvvvo = self.Wvvvo + Wvvvo
self.made_ea_imds = True
logger.timer_debug1(self, 'EOM-CCSD(T)a EA intermediates', *cput0)
return self
def make_t3p2_ip_ea(self, cc):
    """Build both IP and EA intermediates including the T3[2]
    corrections (perturbed t1/t2 plus the Wovoo/Wvvvo correction terms).
    """
    cput0 = (logger.process_clock(), logger.perf_counter())

    t1, t2, eris = cc.t1, cc.t2, self.eris
    delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
        imd.get_t3p2_imds(cc, t1, t2, eris)
    self.t1 = pt1
    self.t2 = pt2

    self._made_shared_2e = False  # Force update
    self.make_ip()  # Make after t1/t2 updated
    self.make_ea()  # Make after t1/t2 updated
    self.Wovoo = self.Wovoo + Wovoo
    self.Wvvvo = self.Wvvvo + Wvvvo

    self.made_ip_imds = True
    self.made_ea_imds = True
    logger.timer_debug1(self, 'EOM-CCSD(T)a IP/EA intermediates', *cput0)
    return self
def make_ee(self, ee_partition=None):
    """Build the EOM-CCSD EE (electronic-excitation) intermediates.

    Reuses the IP/EA intermediates when they were already built; otherwise
    constructs the missing W terms directly.

    NOTE(review): `ee_partition` is accepted but not used in this body —
    presumably kept for interface symmetry with make_ip/make_ea; confirm.
    """
    self._make_shared_1e()
    if self._made_shared_2e is False:
        self._make_shared_2e()
        self._made_shared_2e = True

    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(self.stdout, self.verbose)

    t1, t2, eris = self.t1, self.t2, self.eris
    kconserv = self.kconserv

    # Rename imds to match the notations in pyscf.cc.eom_rccsd
    self.Foo = self.Loo
    self.Fvv = self.Lvv
    self.woOvV = self.Woovv
    self.woVvO = self.Wovvo
    self.woVoV = self.Wovov

    if not self.made_ip_imds:
        # 0 or 1 virtuals
        self.woOoO = imd.Woooo(t1, t2, eris, kconserv)
        self.woOoV = imd.Wooov(t1, t2, eris, kconserv)
        self.woVoO = imd.Wovoo(t1, t2, eris, kconserv)
    else:
        self.woOoO = self.Woooo
        self.woOoV = self.Wooov
        self.woVoO = self.Wovoo
    if not self.made_ea_imds:
        # 3 or 4 virtuals
        self.wvOvV = imd.Wvovv(t1, t2, eris, kconserv)
        self.wvVvV = imd.Wvvvv(t1, t2, eris, kconserv)
        self.wvVvO = imd.Wvvvo(t1, t2, eris, kconserv, self.wvVvV)
    else:
        self.wvOvV = self.Wvovv
        self.wvVvV = self.Wvvvv
        self.wvVvO = self.Wvvvo

    self.made_ee_imds = True
    log.timer('EOM-CCSD EE intermediates', *cput0)
def get_Wvvvv(self, ka, kb, kc):
    """Return the Wvvvv intermediate block for k-points (ka, kb, kc),
    from the precomputed array when available, otherwise built on the fly.
    """
    if not self.made_ea_imds:
        self.make_ea()
    if self.Wvvvv is not None:
        return self.Wvvvv[ka, kb, kc]
    # No stored vvvv integrals: construct just this block.
    return imd.get_Wvvvv(self.t1, self.t2, self.eris, self.kconserv,
                         ka, kb, kc)
|
sunqm/pyscf
|
pyscf/pbc/cc/eom_kccsd_rhf.py
|
Python
|
apache-2.0
| 68,153
|
[
"PySCF"
] |
6b49294c039389fe047619d6ece8e067825b47ca87a6ccec76122b0106fb3322
|
from galaxy_analysis.plot.plot_styles import *
import numpy as np
import matplotlib.pyplot as plt
import deepdish as dd
import h5py, glob, sys
from galaxy_analysis.utilities import utilities
from galaxy_analysis.analysis import Galaxy
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import binned_statistic_2d
# temporary
import time as cpu_time
def _load_time_series(data, data_list, field_path):
    """Load one statistic for every output in `data_list` as a float array.

    `field_path` is the nested key path [phase, field_type, field, statistic]
    inside the (self-contained) gas-abundance HDF5 analysis file.
    """
    values = utilities.get_property(field_path,
                                    file_list = data.filename,
                                    data_list = data_list,
                                    self_contained = True)
    # np.float was removed in NumPy 1.24; the builtin float is identical.
    return np.array(values, dtype = float)


def load_abundance_data(data, data_list,
                        fields,
                        property_list,
                        phases = None,
                        field_types = None):
    """Assemble time series of abundance statistics from an analysis file.

    Parameters:
        data           : open h5py.File of the gas-abundance analysis output
        data_list      : sorted list of output ('DD...') group names to load
        fields         : abundance fields, e.g. 'O_Fraction', 'O_over_Fe'
        property_list  : statistics per field ('median', 'IQR',
                         'mean_median_distance', 'd9_d1_range', ...)
        phases         : ISM phases to load (default CNM/WNM/WIM/HIM)
        field_types    : None (auto-detect), a dict keyed by field, or a
                         list parallel to `fields`

    Returns a nested dict: time_data[field][phase][property] -> float array.
    For '_Fraction' fields, values are converted to log10 where appropriate.

    Raises RuntimeError if a field type cannot be auto-detected.
    """
    if phases is None:
        phases = ['CNM', 'WNM', 'WIM', 'HIM']

    time_data = {}
    for i, field in enumerate(fields):
        time_data[field] = {}

        # Determine the top-level HDF5 group the field lives in.
        if field_types is None:
            if '_Fraction' in field or '_over_' in field:
                ft = 'mass_fraction'
            else:
                print("Cannot properly determing field type for " + field)
                # BUG FIX: was `raise RunTimeError` (a NameError) —
                # the builtin exception is RuntimeError.
                raise RuntimeError("unknown field type for " + field)
        elif hasattr(field_types, 'keys'):
            ft = field_types[field]
        else:
            ft = field_types[i]

        for phase in phases:
            time_data[field][phase] = {}
            pdata = time_data[field][phase]  # alias into the nested dict

            for property in property_list:
                if property == 'IQR' or property == 'inner_quartile_range':
                    for p in ['Q1', 'Q3']:
                        pdata[p] = _load_time_series(data, data_list,
                                                     [phase, ft, field, p])
                    if '_Fraction' in field:  # do not do for abundance ratios
                        pdata[property] = np.log10(pdata['Q3']) - \
                            np.log10(pdata['Q1'])
                    else:
                        pdata[property] = pdata['Q3'] - pdata['Q1']

                elif property == 'mean_median_distance':
                    # Only load what a previous property didn't already cache.
                    for p in ['median', 'mean']:
                        if not p in list(pdata.keys()):
                            pdata[p] = _load_time_series(data, data_list,
                                                         [phase, ft, field, p])
                    if '_Fraction' in field:  # do not do for abundance ratios
                        # NOTE(review): assumes 'median' was already converted
                        # to log10 by an earlier 'median' entry in
                        # property_list — confirm ordering at call sites.
                        pdata['mean_median_distance'] = \
                            np.log10(pdata['mean']) - pdata['median']
                    else:
                        pdata['mean_median_distance'] = pdata['mean'] - \
                            pdata['median']

                elif property == 'inner_decile_range' or property == 'd9_d1_range':
                    for p in ['decile_1', 'decile_9']:
                        pdata[p] = _load_time_series(data, data_list,
                                                     [phase, ft, field, p])
                    if '_Fraction' in field:  # do not do for abundance ratios
                        pdata[property] = np.log10(pdata['decile_9']) - \
                            np.log10(pdata['decile_1'])
                    else:
                        pdata[property] = pdata['decile_9'] - \
                            pdata['decile_1']

                else:
                    pdata[property] = _load_time_series(data, data_list,
                                                        [phase, ft, field, property])
                    if '_Fraction' in field:  # do not do for abundance ratios
                        if property in ['median', 'mean']:
                            pdata[property] = np.log10(pdata[property])

    return time_data
def plot_stellar_2d_hist(galaxy, field, time_bins = np.arange(0.0,20.0,0.2),
                         ybins = np.arange(-20,-6,0.1)):
    """2D histogram of a stellar abundance field vs. stellar formation time.

    Parameters:
        galaxy    : Galaxy object exposing a `df` particle-field container
        field     : e.g. 'O_Fraction' (log10 of the particle fraction is
                    plotted) or a raw particle field name
        time_bins : bin edges in Myr (relative to the first star formed)
        ybins     : bin edges for the (log) abundance axis

    Saves 'stellar_O_2d_hist.png'.
    """
    if '_Fraction' in field:
        # BUG FIX: was field.strip('_Fraction'), which strips a *character
        # set* from both ends (e.g. 'Ba_Fraction' -> 'B'), not the suffix.
        elem = field[:-len('_Fraction')]
        yval = np.log10(galaxy.df[('io','particle_' + elem + '_fraction')].value)
    else:
        yval = galaxy.df[('io','particle_' + field)].value
    creation_time = galaxy.df['creation_time'].to('Myr').value

    fig, ax = plt.subplots()
    fig.set_size_inches(8,8)

    statistic_data = np.ones(np.size(creation_time))
    N, x_edge, y_edge, binnum = binned_statistic_2d(
        creation_time - np.min(creation_time), yval, statistic_data,
        statistic = 'count', bins = (time_bins, ybins))

    # Fraction of all stars per bin, in log10.  Empty bins get a sentinel
    # well below the colorbar floor instead of log10(0) = -inf.
    # BUG FIX: this masked result was previously overwritten by an unmasked
    # `np.log10(N / ...)`, reintroducing -inf values and runtime warnings.
    fraction = N / (1.0 * np.size(creation_time))
    fraction[fraction <= 0] = -99
    fraction[fraction > 0] = np.log10(fraction[fraction > 0])
    plot_val = fraction

    xmesh, ymesh = np.meshgrid(x_edge, y_edge)
    img1 = ax.pcolormesh(xmesh, ymesh, plot_val.T,
                         cmap = 'magma', vmin = -4, vmax = -1)

    divider = make_axes_locatable(ax)
    cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
    fig.colorbar(img1, cax=cax1, label = "Fraction")

    ax.set_xlabel(r'Time (Myr)')
    plt.minorticks_on()
    plt.tight_layout()
    fig.savefig('stellar_O_2d_hist.png')
    return
def plot_stellar_separation(time, data, galaxy,
                            field = 'O_Fraction',
                            property = 'median',
                            phases = None,
                            figdim=None,
                            field_types = None,
                            labels = None, line_styles = None,
                            xlim = None, ylim = None,
                            annotate_text = None, ylabels = None):
    """Scatter each star's abundance offset from the gas-phase statistic.

    For every star, plots (abundance at birth) minus (the phase's `property`
    value, interpolated to the star's formation time) against formation time.

    Saves 'stellar_distance_to_median.png'.
    """
    if phases is None:
        phases = ['CNM']
    if labels is None:
        labels = {}
    # Fill in identity labels for anything the caller didn't name
    # (robust to partially-specified label dicts).
    for k in phases + [field] + [property]:
        if not (k in list(labels.keys())):
            labels[k] = k

    if figdim is None:
        if len(phases) == 1:
            nrow, ncol = 1, 1
        else:
            ncol = 3
            nrow = 2
    else:
        nrow, ncol = figdim

    data_list = np.array(np.sort([x for x in list(data.keys()) if 'DD' in x]))
    data_list = data_list[:len(time)]
    time_data = load_abundance_data(data, data_list, [field], [property],
                                    phases = phases, field_types = field_types)

    if nrow * ncol > 1:
        fig, all_axes = plt.subplots(nrow, ncol, sharex=True, sharey=True)
        fig.set_size_inches(ncol*5, nrow*5)
        fig.subplots_adjust(hspace=0.0, wspace=0.0)
    else:
        fig, ax = plt.subplots()
        fig.set_size_inches(6,6)

    axi = axj = 0
    creation_time = galaxy.df['creation_time'].convert_to_units('Myr').value

    for i, phase in enumerate(phases):
        if nrow * ncol > 1:
            ax = all_axes[(axi, axj)]

        property_value = time_data[field][phase][property]

        if '_Fraction' in field:
            # BUG FIX: was field.strip('_Fraction'), which strips a
            # *character set* ('Ba_Fraction' -> 'B'), not the suffix.
            star_val = np.log10(galaxy.df['particle_' + field[:-len('_Fraction')] + '_fraction'])
        else:
            star_val = galaxy.df['particle_' + field]

        select = creation_time > 0.0  # do all for now (np.min(creation_time) + 100.0)
        distance = star_val[select] - np.interp(creation_time[select], time, property_value)

        ax.scatter(creation_time[select] - np.min(creation_time),
                   distance, alpha = 0.75, color = 'black', s = 20)
        ax.set_xlim(0.0, 500.0)
        ax.set_ylim(-2.5, 2.5)
        ax.plot(ax.get_xlim(), [0.0, 0.0], color = 'black', lw = line_width, ls = '--')
        plt.minorticks_on()

        # Annotate median |offset| and its IQR, skipping the first 120 Myr.
        # NOTE(review): `distance` was built with the earlier all-star mask;
        # re-masking it with this full-length boolean only lines up because
        # the first selection is all-True — confirm.
        xy = (200.0, ax.get_ylim()[1] - 0.35)
        select = creation_time > (np.min(creation_time) + 120.0)
        ax.annotate("Median Dist. = %.2f"%(np.median(np.abs(distance[select]))), xy=xy, xytext=xy)
        xy = (200.0, ax.get_ylim()[1] - 0.7)
        q1, q3 = np.percentile(np.abs(distance[select]), [25,75])
        print(q3, q1, q3-q1, np.size(distance[select][distance[select]<0])/(1.0*np.size(distance[select])), np.size(distance[select][distance[select]>0]))
        ax.annotate("IQR = %.2f"%(q3-q1), xy=xy, xytext=xy)

        axj = axj + 1
        if axj >= ncol:
            axj = 0
            axi = axi + 1

    if nrow*ncol > 1:
        for i in np.arange(ncol):
            all_axes[(nrow-1,i)].set_xlabel(r'Time (Myr)')
        for i in np.arange(nrow):
            all_axes[(i,0)].set_ylabel(r'Distance From ' + labels[property] + '[dex]')
    else:
        ax.set_xlabel(r'Time (Myr)')
        if ylabels is None:
            ax.set_ylabel(r'Distance to ' + labels[phase] + ' ' + labels[property] + ' [dex]')
        else:
            ax.set_ylabel(ylabels[0])

    if nrow*ncol == 1:
        plt.tight_layout()
    fig.savefig("stellar_distance_to_median.png")
    return
def plot_abundace_resolution_study():
    """Compare the inner-decile range of O and Ba abundances in each ISM
    phase between the 3.6 pc and 7.2 pc resolution runs (2x2 panel figure).

    Saves 'O_Ba_resolution_study.png'.
    """
    fields = ['O_Fraction','Ba_Fraction']
    property_list = ['d9_d1_range']
    phases = ['CNM','WNM','WIM','HIM']
    simulations = {'3pcH2' : './3pc_H2/abundances/gas_abundances.h5',
                   '6pcH2' : './6pc_H2/abundances/gas_abundances.h5'}

    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
    fig.set_size_inches(12, 12)
    fig.subplots_adjust(hspace=0.0, wspace=0.0)

    # Load every simulation's time series up front.
    all_time_data = {}
    for sim in simulations:
        data = h5py.File(simulations[sim])
        data_list = np.sort([x for x in list(data.keys()) if 'DD' in x])
        times = np.array([float(x.strip('DD')) for x in data_list])
        all_time_data[sim] = load_abundance_data(data, data_list,
                                                 fields, property_list)
        all_time_data[sim]['time'] = times - times[0]

    ls = {'CNM':'-','WNM':'-','WIM':'-','HIM':':'}
    sim_columns = ['3pcH2', '6pcH2']  # rows = fields, columns = resolutions
    for phase in phases:
        for row, field in enumerate(fields):
            for col, sim in enumerate(sim_columns):
                ax[(row, col)].plot(all_time_data[sim]['time'],
                                    all_time_data[sim][field][phase]['d9_d1_range'],
                                    color = color_dict[phase],
                                    ls = ls[phase], lw = line_width)

    for axis_row in ax:
        for panel in axis_row:
            panel.set_xlim(0, 500)
            panel.set_ylim(0, 3)
            panel.minorticks_on()

    for i in [0, 1]:
        ax[(i,0)].set_ylabel(r'Inner Decile Range [dex]')
        ax[(1,i)].set_xlabel(r'Time (Myr)')

    x = 300
    y = 2.7
    size = 20
    panel_labels = {(0, 0): r'O - 3.6 pc', (1, 0): r'Ba - 3.6 pc',
                    (0, 1): r'O - 7.2 pc', (1, 1): r'Ba - 7.2 pc'}
    for loc, text in panel_labels.items():
        ax[loc].text(x, y, text, color = 'black', size = size)

    plt.minorticks_on()
    fig.savefig('O_Ba_resolution_study.png')
    return
def plot_abundance_evolution(time, data,
                             fields = ['O_Fraction','Ba_Fraction'],
                             property_list = ['median','IQR'],
                             phases = ['CNM','WNM','WIM','HIM'],
                             figdim=None,
                             field_types = None,
                             labels = None, line_styles = None,
                             xlim = None, ylim = None,
                             annotate_text = None, fsize=5):
    """Plot the time evolution of abundance statistics, one panel per
    (field, property) pair with one line per ISM phase.

    Saves '<fields>_<properties>_evolution.png' and returns (fig, ax).
    """
    # BUG FIX: the documented defaults previously crashed —
    # line_styles=None hit line_styles.keys() (AttributeError) and
    # labels=None hit labels[property] (TypeError).
    if line_styles is None:
        line_styles = {}
    if labels is None:
        labels = {}
    labels = dict(labels)  # don't mutate the caller's dict
    for p in property_list:
        if p not in labels:
            labels[p] = p

    if figdim is None:
        ncol = len(property_list)
        nrow = len(fields)
    else:
        nrow, ncol = figdim

    data_list = np.array(np.sort([x for x in list(data.keys()) if 'DD' in x]))
    data_list = data_list[:len(time)]

    start = cpu_time.time()
    time_data = load_abundance_data(data, data_list, fields, property_list,
                                    phases = phases, field_types = field_types)
    print("DATA LOADING TOOK %2.2E"%(cpu_time.time() - start))

    fig, ax = plt.subplots(nrow, ncol)
    fig.set_size_inches(fsize*ncol, fsize*nrow)

    axi = 0
    axj = 0
    xval = np.array(time)
    xval = xval - xval[0]
    for field in fields:
        for property in property_list:
            # with a single row matplotlib returns a 1-D axes array
            if len(fields) == 1:
                axindex = axj
            else:
                axindex = (axi, axj)

            for phase in phases:
                yval = time_data[field][phase][property]
                ls = line_styles.get(phase, '-')
                ax[axindex].plot(xval, yval, lw = line_width,
                                 ls = ls, color = color_dict[phase],
                                 label = phase)

            ax[axindex].set_xlabel(r'Time (Myr)')
            if hasattr(labels[property], "keys"):
                # per-field label dict for this property
                ax[axindex].set_ylabel(labels[property][field])
            else:
                ax[axindex].set_ylabel(labels[property])

            if xlim is None:
                ax[axindex].set_xlim(xval[0], xval[-1])
            else:
                ax[axindex].set_xlim(xlim)
            if not (ylim is None):
                if np.size(fields) == 1:
                    ymin, ymax = ylim[axindex]
                else:
                    ymin, ymax = ylim[axindex[0]][axindex[1]]
                ax[axindex].set_ylim(ymin, ymax)

            axj = axj + 1
            if axj >= ncol:
                axj = 0
                axi = axi + 1

    if len(fields) == 1:
        indexzero = 0
    else:
        indexzero = (0, 0)
    ax[indexzero].legend(loc='lower right', ncol=2)

    if not annotate_text is None:
        # BUG FIX: was `for axi in len(fields)` (TypeError: 'int' object is
        # not iterable) and then indexed the annotate_text *list* with a
        # tuple.  Each entry of annotate_text is (text, (x, y)) for the
        # first-column panel of the corresponding row.
        for axi in range(len(fields)):
            text, xy = annotate_text[axi]
            axindex1 = axi if len(fields) == 1 else (axi, 0)
            ax[axindex1].annotate(text, xy, xy)

    plt.minorticks_on()
    fig.savefig('_'.join(fields + property_list) + '_evolution.png')
    return fig, ax
if __name__ == "__main__":
    # plot_abundace_resolution_study()

    # Analysis file produced by the gas-abundance pipeline; may be given on
    # the command line, otherwise defaults to the current directory.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = './gas_abundances.h5'
    data = h5py.File(filename)

    #time = np.arange(0.0, 10.0*(len( [x for x in data.keys() if 'DD' in x])-1) + 0.1, 10.0)
    # Output times (Myr) are encoded in the group names ('DD0619' -> 619.0).
    time = [float(x.strip('DD')) for x in np.sort(list(data.keys())) if 'DD' in x]

    # NOTE(review): hard-coded snapshot name — presumably the final output of
    # this particular run; confirm before reusing elsewhere.
    galaxy = Galaxy('DD0619')

    plot_stellar_2d_hist(galaxy, 'O_Fraction')

    plot_stellar_separation(time, data, galaxy,
                            field = 'O_Fraction',
                            property = 'median',
                            phases = ['CNM'],
                            labels = {'median' : 'Median'},
                            ylabels = [r"[O/H] - [O/H]$_{\rm CNM}$"])

    plot_abundance_evolution(time, data,
                             fields = ['O_Fraction','Ba_Fraction'],
                             phases = ['CNM','WNM','WIM','HIM'],
                             property_list = ['median','mean_median_distance','IQR','d9_d1_range'],
                             labels = {'median' : r'log(Median)',
                                       'IQR' : r'Inner Quartile Range [dex]',
                                       'mean_median_distance' : r'log(Mean) - log(Median) [dex]',
                                       'd9_d1_range' : r'Inner Decile Range [dex]'},
                             line_styles = {'HIM' : ':'},
                             ylim = [ [(-8,-2),(0.,1.5),(0.0,1.5),(0.0,3.0)], [(-16,-10),(0.,1.5),(0.0,1.5),(0.0,3.0)]],
                             # ylim = [ [(-1,8),(-3,3),(-3,3.0)], [(-1,8),(-3,3),(-3,3.0)]],
                             annotate_text = [ ('Oxygen', (20.0,-2.5)), ('Barium',(20,-10.5))],
                             xlim = (0.0, 500.0))

    plot_abundance_evolution(time, data,
                             fields = ['O_Fraction','N_Fraction'],
                             phases = ['CNM','WNM','WIM','HIM'],
                             property_list = ['median','IQR','d9_d1_range'],
                             labels = {'median' : r'log(Median)',
                                       'IQR' : r'Inner Quartile Range [dex]',
                                       'd9_d1_range' : r'Inner Decile Range [dex]'},
                             line_styles = {'HIM' : ':'},
                             ylim = [ [(-8,-2),(0.0,1.5),(0.0,3.0)], [(-9,-3),(0.0,1.5),(0.0,3.0)]],
                             # ylim = [ [(-1,8),(-3,3),(-3,3.0)], [(-1,8),(-3,3),(-3,3.0)]],
                             annotate_text = [ ('Oxygen', (20.0,-2.5)), ('Nitrogen',(-8.5,-10.5))])
|
aemerick/galaxy_analysis
|
paperII/abundace_evolution.py
|
Python
|
mit
| 19,582
|
[
"Galaxy"
] |
e50036dd2b9f4a5a2dea0c8dcefcf04980e834b4c6aafc76169f1d45aa14928f
|
# -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import random
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
# Demo image used when captioning from a raw image file.
test_image_path='./data/acoustic-guitar-player.jpg'
# Serialized pre-trained VGG16 graph used as the image encoder.
vgg_path='./data/vgg16-20160129.tfmodel'
# Vocabulary size; presumably 2 ids are reserved for the <UNK>/end
# tokens (the maps below use n+1 and n+2) — TODO confirm.
n=50000-2
def map_lambda():
    """Default factory for the word->id defaultdict: unknown words map to
    id n+1 (the <UNK> slot).  A named module-level function (rather than a
    lambda) so the defaultdict can be pickled.
    """
    return n+1
def rev_map_lambda():
    """Default factory for the id->word defaultdict: unknown ids map to the
    literal token "<UNK>".  Named (not a lambda) so it can be pickled.
    """
    return "<UNK>"
def load_text(n,capts,num_samples=None):
    """Load the cached vocabulary maps and pre-built training arrays.

    The heavy preprocessing (dictionary parsing, one-hot mapping) is kept in
    the commented-out blocks below; this function now only deserializes the
    pickled maps and the .npy arrays produced by earlier runs.

    Python 2 code (bare print statements).
    """
    # # fname = 'Oxford_English_Dictionary.txt'
    # # txt = []
    # # with open(fname,'rb') as f:
    # #     txt = f.readlines()
    # # txt = [x.decode('utf-8').strip() for x in txt]
    # # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
    # # List of words
    # # word_list = [x.split(' ', 1)[0].strip() for x in txt]
    # # # List of definitions
    # # def_list = [x.split(' ', 1)[1].strip()for x in txt]
    # with open('./training_data/training_data.pkl','rb') as raw:
    #     word_list,dl=pkl.load(raw)
    # def_list=[]
    # # def_list=[' '.join(defi) for defi in def_list]
    # i=0
    # wd={}
    # while i<len( dl):
    #     defi=dl[i]
    #     if len(defi)>0:
    #         def_list+=[' '.join(defi)]
    #         i+=1
    #         if word_list[i-1] not in wd:
    #             wd[word_list[i-1]]=[]
    #             wd[word_list[i-1]].append(def_list[-1])
    #     else:
    #         dl.pop(i)
    #         word_list.pop(i)
    # maxlen=0
    # minlen=100
    # for defi in def_list:
    #     minlen=min(minlen,len(defi.split()))
    #     maxlen=max(maxlen,len(defi.split()))
    # print(minlen)
    # print(maxlen)
    maxlen=30  # fixed caption/definition length used when the arrays were built
    # # Initialize the "CountVectorizer" object, which is scikit-learn's
    # # bag of words tool.
    # vectorizer = CountVectorizer(analyzer = "word",   \
    #                              tokenizer = None,    \
    #                              preprocessor = None, \
    #                              stop_words = None,   \
    #                              max_features = None, \
    #                              token_pattern='\\b\\w+\\b') # Keep single character words
    # _map,rev_map=get_one_hot_map(word_list,def_list,n,captlist=capts)
    # pkl.dump(_map,open('mapaoh.pkl','wb'))
    # pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
    # Cached word<->id maps from a previous preprocessing run.
    _map=pkl.load(open('mapaoh.pkl','rb'))
    rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
    # NOTE(review): this condition looks inverted — overwriting num_samples
    # only when the caller *did* provide one; presumably `is None` was meant.
    if num_samples is not None:
        num_samples=len(capts)
    # X = map_one_hot(word_list[:num_samples],_map,1,n)
    # y = (36665, 56210)
    # print _map
    if capts is not None:
        # y,mask,auxsent,auxmask,auxword,auxchoices = map_one_hot(capts[:num_samples],_map,maxlen,n,aux=True,wd=wd)
        # np.save('maskmainaux',mask)
        # np.save('ycoh',y)
        # np.save('yaux',auxsent)
        # np.save('maskaux',auxmask)
        # np.save('Xaux',auxword)
        # np.save('caux',auxchoices)
        print capts
        # Memory-mapped caption arrays cached by a previous run.
        y=np.load('ycoh.npy','r')
        auxmask=np.load('maskaux.npy','r')
        mask=np.load('maskmainaux.npy','r')
        auxword=np.load('Xaux.npy','r')
        auxsent=np.load('yaux.npy','r')
        auxchoices=np.load('caux.npy','r')
    else:
        # np.save('X',X)
        # np.save('yc',y)
        # np.save('maskc',mask)
        mask=np.load('maskaoh.npy','r')
        y=np.load('yaoh.npy','r')
        X=np.load('Xaoh.npy','r')
    print (np.max(y))
    if capts is not None:
        # NOTE(review): X is only assigned in the `else` branch above, so
        # this return raises NameError when capts is not None — the X load
        # is commented out; confirm intended behavior.
        return X, y,mask,rev_map,auxsent,auxmask,auxword,auxchoices
    return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n,captlist=None):
    """Build word<->id maps over the dictionary text and (optionally) the
    caption corpus, id-ordered by descending word frequency.

    Returns (_map, rev_map): defaultdicts mapping word->id and id->word,
    with unseen entries defaulting to the <UNK> slot (see map_lambda /
    rev_map_lambda).  Python 2 code (bare print statements).
    """
    # words={}
    # for line in to_def:
    #     if line:
    #         words[line.split()[0]]=1
    # counts=defaultdict(int)
    # uniq=defaultdict(int)
    # for line in corpus:
    #     for word in line.split():
    #         if word not in words:
    #             counts[word]+=1
    # words=list(words.keys())
    words=[]
    counts=defaultdict(int)
    uniq=defaultdict(int)
    # Count word frequencies across definitions and corpus.
    # (`word not in words` is always True here since words stays empty.)
    for line in to_def+corpus:
        for word in line.split():
            if word not in words:
                counts[word]+=1
    _map=defaultdict(map_lambda)
    rev_map=defaultdict(rev_map_lambda)
    # words=words[:25000]
    # Histogram of frequency-of-frequencies (diagnostics only).
    for i in counts.values():
        uniq[i]+=1
    # print (len(words))
    counts2=defaultdict(int)
    # Caption words (minus the start token) get priority ids.
    if captlist is not None:
        for line in captlist:
            for word in line.split():
                if word=='#START#':
                    continue
                counts2[word]+=1
    print len(counts.keys()),len(counts2.keys())
    # Most frequent caption words first, capped at 3000 ids...
    words=list(map(lambda z:z[0],reversed(sorted(counts2.items(),key=lambda x:x[1]))))[:n-len(words)]
    # random.shuffle(words)
    words=words[:3000]
    for word in words:
        if word in counts:
            del counts[word]
    print len(counts.keys()),len(counts2.keys())
    # ...then fill the remaining ids with the most frequent corpus words.
    words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
    print (len(words))
    i=0
    # random.shuffle(words)
    # for num_bits in range(binary_dim):
    #     for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
    #         bitmap=np.zeros(binary_dim)
    #         bitmap[np.array(bit_config)]=1
    #         num=bitmap*(2** np.arange(binary_dim ))
    #         num=np.sum(num).astype(np.uint32)
    #         word=words[i]
    #         _map[word]=num
    #         rev_map[num]=word
    #         i+=1
    #         if i>=len(words):
    #             break
    #     if i>=len(words):
    #         break
    # Id 0 is the start token; ids 1..len(words) follow frequency order.
    _map['#START#']=0
    for word in words:
        i+=1
        _map[word]=i
        rev_map[i]=word
    rev_map[n+1]='<UNK>'
    # NOTE(review): `zero_end_tok` is a module-level flag defined elsewhere
    # in this file (not visible here) — it selects how id 0 is rendered.
    if zero_end_tok:
        rev_map[0]='.'
    else:
        rev_map[0]='Start'
        rev_map[n+2]='End'
    # print (list(reversed(sorted(uniq.items()))))
    print (len(list(uniq.items())))
    # print rev_map
    return _map,rev_map
def map_word_emb(corpus,_map):
    """Map each word in `corpus` to its id via `_map`.

    When the module-level flag `get_rand_vec` (defined elsewhere in this
    file) is truthy, also returns a parallel array of ids for randomly
    drawn *different* words — used as negative samples.
    """
    ### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
    rtn=[]
    rtn2=[]
    for word in corpus:
        mapped=_map[word]
        rtn.append(mapped)
        if get_rand_vec:
            # Draw a random vocabulary word distinct from `word`.
            mapped_rand=random.choice(list(_map.keys()))
            while mapped_rand==word:
                mapped_rand=random.choice(list(_map.keys()))
            mapped_rand=_map[mapped_rand]
            rtn2.append(mapped_rand)
    if get_rand_vec:
        return np.array(rtn),np.array(rtn2)
    return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n,aux=None,wd=None):
    """Encode a corpus into padded id (or one-hot) arrays plus masks.

    maxlen == 1 encodes single words; otherwise encodes sentences of up to
    `maxlen` tokens with a start slot at index 0 and an end token appended.
    When `wd` (word -> list of definition sentences) is given, up to 5
    auxiliary definition sentences per line are also encoded.

    Relies on the module-level flags `form2` and `zero_end_tok` defined
    elsewhere in this file.  Python 2 code.
    """
    if maxlen==1:
        if not form2:
            # One-hot rows of width n+3 (vocab + UNK + end slots).
            total_not=0
            rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
            for l,line in enumerate(corpus):
                if len(line)==0:
                    rtn[l,-1]=1
                else:
                    mapped=_map[line]
                    # NOTE(review): 75001 is presumably a hard-coded n+1
                    # (<UNK>) for the 75k vocab — confirm.
                    if mapped==75001:
                        total_not+=1
                    rtn[l,mapped]=1
            print (total_not,len(corpus))
            return rtn
        else:
            # Dense id per word instead of one-hot.
            total_not=0
            rtn=np.zeros([len(corpus)],dtype=np.float32)
            for l,line in enumerate(corpus):
                if len(line)==0:
                    # NOTE(review): rtn is 1-D here, so this 2-D index
                    # raises IndexError on an empty line — latent bug.
                    rtn[l,-1]=1
                else:
                    mapped=_map[line]
                    if mapped==75001:
                        total_not+=1
                    rtn[l]=mapped
            print (total_not,len(corpus))
            return rtn
    else:
        # Sentence encoding: ids (or float ids when form2) with 2 extra
        # slots for the start position and end token.
        if form2:
            rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
        else:
            rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
        print (rtn.shape)
        mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
        print (mask.shape)
        mask[:,1]=1.0
        totes=0
        nopes=0
        wtf=0
        # Auxiliary outputs: up to 5 definition sentences per input line.
        rtn3=[]
        rtn3=np.zeros([len(corpus),5,maxlen+2],dtype=np.int32)
        rtn4=[]
        rtn4=np.zeros([len(corpus),5,maxlen+2],dtype=np.float32)
        rtn5=[]
        rtn5=np.zeros([len(corpus),5,1],dtype=np.int32)
        rtn6=[]
        rtn6=np.zeros([len(corpus),5,1],dtype=np.float32)
        for l,_line in enumerate(corpus):
            x=0
            line=_line.split()
            auxlist=[]
            auxmask=[]
            auxword=[]
            auxchoices=[]
            for i in range(min(len(line),maxlen)):
                # if line[i] not in _map:
                #     nopes+=1
                mapped=_map[line[i]]
                rtn[l,i+1]=mapped
                y=0
                # If the word has a dictionary definition, encode one
                # randomly chosen definition as an auxiliary sentence.
                if not (wd is None) and mapped!=n+1 and line[i] in wd:
                    tempsent=np.zeros([1,maxlen+2],dtype=np.int32)
                    sent=random.choice(wd[line[i]])
                    tempmask=np.zeros([1,maxlen+2],dtype=np.float32)
                    tempword=np.ones([1,1],dtype=np.int32)
                    tempmask[0,1]=1.0
                    tempword*=mapped
                    tempchoice=np.ones([1,1],dtype=np.float32)
                    for j in range(min(len(sent),maxlen)):
                        m2=_map[sent[j]]
                        tempsent[0,j+1]=m2
                        tempmask[0,j+1]=1.0
                        y=j+1
                    # NOTE(review): overwrites the *last* token with the end
                    # id n+2 rather than appending after it — confirm intent.
                    tempsent[0,y]=n+2
                    tempmask[0,y]=1.0
                    auxlist.append(tempsent)
                    auxmask.append(tempmask)
                    auxword.append(tempword)
                    auxchoices.append(tempchoice)
                if mapped==n+1:
                    wtf+=1
                mask[l,i+1]=1.0
                totes+=1
                x=i+1
            # Terminate the main sentence (id 0 or n+2 per zero_end_tok).
            to_app=n+2
            if zero_end_tok:
                to_app=0
            rtn[l,x+1]=to_app
            mask[l,x+1]=1.0
            # Keep exactly 5 auxiliary sentences: subsample when there are
            # more, zero-pad when fewer.
            ilist=np.arange(len(auxlist))
            if len(auxlist)>=5:
                random.shuffle(ilist)
                auxlist=np.concatenate(auxlist)
                auxlist=auxlist[ilist[:5]]
            elif len(auxlist)>0:
                # print auxlist
                # print [x.shape for x in auxlist]
                auxlist+=[np.zeros([5-len(auxlist),maxlen+2],dtype=np.int32)]
                # print auxlist
                # print [x.shape for x in auxlist]
                auxlist=np.concatenate(auxlist)
            else:
                auxlist=np.zeros([5,maxlen+2],dtype=np.int32)
            # rtn3.append(auxlist)
            rtn3[l,:,:]=auxlist
            # print rtn3
            if len(auxmask)>=5:
                auxmask=np.concatenate(auxmask)
                auxmask=auxmask[ilist[:5]]
            elif len(auxmask)>0:
                auxmask+=[np.zeros([5-len(auxmask),maxlen+2],dtype=np.float32)]
                auxmask=np.concatenate(auxmask)
            else:
                auxmask=np.zeros([5,maxlen+2],dtype=np.float32)
            # rtn4.append(auxmask)
            # print l
            rtn4[l,:,:]=auxmask
            if len(auxword)>=5:
                auxword=np.concatenate(auxword)
                auxword=auxword[ilist[:5]]
            elif len(auxlist)>0:
                auxword+=[np.zeros([5-len(auxword),1],dtype=np.int32)]
                auxword=np.concatenate(auxword)
            else:
                auxword=np.zeros([5,1],dtype=np.int32)
            # rtn5.append(auxword)
            rtn5[l,:,:]=auxword
            if len(auxchoices)>=5:
                auxchoices=np.concatenate(auxchoices)
                auxchoices=auxchoices[ilist[:5]]
            elif len(auxlist)>0:
                auxchoices+=[np.zeros([5-len(auxchoices),1],dtype=np.float32)]
                auxchoices=np.concatenate(auxchoices)
            else:
                auxchoices=np.zeros([5,1],dtype=np.float32)
            # rtn6.append(auxchoices)
            rtn6[l,:,:]=auxchoices
        print (nopes,totes,wtf)
        if not (aux is None):
            # print np.array(rtn6)[-1],np.array(rtn4)[-1]
            # return rtn,mask,np.array(rtn3),np.array(rtn4),np.array(rtn5),np.array(rtn6)
            return rtn,mask,rtn3,rtn4,rtn5,rtn6
        else:
            return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
    """Xavier (Glorot) uniform initialization of a (fan_in, fan_out)
    weight tensor, scaled by `constant`.

    https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound, maxval=bound,
                             dtype=tf.float32)
class Caption_Generator():
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None):
    """Set up the caption-generator graph variables.

    Parameters:
        dim_in       : image feature dimension
        dim_embed    : word embedding dimension
        dim_hidden   : LSTM hidden size
        batch_size   : fixed batch size used by the placeholders
        n_lstm_steps : unrolled sequence length (maxlen + 2)
        n_words      : vocabulary size
        init_b       : optional initial value for the word-encoding bias
        from_image   : if True, also load the serialized VGG16 graph so raw
                       images can be encoded
        n_input      : input vocabulary size for the cpu-side embedding
        n_lstm_input : hidden size of the auxiliary (definition) LSTM
        n_z          : latent/word-encoding dimension
    """
    self.dim_in = dim_in
    self.dim_embed = dim_embed
    self.dim_hidden = dim_hidden
    self.batch_size = batch_size
    self.n_lstm_steps = n_lstm_steps
    self.n_words = n_words
    self.n_input = n_input
    self.n_lstm_input=n_lstm_input
    self.n_z=n_z

    if from_image:
        # Load the frozen VGG16 graph and wire an image placeholder into it.
        with open(vgg_path,'rb') as f:
            fileContent = f.read()
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fileContent)
        self.images = tf.placeholder("float32", [1, 224, 224, 3])
        tf.import_graph_def(graph_def, input_map={"images":self.images})
        graph = tf.get_default_graph()
        self.sess = tf.InteractiveSession(graph=graph)
    self.from_image=from_image

    # declare the variables to be used for our word embeddings
    self.word_embedding = tf.Variable(tf.random_uniform([self.n_z, self.dim_embed], -0.1, 0.1), name='word_embedding')
    self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')

    # declare the LSTM itself
    self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
    # auxiliary LSTM used for the definition-decoding loss
    self.dlstm = tf.contrib.rnn.BasicLSTMCell(n_lstm_input)

    # declare the variables to be used to embed the image feature embedding to the word embedding space
    self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
    self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')

    # declare the variables to go from an LSTM output to a word encoding output
    self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_z], -0.1, 0.1), name='word_encoding')

    # initialize this bias variable from the preProBuildWordVocab output
    # optional initialization setter for encoding bias variable
    if init_b is not None:
        self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
    else:
        self.word_encoding_bias = tf.Variable(tf.zeros([self.n_z]), name='word_encoding_bias')

    # CPU-pinned input embedding table (embedding_lookup on large tables).
    with tf.device('/cpu:0'):
        self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
        self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
    self.all_encoding_weights=[self.embw,self.embb]

    # Auxiliary-task placeholders: per caption, 5 definition sentences,
    # their masks, the defined word, and a validity choice flag; each is
    # flattened from (batch, 5, ...) to (batch*5, ...) for the aux LSTM.
    self.auxy_in=tf.placeholder(tf.int32,[self.batch_size,5,self.n_lstm_steps])
    self.auxy=tf.reshape(self.auxy_in,[self.batch_size*5,-1])
    self.Xaux_in=tf.placeholder(tf.int32,[self.batch_size,5,1])
    self.Xaux=tf.reshape(self.Xaux_in,[self.batch_size*5])
    self.auxmask_in=tf.placeholder(tf.float32,[self.batch_size,5,self.n_lstm_steps])
    self.auxmask=tf.reshape(self.auxmask_in,[self.batch_size*5,-1])
    self.auxchoices_in=tf.placeholder(tf.float32,[self.batch_size,5,1])
    self.auxchoices=tf.reshape(self.auxchoices_in,[self.batch_size*5,-1])
    self.flatauxchoices=tf.reshape(self.auxchoices,[-1])
def build_model(self):
    """Build the training graph: unrolled caption LSTM regressing onto the
    variational word embeddings, plus KLD and auxiliary-definition losses.

    Returns (total_loss, img_placeholder, caption_placeholder, mask).
    """
    # declaring the placeholders for our extracted image feature vectors, our caption, and our mask
    # (describes how long our caption is with an array of 0/1 values of length `maxlen`
    img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
    caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
    mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
    self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])

    network_weights = self._initialize_weights()
    self.network_weights=network_weights

    # getting an initial LSTM embedding from our image_imbedding
    image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias

    flat_caption_placeholder=tf.reshape(caption_placeholder,[-1])

    #leverage one-hot sparsity to lookup embeddings fast
    # Target embeddings come from the variational encoder and are treated
    # as constants for this loss (stop_gradient).
    embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True)
    embedded_input=tf.stop_gradient(embedded_input)
    KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
    KLD_loss=tf.reduce_sum(KLD_loss)
    KLD_loss=tf.stop_gradient(KLD_loss)

    # CPU-pinned lookup of the trainable input embeddings.
    with tf.device('/cpu:0'):
        word_embeddings=tf.nn.embedding_lookup(self.embw,flat_caption_placeholder)
        word_embeddings+=self.embb
    word_embeddings=tf.reshape(word_embeddings,[self.batch_size,self.n_lstm_steps,-1])
    embedded_input=tf.reshape(embedded_input,[self.batch_size,self.n_lstm_steps,-1])
    # embedded_input=tf.nn.l2_normalize(embedded_input,dim=-1)

    #initialize lstm state
    state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
    rnn_output=[]
    total_loss=0
    with tf.variable_scope("RNN"):
        # unroll lstm
        for i in range(self.n_lstm_steps):
            if i > 0:
                # if this isn't the first iteration of our LSTM we need to get the word_embedding corresponding
                # to the (i-1)th word in our caption
                current_embedding = word_embeddings[:,i-1,:]
            else:
                #if this is the first iteration of our LSTM we utilize the embedded image as our input
                current_embedding = image_embedding
            if i > 0:
                # allows us to reuse the LSTM tensor variable on each iteration
                tf.get_variable_scope().reuse_variables()

            out, state = self.lstm(current_embedding, state)

            if i>0:
                # NOTE(review): `out` is projected here AND again inside the
                # loss term below, i.e. the encoding matmul is applied twice
                # to the same activations — looks like a bug; confirm.
                out=tf.matmul(out,self.word_encoding)+self.word_encoding_bias
                # L2 regression of the projected output onto the target
                # embedding, masked to real caption positions.
                total_loss+=tf.reduce_sum(tf.reduce_sum(tf.square((tf.matmul(out,self.word_encoding)+self.word_encoding_bias)-embedded_input[:,i,:]),axis=-1)*mask[:,i])
                #perform classification of output

    # rnn_output=tf.concat(rnn_output,axis=1)
    # rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
    # encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
    # encoded_output=tf
    # #get loss
    # # normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
    # # normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
    # # cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
    # # cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
    # # cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
    # # cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
    # # cos_sim=cos_sim/tf.reduce_sum(mask[:,1:])
    # # self.exp_loss=tf.reduce_sum((-cos_sim))
    # # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
    # # total_loss = tf.reduce_sum(-(cos_sim))
    # mse=tf.reduce_sum(tf.reshape(tf.square(encoded_output-embedded_input),[self.batch_size,self.n_lstm_steps,-1]),axis=-1)[:,1:]*(mask[:,1:])
    # mse=tf.reduce_sum(mse)/tf.reduce_sum(mask[:,1:])
    #average over timeseries length
    # total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
    # total_loss=mse
    # NOTE(review): print_loss is assigned twice; the first assignment
    # (pre-normalization) is immediately superseded below.
    self.print_loss=total_loss
    total_loss+=KLD_loss
    total_loss/=tf.reduce_sum(mask[:,1:])
    self.print_loss=total_loss
    total_loss+=self.get_aux_loss()
    return total_loss, img, caption_placeholder, mask
def get_aux_loss(self):
    """Build the auxiliary decoder loss graph.

    Runs a second LSTM (``self.dlstm``) over the auxiliary targets
    ``self.auxy`` inside variable scope "RNNaux", accumulating a masked
    sparse-softmax cross-entropy per time step plus KL-divergence penalties
    from the variational word/input embeddings.

    Returns:
        Tensor: normalized cross-entropy loss plus the input-embedding KL
        penalty (also stored as ``self.aux_loss`` and ``self.aux_KLD``).
    """
    # NOTE(review): start_token_tensor is built but never used below.
    start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
    network_weights=self.network_weights
    # Per-example sequence lengths derived from the auxiliary mask
    # (only used by the commented-out CTC path below).
    seqlen=tf.cast(tf.reduce_sum(self.auxmask,reduction_indices=-1),tf.int32)
    # Weight applied to every KL-divergence term below.
    KLD_penalty=1e-3
    # Use recognition network to determine mean and
    # (log) variance of Gaussian distribution in latent
    # space
    if not same_embedding:
        input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
    else:
        input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
    # NOTE(review): batch_size*5 presumably means 5 auxiliary sequences per
    # example — confirm against how auxy/auxmask are laid out by the caller.
    state = self.dlstm.zero_state(self.batch_size*5, dtype=tf.float32)
    loss = 0
    self.debug=0
    probs=[]
    with tf.variable_scope("RNNaux"):
        for i in range(self.n_lstm_steps):
            if i > 0:
                # Embed the previous target word variationally, then apply the
                # LSTM affine projection; accumulate the per-word KL term.
                # current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
                current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.auxy[:,i-1])
                current_embedding=tf.matmul(current_embedding,network_weights['LSTM']['affine_weight'])+network_weights['LSTM']['affine_bias']
                # if transfertype2:
                # 	current_embedding=tf.stop_gradient(current_embedding)
                loss+=tf.reduce_sum(KLD_loss*self.auxmask[:,i]*self.flatauxchoices)*KLD_penalty
            else:
                # First step is fed the (variational) input embedding.
                current_embedding = input_embedding
            if i > 0:
                # Reuse the LSTM weights across time steps.
                tf.get_variable_scope().reuse_variables()
            out, state = self.dlstm(current_embedding, state)
            if i > 0:
                onehot=self.auxy[:,i]
                logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
                # if not use_ctc:
                # Cross-entropy against the current target word, masked by both
                # the auxiliary mask and the per-example choice flags.
                xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
                xentropy = xentropy * self.auxmask[:,i]*self.flatauxchoices
                xentropy=tf.reduce_sum(xentropy)
                # self.debug+=xentropy
                loss += xentropy
                # else:
                # 	probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
    # if not use_ctc:
    # 	loss_ctc=0
    # 	self.debug=self.debug/tf.reduce_sum(self.mask[:,1:])
    # else:
    # 	probs=tf.concat(probs,axis=1)
    # 	probs=ctc_loss.get_output_probabilities(probs,self.auxy[:,1:,:])
    # 	loss_ctc=ctc_loss.loss(probs,self.auxy[:,1:,:],self.n_lstm_steps-2,self.batch_size,seqlen-1)
    # 	self.debug=tf.reduce_sum(input_embedding_KLD_loss)/self.batch_size*KLD_penalty+loss_ctc
    # Normalize by the number of unmasked, chosen target positions.
    self.aux_loss = (loss / tf.reduce_sum(self.auxmask[:,1:]*self.auxchoices))
    self.aux_KLD=tf.reduce_sum(input_embedding_KLD_loss*self.flatauxchoices)*KLD_penalty#+loss_ctc
    return self.aux_loss+self.aux_KLD
def build_generator(self, maxlen, batchsize=1,from_image=False):
    """Build the greedy-decoding inference graph.

    Feeds the image embedding into the LSTM on the first step, then feeds
    the embedding of the argmax word back in for up to ``maxlen`` steps.

    Args:
        maxlen: maximum number of words to emit.
        batchsize: batch size used for the LSTM state.
        from_image: unused here; kept for interface compatibility.

    Returns:
        (img placeholder, list of per-step argmax word-id tensors).

    NOTE(review): the image placeholder is sized with ``self.batch_size``
    while the LSTM state uses ``batchsize`` — these must agree at run time.
    """
    #same setup as `build_model` function
    img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
    image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
    state = self.lstm.zero_state(batchsize,dtype=tf.float32)
    #declare list to hold the words of our generated captions
    all_words = []
    with tf.variable_scope("RNN"):
        # in the first iteration we have no previous word, so we directly pass in the image embedding
        # and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
        output, state = self.lstm(image_embedding, state)
        previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
        for i in range(maxlen):
            tf.get_variable_scope().reuse_variables()
            out, state = self.lstm(previous_word, state)
            # get the maximum-probability word and its encoding from the output of the LSTM
            logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
            best_word = tf.argmax(logit, 1)
            with tf.device("/cpu:0"):
                # get the embedding of the best_word to use as input to the next iteration of our LSTM
                previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
            previous_word += self.embedding_bias
            all_words.append(best_word)
    self.img=img
    self.all_words=all_words
    return img, all_words
def _initialize_weights(self):
    """Create the variational-encoder and auxiliary-LSTM weight dictionaries.

    Returns a dict with keys:
      - 'input_meaning' (only when not ``same_embedding``): affine projection
        from latent space to LSTM input space;
      - 'variational_encoding' / 'biases_variational_encoding': mean and
        log-sigma parameters of the recognition network;
      - 'LSTM': affine + output-encoding weights, plus the ``dlstm`` cell.

    Side effect: appends all created encoding variables to
    ``self.all_encoding_weights``.
    """
    all_weights = dict()
    trainability=True
    if not same_embedding:
        all_weights['input_meaning'] = {
            'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
            'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
    # Large (n_input x n_z) mean matrix is kept on the CPU, matching the
    # cpu-pinned embedding lookups in _vae_sample.
    with tf.device('/cpu:0'):
        om=tf.Variable(xavier_init(self.n_input, self.n_z),name='out_mean',trainable=trainability)
    all_weights['biases_variational_encoding'] = {
        'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
        'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
    all_weights['variational_encoding'] = {
        'out_mean': om,
        'out_log_sigma': tf.Variable(xavier_init(self.n_input, self.n_z),name='out_log_sigma',trainable=trainability)}
    # self.no_reload+=all_weights['input_meaning'].values()
    # self.var_embs=[]
    # if transfertype2:
    # 	self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
    # self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
    # if lstm_stack>1:
    # 	self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
    all_weights['LSTM'] = {
        'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight2'),
        'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias2'),
        'encoding_weight': tf.Variable(xavier_init(self.n_lstm_input,self.n_input),name='encoding_weight'),
        'encoding_bias': tf.Variable(tf.zeros(self.n_input),name='encoding_bias')
    }
    # Collect every created Variable for later (un)freezing in train().
    all_encoding_weights=[all_weights[x].values() for x in all_weights]
    for w in all_encoding_weights:
        self.all_encoding_weights+=w
    # The cell itself is stashed after the Variable collection above so it is
    # not treated as a weight.
    all_weights['LSTM']['lstm']= self.dlstm
    return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
    """Sample a latent code for the auxiliary input ``self.Xaux`` and project
    it into LSTM input space.

    Args:
        ve_weights: [variational weights dict, variational biases dict].
        aff_weights: dict with 'affine_weight' / 'affine_bias'.

    Returns:
        (embedding tensor, KL-divergence tensor).
    """
    # Debug output of the auxiliary input shape (Python 2 print statement).
    print self.Xaux.shape
    # lookup=True: Xaux holds indices; sample=True: always draw stochastically.
    z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.Xaux,lookup=True,sample=True)
    embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
    return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
    """Variationally embed a batch of words.

    Args:
        ve_weights: [variational weights dict, variational biases dict].
        lstm_weights: unused here; kept for interface compatibility.
        x: word ids (or logits, depending on the flags below).
        logit: when True, treat x as indices and sample via embedding lookup.

    Returns:
        (embedding tensor, KL-divergence tensor). The latent sample itself is
        used as the embedding.
    """
    # x=tf.matmul(x,self.embw)+self.embb
    if logit:
        z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
    else:
        if not form2:
            z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
        else:
            # form2: feed a one-hot encoding through the dense recognition net.
            z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
            # all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
    # Fix: the original computed
    #   embedding = tf.matmul(z, self.word_embedding) + self.embedding_bias
    # and then immediately overwrote it with z, leaving a dead matmul node in
    # the graph. The dead computation is removed; behavior is unchanged.
    embedding=z
    return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False,sample=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla or sample:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if not vanilla or sample:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if not vanilla or sample:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla or sample:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
    """Center-crop and resize an image to (target_height, target_width).

    Args:
        x: file path (from_path=True) or an already-loaded image array.
        target_height/target_width: output size in pixels.
        as_float: cast the image to float32 before processing.
        from_path: load the image with cv2.imread first.

    Returns:
        The resized (target_height, target_width, 3) image array.
    """
    #image preprocessing to crop and resize image
    image = (x)
    if from_path==True:
        image=cv2.imread(image)
    if as_float:
        image = image.astype(np.float32)
    if len(image.shape) == 2:
        # Grayscale: replicate the single channel to 3 channels.
        image = np.tile(image[:,:,None], 3)
    elif len(image.shape) == 4:
        # Batched input: keep only the first channel-slice.
        image = image[:,:,:,0]
    height, width, rgb = image.shape
    if width == height:
        resized_image = cv2.resize(image, (target_height,target_width))
    elif height < width:
        # Landscape: scale so the shorter side matches, then center-crop width.
        # NOTE(review): cv2.resize takes dsize as (width, height); these calls
        # mix target_height/target_width in the tuples, which only agrees when
        # the two targets are equal (as with the square defaults used here) —
        # confirm before using non-square targets.
        resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
        cropping_length = int((resized_image.shape[1] - target_height) / 2)
        resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
    else:
        # Portrait: scale so the shorter side matches, then center-crop height.
        resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
        cropping_length = int((resized_image.shape[0] - target_width) / 2)
        resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
    # Final resize guarantees the exact requested output size.
    return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
    """Load an image from ``path`` (default: module-level ``test_image_path``),
    crop/resize it to 224x224, drop any alpha channel, and add a leading
    batch dimension.

    Returns:
        A (1, 224, 224, 3) image array.
    """
    # parses image from file path and crops/resizes
    if path is None:
        path=test_image_path
    # Fix: `crop_image` is a method of this class; the original called it as a
    # bare global name, which raises NameError at run time.
    img = self.crop_image(path, target_height=224, target_width=224)
    if img.shape[2] == 4:
        # Drop the alpha channel if present.
        img = img[:,:,:3]
    # Add a leading batch dimension.
    img = img[None, ...]
    return img
def get_caption(self,x=None):
    """Generate a caption for an image.

    Args:
        x: image file path (when ``self.from_image``) or path to a saved
            feature array (.npy) otherwise.

    Returns:
        The generated sentence, truncated at the first '.'.
    """
    #gets caption from an image by feeding it through imported VGG16 graph
    if self.from_image:
        # Fix: `read_image` is a method of this class; the original called it
        # as a bare global name, which raises NameError at run time.
        feat = self.read_image(x)
        # NOTE(review): `graph` is read from module scope here — confirm the
        # imported VGG16 graph is bound to a global named `graph`.
        fc7 = self.sess.run(graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
    else:
        fc7=np.load(x,'r')
    generated_word_index= self.sess.run(self.generated_words, feed_dict={self.img:fc7})
    generated_word_index = np.hstack(generated_word_index)
    # `ixtoword` comes from module scope (built during training).
    generated_words = [ixtoword[x] for x in generated_word_index]
    # Keep words up to and including the first full stop.
    punctuation = np.argmax(np.array(generated_words) == '.')+1
    generated_words = generated_words[:punctuation]
    generated_sentence = ' '.join(generated_words)
    return (generated_sentence)
def get_data(annotation_path, feature_path):
    """Load training/validation data.

    Args:
        annotation_path: tab-separated file with (image, caption) rows.
        feature_path: .npy file of precomputed image features.

    Returns:
        (features memory-mapped array, numpy array of caption strings).
    """
    caption_table = pd.read_table(annotation_path, sep='\t', header=None, names=['image', 'caption'])
    # Memory-map the feature file read-only instead of loading it wholesale.
    features = np.load(feature_path, 'r')
    return features, caption_table['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andre Karpathy's NeuralTalk
    """Build the vocabulary for training/validation captions.

    Words occurring fewer than ``word_count_threshold`` times are dropped.
    Index 0 is reserved: '#START#' in wordtoix, '.' in ixtoword.

    Args:
        sentence_iterator: iterable of caption strings.
        word_count_threshold: minimum occurrences for a word to be kept.

    Returns:
        (wordtoix, ixtoword, bias_init_vector) where the bias vector holds
        log word frequencies, shifted so the maximum entry is 0 (float32).
    """
    print('preprocessing %d word vocab' % (word_count_threshold, ))
    counts = {}
    num_sentences = 0
    for sentence in sentence_iterator:
        num_sentences += 1
        for token in sentence.lower().split(' '):
            counts[token] = counts.get(token, 0) + 1
    kept = [token for token in counts if counts[token] >= word_count_threshold]
    print('preprocessed words %d -> %d' % (len(counts), len(kept)))
    # Reserved slot 0, then kept words in first-seen order.
    ixtoword = {0: '.'}
    wordtoix = {'#START#': 0}
    for offset, token in enumerate(kept):
        wordtoix[token] = offset + 1
        ixtoword[offset + 1] = token
    # The period's "count" is the number of sentences (one per caption end).
    counts['.'] = num_sentences
    bias_init_vector = np.array([1.0 * counts[ixtoword[i]] for i in ixtoword])
    bias_init_vector /= np.sum(bias_init_vector)
    bias_init_vector = np.log(bias_init_vector)
    bias_init_vector -= np.max(bias_init_vector)
    return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
# Model/training hyperparameters consumed by train() below.
dim_embed = 256   # word-embedding dimensionality
dim_hidden = 256  # LSTM hidden-state size
dim_in = 4096     # input image-feature dimensionality (VGG16 fc7)
batch_size = 18   # training minibatch size
momentum = 0.9    # NOTE(review): defined but not used by the Adam optimizer below
n_epochs = 25     # number of training epochs
def train(learning_rate=0.001, continue_training=False):
    """Train the caption generator end to end.

    Builds the graph, loads features/captions, iterates minibatches with Adam,
    logs four loss components, and checkpoints once per epoch.

    Args:
        learning_rate: initial Adam learning rate (decayed x0.95 per epoch,
            although the optimizer is built once with the initial value).
        continue_training: restore the latest checkpoint from ``model_path``.
    """
    tf.reset_default_graph()
    feats, captions = get_data(annotation_path, feature_path)
    wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
    # Persist the index->word map for later decoding in test().
    np.save('data/ixtoword', ixtoword)
    print ('num words:',len(ixtoword))
    sess = tf.InteractiveSession()
    n_words = len(wordtoix)
    maxlen = 30
    # Auxiliary/variational training tensors (load_text defined elsewhere).
    X, final_captions, captmask, _map, auxy,auxmask,Xaux,auxchoices = load_text(50000-2,captions)
    running_decay=1
    decay_rate=0.9999302192204246
    # with tf.device('/gpu:0'):
    caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_z).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
    loss, image, sentence, mask = caption_generator.build_model()
    saver = tf.train.Saver(max_to_keep=100)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    tf.global_variables_initializer().run()
    # tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('modelsvardefdefvarall'))
    if continue_training:
        saver.restore(sess,tf.train.latest_checkpoint(model_path))
    # Four tracked series: print_loss, aux_loss, aux_KLD, total loss.
    losses=[[],[],[],[]]
    for epoch in range(n_epochs):
        if epoch==1:
            # Unfreeze the encoding weights after the first epoch.
            # NOTE(review): toggling .trainable after graph construction has no
            # effect on the already-built optimizer — confirm intent.
            for w in caption_generator.all_encoding_weights:
                w.trainable=True
        # Shuffle example order each epoch.
        index = (np.arange(len(feats)).astype(int))
        np.random.shuffle(index)
        index=index[:]
        i=0
        for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
            #format data batch
            current_feats = feats[index[start:end]]
            current_captions = captions[index[start:end]]
            # Vectorize captions (dropping the final token) and pad; prepend
            # the start token (index 0) column.
            current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
            current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
            current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
            current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
            nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
            current_capts=final_captions[index[start:end]]
            for ind, row in enumerate(current_mask_matrix):
                row[:nonzeros[ind]] = 1
            # NOTE(review): the hand-built mask above is immediately replaced
            # by the precomputed captmask — the loop above is effectively dead.
            current_mask_matrix=captmask[index[start:end]]
            _, loss_value,total_loss,aux_KLD,aux_loss = sess.run([train_op, caption_generator.print_loss,loss,caption_generator.aux_KLD,caption_generator.aux_loss], feed_dict={
                image: current_feats.astype(np.float32),
                caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
                mask : current_mask_matrix.astype(np.float32),
                sentence : current_capts.astype(np.float32),
                caption_generator.auxy_in:auxy[index[start:end]],
                caption_generator.Xaux_in:Xaux[index[start:end]],
                caption_generator.auxmask_in:auxmask[index[start:end]],
                caption_generator.auxchoices_in:auxchoices[index[start:end]]
                })
            print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
            losses[0].append(loss_value)
            losses[1].append(aux_loss)
            losses[2].append(aux_KLD)
            losses[3].append(total_loss)
            # losses.append(loss_value*running_decay)
            # if epoch<9:
            # 	if i%3==0:
            # 		running_decay*=decay_rate
            # else:
            # 	if i%8==0:
            # 		running_decay*=decay_rate
            i+=1
        # Latest value of each tracked loss (Python 2 print statement).
        print [x[-1] for x in losses]
        print("Saving the model from epoch: ", epoch)
        pkl.dump(losses,open('losses/loss_e2e.pkl','wb'))
        saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
        learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
    """Run greedy decoding for a single validation image and print the result.

    Args:
        sess: active TensorFlow session.
        image: image-feature placeholder from build_generator().
        generated_words: list of per-step argmax word-id tensors.
        ixtoword: index -> word mapping for decoding.
        idx: which precomputed feature row to caption.
    """
    feats, captions = get_data(annotation_path, feature_path)
    feat = np.array([feats[idx]])
    saver = tf.train.Saver()
    # Sanity-check mode skips checkpoint restore and uses random weights.
    sanity_check= False
    # sanity_check=True
    if not sanity_check:
        saved_path=tf.train.latest_checkpoint(model_path)
        saver.restore(sess, saved_path)
    else:
        tf.global_variables_initializer().run()
    generated_word_index= sess.run(generated_words, feed_dict={image:feat})
    generated_word_index = np.hstack(generated_word_index)
    generated_sentence = [ixtoword[x] for x in generated_word_index]
    print(generated_sentence)
if __name__=='__main__':
    # Paths to checkpoints, precomputed VGG features, and Flickr30k captions.
    model_path = './models/tensorflowcs'
    feature_path = './data/feats.npy'
    annotation_path = './data/results_20130124.token'
    import sys
    feats, captions = get_data(annotation_path, feature_path)
    # Module-level flags/sizes consumed by the class methods above.
    n_input=50000        # vocabulary size of the variational encoder
    binary_dim=n_input   # width of the (unused) start-token tensor
    n_lstm_input=512     # LSTM input dimensionality
    n_z=256              # latent-space dimensionality
    zero_end_tok=True
    form2=True           # feed one-hot words through the dense recognition net
    vanilla=True         # deterministic latent path unless sample=True
    onehot=False
    same_embedding=False # separate input_meaning projection weights
    if sys.argv[1]=='train':
        train()
    elif sys.argv[1]=='test':
        # Rebuild the generator with batch size 1 and decode one image.
        ixtoword = np.load('data/ixtoword.npy').tolist()
        n_words = len(ixtoword)
        maxlen=15
        sess = tf.InteractiveSession()
        batch_size=1
        caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
        image, generated_words = caption_generator.build_generator(maxlen=maxlen)
        test(sess,image,generated_words,ixtoword,1)
|
dricciardelli/vae2vec
|
capt_gen_o2e_aux_ms.py
|
Python
|
mit
| 39,724
|
[
"Gaussian"
] |
85721496c1929961a68dbaf177888e5eae24b3774d6c1c87a4d52879076701b7
|
"""This module defines an ASE interface to Turbomole
http://www.turbomole.com/
"""
import os
import sys
import numpy as np
from ase.units import Hartree, Bohr
from ase.io.turbomole import read_turbomole,write_turbomole
from ase.calculators.general import Calculator
class Turbomole(Calculator):
    """ASE calculator driving the TURBOMOLE programs ``dscf`` (energy) and
    ``grad`` (forces) via the shell, exchanging geometry through the
    Turbomole ``coord`` file in the working directory.

    Note: this module uses Python 2 syntax (print statements, ``except E, e``,
    ``xrange``).
    """
    def __init__(self, label='turbomole',
                 calculate_energy='dscf', calculate_forces='grad',
                 post_HF = False):
        # label: base name for this calculator instance.
        # calculate_energy/calculate_forces: shell commands to run.
        # post_HF: also read the correlation-energy column from the energy file.
        self.label = label
        self.converged = False
        # set calculators for energy and forces
        self.calculate_energy = calculate_energy
        self.calculate_forces = calculate_forces
        # turbomole has no stress
        self.stress = np.empty(6)
        # storage for energy and forces
        self.e_total = None
        self.forces = None
        self.updated = False
        # atoms must be set
        self.atoms = None
        # POST-HF method
        self.post_HF = post_HF
    def initialize(self, atoms):
        """Record atomic numbers and reset convergence state."""
        self.numbers = atoms.get_atomic_numbers().copy()
        self.species = []
        for a, Z in enumerate(self.numbers):
            self.species.append(Z)
        self.converged = False
    def execute(self, command):
        """Run a Turbomole command through the shell; abort on failure.

        Turbomole signals errors by printing 'abnormally' to stderr, which is
        translated into an OSError here.
        """
        from subprocess import Popen, PIPE
        try:
            # the sub process gets started here
            proc = Popen([command], shell=True, stderr=PIPE)
            error = proc.communicate()[1]
            # check the error output
            if 'abnormally' in error:
                raise OSError(error)
            #print 'TM command: ', command, 'successfully executed'
        except OSError, e:
            print >> sys.stderr, 'Execution failed:', e
            sys.exit(1)
    def get_potential_energy(self, atoms):
        """Return the total energy in eV, running dscf if atoms changed."""
        # update atoms
        self.updated = self.e_total is None
        self.set_atoms(atoms)
        # if update of energy is neccessary
        if self.update_energy:
            # calculate energy
            self.execute(self.calculate_energy + ' > ASE.TM.energy.out')
            # check for convergence of dscf cycle
            if os.path.isfile('dscf_problem'):
                print 'Turbomole scf energy calculation did not converge'
                raise RuntimeError(
                    'Please run Turbomole define and come thereafter back')
            # read energy
            self.read_energy()
        #else:
        #    print 'taking old values (E)'
        self.update_energy = False
        return self.e_total
    def get_forces(self, atoms):
        """Return forces in eV/Angstrom, running grad if atoms changed."""
        # update atoms
        self.updated = self.forces is None
        self.set_atoms(atoms)
        # complete energy calculations
        if self.update_energy:
            self.get_potential_energy(atoms)
        # if update of forces is neccessary
        if self.update_forces:
            # calculate forces
            self.execute(self.calculate_forces + ' > ASE.TM.forces.out')
            # read forces
            self.read_forces()
        #else:
        #    print 'taking old values (F)'
        self.update_forces = False
        return self.forces.copy()
    def get_stress(self, atoms):
        """Turbomole provides no stress; returns an uninitialized array."""
        return self.stress
    def set_atoms(self, atoms):
        """Adopt `atoms`, rewriting 'coord' and invalidating cached results
        unless the geometry is unchanged (within 1e-13 Angstrom)."""
        if self.atoms == atoms:
            if (self.updated and os.path.isfile('coord')):
                self.updated = False
                a = read_turbomole().get_positions()
                if np.allclose(a,atoms.get_positions(), rtol=0, atol=1e-13):
                    return
            else:
                return
        # performs an update of the atoms
        write_turbomole('coord', atoms)
        Calculator.set_atoms(self, atoms)
        # energy and forces must be re-calculated
        self.update_energy = True
        self.update_forces = True
    def read_energy(self):
        """Read Energy from Turbomole energy file."""
        text = open('energy', 'r').read().lower()
        lines = iter(text.split('\n'))
        # Energy:
        # Data lines have the total energy in column 2; for post-HF runs the
        # correlation energy is added from column 5.
        for line in lines:
            if line.startswith('$end'):
                break
            elif line.startswith('$'):
                pass
            else:
                energy_tmp = float(line.split()[1])
                if self.post_HF:
                    energy_tmp += float(line.split()[4])
        # update energy units
        self.e_total = energy_tmp * Hartree
    def read_forces(self):
        """Read Forces from Turbomole gradient file."""
        file = open('gradient', 'r')
        lines = file.readlines()
        file.close()
        forces = np.array([[0, 0, 0]])
        nline = len(lines)
        iline = -1
        # Find the last 'cycle' marker: gradients of the most recent cycle.
        for i in range(nline):
            if 'cycle' in lines[i]:
                iline = i
        if iline < 0:
            raise RuntimeError('Please check TURBOMOLE gradients')
        # next line
        iline += len(self.atoms) + 1
        # $end line
        nline -= 1
        # read gradients
        for i in xrange(iline, nline):
            # Turbomole writes Fortran-style exponents ('D' instead of 'E').
            line = lines[i].replace('D', 'E')
            tmp = np.array([[float(f) for f in line.split()[0:3]]])
            forces = np.concatenate((forces, tmp))
        # Note the '-' sign for turbomole, to get forces
        self.forces = (-np.delete(forces, np.s_[0:1], axis=0)) * Hartree / Bohr
|
askhl/ase
|
ase/calculators/turbomole.py
|
Python
|
gpl-2.0
| 5,333
|
[
"ASE",
"TURBOMOLE"
] |
5928a6fb7321db1772d5cf85f54edcd4c0373f12adf793d89475f663d506317c
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
from functools import partial
from collections import namedtuple
from time import sleep
from platform import python_implementation
from powerline.segments import shell, tmux, pdb, i3wm
from powerline.lib.vcs import get_fallback_create_watcher
from powerline.lib.unicode import out_u
import tests.vim as vim_module
from tests.lib import Args, urllib_read, replace_attr, new_module, replace_module_module, replace_env, Pl
from tests import TestCase, SkipTest
def get_dummy_guess(**kwargs):
    """Build a fake VCS ``guess`` function for tests.

    The returned callable yields an ``Args`` object whose ``branch`` is the
    basename of the given path; extra keyword arguments are passed through.
    When 'directory' is not supplied, it defaults to the path itself.
    """
    if 'directory' in kwargs:
        def guess(path, create_watcher):
            return Args(branch=lambda: out_u(os.path.basename(path)), **kwargs)
        return guess

    def guess(path, create_watcher):
        return Args(branch=lambda: out_u(os.path.basename(path)), directory=path, **kwargs)
    return guess
class TestShell(TestCase):
    """Tests for powerline.segments.shell segments."""
    def test_last_status(self):
        """last_status: None for 0/None, exit_fail group otherwise."""
        pl = Pl()
        segment_info = {'args': Args(last_exit_code=10)}
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
            {'contents': '10', 'highlight_groups': ['exit_fail']}
        ])
        segment_info['args'].last_exit_code = 0
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_exit_code = None
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
        # Signal names are treated like failing exit codes.
        segment_info['args'].last_exit_code = 'sigsegv'
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
            {'contents': 'sigsegv', 'highlight_groups': ['exit_fail']}
        ])
        segment_info['args'].last_exit_code = 'sigsegv+core'
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
            {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail']}
        ])
    def test_last_pipe_status(self):
        """last_pipe_status: None when all zero, one segment per command otherwise."""
        pl = Pl()
        segment_info = {'args': Args(last_pipe_status=[])}
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_pipe_status = [0, 0, 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_pipe_status = [0, 2, 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
            {'contents': '2', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True}
        ])
        segment_info['args'].last_pipe_status = [0, 'sigsegv', 'sigsegv+core']
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
            {'contents': 'sigsegv', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
            {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True}
        ])
        segment_info['args'].last_pipe_status = [0, 'sigsegv', 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
            {'contents': 'sigsegv', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True}
        ])
        segment_info['args'].last_pipe_status = [0, 'sigsegv+core', 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
            {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
            {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True}
        ])
    def test_jobnum(self):
        """jobnum: hidden at 0 unless show_zero=True."""
        pl = Pl()
        segment_info = {'args': Args(jobnum=0)}
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), None)
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), None)
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '0')
        segment_info = {'args': Args(jobnum=1)}
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), '1')
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), '1')
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '1')
    def test_continuation(self):
        """continuation: renders parser_state words, cmdsubst omission, alignment and renames."""
        pl = Pl()
        # No parser state: single empty auto-width segment.
        self.assertEqual(shell.continuation(pl=pl, segment_info={}), [{
            'contents': '',
            'width': 'auto',
            'highlight_groups': ['continuation:current', 'continuation'],
        }])
        segment_info = {'parser_state': 'if cmdsubst'}
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation'],
            },
            {
                'contents': 'cmdsubst',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation'],
                'width': 'auto',
                'align': 'r',
            },
            {
                'contents': 'cmdsubst',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        # Renames map contents; a None rename blanks the segment.
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': 'IF'}), [
            {
                'contents': 'IF',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': None}), [
            {
                'contents': '',
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        # Only the last word gets the :current group and auto width.
        segment_info = {'parser_state': 'then then then cmdsubst'}
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation'],
            },
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation'],
            },
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_groups': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])
    def test_cwd(self):
        """cwd: path splitting, ~ substitution, depth limiting, separators, errors."""
        new_os = new_module('os', path=os.path, sep='/')
        pl = Pl()
        cwd = [None]
        def getcwd():
            # Stub getcwd: returns the stored path or raises a stored exception.
            wd = cwd[0]
            if isinstance(wd, Exception):
                raise wd
            else:
                return wd
        segment_info = {'getcwd': getcwd, 'home': None}
        with replace_attr(shell, 'os', new_os):
            cwd[0] = '/abc/def/ghi/foo/bar'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # Home prefix is replaced with '~'.
            segment_info['home'] = '/abc/def/ghi'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # A shell-provided shortened_path takes precedence unless disabled.
            segment_info.update(shortened_path='~foo/ghi')
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '~foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_shortened_path=False), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            segment_info.pop('shortened_path')
            # Depth limiting with '...' ellipsis (customizable / removable).
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
                {'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # use_path_separator appends '/' and disables inner dividers.
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
                {'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
                {'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # dir_shorten_len truncates intermediate directory names.
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
                {'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            cwd[0] = '/etc'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            cwd[0] = '/'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # ENOENT is rendered as '[not found]'; other errors propagate.
            ose = OSError()
            ose.errno = 2
            cwd[0] = ose
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_groups': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
            ])
            cwd[0] = OSError()
            self.assertRaises(OSError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
            cwd[0] = ValueError()
            self.assertRaises(ValueError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
class TestTmux(TestCase):
	def test_attached_clients(self):
		"""attached_clients counts tmux clients and honors the ``minimum`` threshold."""
		# Canned tmux command output, keyed by subcommand name.  Any other
		# subcommand yields None, just like the original fall-through fake.
		fake_outputs = {
			'list-panes': 'session_name\n',
			'list-clients': '/dev/pts/2: 0 [191x51 xterm-256color] (utf8)\n/dev/pts/3: 0 [191x51 xterm-256color] (utf8)',
		}
		def fake_get_tmux_output(pl, cmd, *args):
			return fake_outputs.get(cmd)
		pl = Pl()
		with replace_attr(tmux, 'get_tmux_output', fake_get_tmux_output):
			# Two clients attached: segment shows the count …
			self.assertEqual(tmux.attached_clients(pl=pl), '2')
			# … but is suppressed when fewer than ``minimum`` clients exist.
			self.assertEqual(tmux.attached_clients(pl=pl, minimum=3), None)
class TestCommon(TestCase):
	"""Base class for tests of ``powerline.segments.common.*`` modules.

	Subclasses set ``module_name``; the matching segment module is imported
	once per class and exposed as ``cls.module``.
	"""
	@classmethod
	def setUpClass(cls):
		# __import__ returns the top-level 'powerline' package, so walk down
		# the attribute chain to reach the requested submodule.  The str()
		# wrappers keep the names native strings on both Python 2 and 3.
		module = __import__(str('powerline.segments.common.{0}'.format(cls.module_name)))
		cls.module = getattr(module.segments.common, str(cls.module_name))
class TestNet(TestCommon):
	"""Tests for the ``powerline.segments.common.net`` segment module."""
	module_name = 'net'
	def test_hostname(self):
		"""hostname() honors only_if_ssh and exclude_domain in and out of SSH."""
		pl = Pl()
		with replace_env('SSH_CLIENT', '192.168.0.12 40921 22') as segment_info:
			with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'):
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc')
			with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'):
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc.mydomain')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), 'abc')
			# Without SSH_CLIENT in the environment, only_if_ssh suppresses
			# the segment entirely.
			segment_info['environ'].pop('SSH_CLIENT')
			with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'):
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), None)
			with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'):
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
				self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), None)
	def test_external_ip(self):
		"""external_ip() reports the address returned by urllib_read."""
		pl = Pl()
		with replace_attr(self.module, 'urllib_read', urllib_read):
			self.assertEqual(self.module.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}])
	def test_internal_ip(self):
		"""internal_ip() picks addresses per interface/ipv, including default_gateway."""
		try:
			import netifaces
		except ImportError:
			raise SkipTest('netifaces module is not available')
		pl = Pl()
		# Fake per-interface address tables, keyed by netifaces address family.
		addr = {
			'enp2s0': {
				netifaces.AF_INET: [{'addr': '192.168.100.200'}],
				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777%enp2s0'}]
			},
			'lo': {
				netifaces.AF_INET: [{'addr': '127.0.0.1'}],
				netifaces.AF_INET6: [{'addr': '::1'}]
			},
			'teredo': {
				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777'}]
			},
		}
		interfaces = ['lo', 'enp2s0', 'teredo']
		with replace_module_module(
			self.module, 'netifaces',
			interfaces=(lambda: interfaces),
			ifaddresses=(lambda interface: addr[interface]),
			AF_INET=netifaces.AF_INET,
			AF_INET6=netifaces.AF_INET6,
		):
			self.assertEqual(self.module.internal_ip(pl=pl), '192.168.100.200')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='auto'), '192.168.100.200')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='lo'), '127.0.0.1')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo'), None)
			self.assertEqual(self.module.internal_ip(pl=pl, ipv=4), '192.168.100.200')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='auto', ipv=4), '192.168.100.200')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='lo', ipv=4), '127.0.0.1')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo', ipv=4), None)
			self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='auto', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='lo', ipv=6), '::1')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo', ipv=6), 'feff::5446:5eff:fe5a:7777')
			# Shrink the interface list in place (the lambda above closes over
			# it) and watch auto-selection fall back accordingly.
			interfaces[1:2] = ()
			self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777')
			interfaces[1:2] = ()
			self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), '::1')
			interfaces[:] = ()
			self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), None)
		gateways = {
			'default': {
				netifaces.AF_INET: ('192.168.100.1', 'enp2s0'),
				netifaces.AF_INET6: ('feff::5446:5eff:fe5a:0001', 'enp2s0')
			}
		}
		with replace_module_module(
			self.module, 'netifaces',
			interfaces=(lambda: interfaces),
			ifaddresses=(lambda interface: addr[interface]),
			gateways=(lambda: gateways),
			AF_INET=netifaces.AF_INET,
			AF_INET6=netifaces.AF_INET6,
		):
			# default gateway has specified address family
			self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=4), '192.168.100.200')
			self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
			# default gateway doesn't have specified address family
			gateways['default'] = {}
			self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=4), None)
			self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=6), None)
	def test_network_load(self):
		"""network_load() formats rx/tx rates once two samples are available.

		The segment samples byte counters on a background thread, so the test
		polls (with sleeps) until the thread has recorded usable data.
		"""
		def gb(interface):
			return None
		# f is a one-element mutable cell so the fake _get_bytes can be
		# swapped mid-test without re-patching the module.
		f = [gb]
		def _get_bytes(interface):
			return f[0](interface)
		pl = Pl()
		with replace_attr(self.module, '_get_bytes', _get_bytes):
			self.module.network_load.startup(pl=pl)
			try:
				self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
				sleep(self.module.network_load.interval)
				self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
				while 'prev' not in self.module.network_load.interfaces.get('eth0', {}):
					sleep(0.1)
				self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
				# gb2 advances counters by 1200 rx / 2400 tx bytes per sample,
				# i.e. 1 KiB/s down and 2 KiB/s up at the default interval.
				l = [0, 0]
				def gb2(interface):
					l[0] += 1200
					l[1] += 2400
					return tuple(l)
				f[0] = gb2
				while not self.module.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]:
					sleep(0.1)
				self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'DL 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'network_load:divider', 'contents': 'UL 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
				])
				self.assertEqual(self.module.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
				])
				self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 Kibps', 'highlight_groups': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'network_load:divider', 'contents': 's 2 Kibps', 'highlight_groups': ['network_load_sent', 'network_load']},
				])
				self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 kB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'network_load:divider', 'contents': 's 2 kB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
				])
				self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100},
					{'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
				])
				# Fuzzy matcher: the gradient level is timing-dependent, so
				# accept anything within 1 of the expected 50%.
				class ApproxEqual(object):
					def __eq__(self, i):
						return abs(i - 50.0) < 1
				self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [
					{'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()},
				])
			finally:
				self.module.network_load.shutdown()
class TestEnv(TestCommon):
	"""Tests for the ``powerline.segments.common.env`` segment module."""
	module_name = 'env'
	def test_user(self):
		"""user() resolves the username and applies hide_user/hide_domain/superuser rules."""
		new_os = new_module('os', getpid=lambda: 1)
		class Process(object):
			def __init__(self, pid):
				pass
			def username(self):
				return 'def@DOMAIN.COM'
			# NOTE: a class body can read the enclosing function's locals, so
			# ``self`` here is the test instance.  Mirror psutil builds where
			# Process.username is a property rather than a method.
			if hasattr(self.module, 'psutil') and not callable(self.module.psutil.Process.username):
				username = property(username)
		struct_passwd = namedtuple('struct_passwd', ('pw_name',))
		new_psutil = new_module('psutil', Process=Process)
		new_pwd = new_module('pwd', getpwuid=lambda uid: struct_passwd(pw_name='def@DOMAIN.COM'))
		new_getpass = new_module('getpass', getuser=lambda: 'def@DOMAIN.COM')
		pl = Pl()
		with replace_attr(self.module, 'pwd', new_pwd):
			with replace_attr(self.module, 'getpass', new_getpass):
				with replace_attr(self.module, 'os', new_os):
					with replace_attr(self.module, 'psutil', new_psutil):
						# Non-root user (euid 5).
						with replace_attr(self.module, '_geteuid', lambda: 5):
							self.assertEqual(self.module.user(pl=pl), [
								{'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
							])
							self.assertEqual(self.module.user(pl=pl, hide_user='abc'), [
								{'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
							])
							self.assertEqual(self.module.user(pl=pl, hide_domain=False), [
								{'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
							])
							self.assertEqual(self.module.user(pl=pl, hide_user='def@DOMAIN.COM'), None)
							self.assertEqual(self.module.user(pl=pl, hide_domain=True), [
								{'contents': 'def', 'highlight_groups': ['user']}
							])
						# Root (euid 0) adds the superuser highlight group.
						with replace_attr(self.module, '_geteuid', lambda: 0):
							self.assertEqual(self.module.user(pl=pl), [
								{'contents': 'def', 'highlight_groups': ['superuser', 'user']}
							])
	def test_cwd(self):
		"""cwd() splits/shortens the working directory and handles getcwd errors."""
		new_os = new_module('os', path=os.path, sep='/')
		pl = Pl()
		# cwd is a one-element cell: a string is returned as the working
		# directory, an Exception instance is raised instead.
		cwd = [None]
		def getcwd():
			wd = cwd[0]
			if isinstance(wd, Exception):
				raise wd
			else:
				return wd
		segment_info = {'getcwd': getcwd, 'home': None}
		with replace_attr(self.module, 'os', new_os):
			cwd[0] = '/abc/def/ghi/foo/bar'
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			# With home set, the prefix collapses to '~'.
			segment_info['home'] = '/abc/def/ghi'
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
				{'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
				{'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
				{'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
				{'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
				{'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
				{'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
			])
			cwd[0] = '/etc'
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			cwd[0] = '/'
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
			])
			# ENOENT (errno 2) is handled gracefully as '[not found]' …
			ose = OSError()
			ose.errno = 2
			cwd[0] = ose
			self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
				{'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_groups': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
			])
			# … while any other OSError or unrelated exception propagates.
			cwd[0] = OSError()
			self.assertRaises(OSError, self.module.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
			cwd[0] = ValueError()
			self.assertRaises(ValueError, self.module.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
	def test_virtualenv(self):
		"""virtualenv() prefers VIRTUAL_ENV over CONDA_DEFAULT_ENV, honoring ignore flags."""
		pl = Pl()
		with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info:
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
			segment_info['environ'].pop('VIRTUAL_ENV')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
		with replace_env('CONDA_DEFAULT_ENV', 'foo') as segment_info:
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'foo')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), 'foo')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
			segment_info['environ'].pop('CONDA_DEFAULT_ENV')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
		# Both variables set at once: VIRTUAL_ENV wins unless ignored.
		with replace_env('CONDA_DEFAULT_ENV', 'foo', environ={'VIRTUAL_ENV': '/sbc/def/ghi'}) as segment_info:
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), 'foo')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
			segment_info['environ'].pop('CONDA_DEFAULT_ENV')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
			self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
	def test_environment(self):
		"""environment() echoes a variable's value, or None when unset."""
		pl = Pl()
		variable = 'FOO'
		value = 'bar'
		with replace_env(variable, value) as segment_info:
			self.assertEqual(self.module.environment(pl=pl, segment_info=segment_info, variable=variable), value)
			segment_info['environ'].pop(variable)
			self.assertEqual(self.module.environment(pl=pl, segment_info=segment_info, variable=variable), None)
class TestVcs(TestCommon):
	"""Tests for the ``powerline.segments.common.vcs`` segment module."""
	module_name = 'vcs'
	def test_branch(self):
		"""branch() reflects the repo name, clean/dirty state and ignore_statuses."""
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		segment_info = {'getcwd': os.getcwd}
		branch = partial(self.module.branch, pl=pl, create_watcher=create_watcher)
		# Clean repository: status is None.
		with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: None, directory='/tmp/tests')):
			with replace_attr(self.module, 'tree_status', lambda repo, pl: None):
				self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
					'highlight_groups': ['branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
				self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
					'contents': 'tests',
					'highlight_groups': ['branch_clean', 'branch'],
					'divider_highlight_group': None
				}])
		# Dirty repository: status 'D '.
		with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: 'D ', directory='/tmp/tests')):
			with replace_attr(self.module, 'tree_status', lambda repo, pl: 'D '):
				self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
					'highlight_groups': ['branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
				self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
					'contents': 'tests',
					'highlight_groups': ['branch_dirty', 'branch'],
					'divider_highlight_group': None
				}])
			# Without status_colors the tree status is irrelevant.
			self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
				'highlight_groups': ['branch'],
				'contents': 'tests',
				'divider_highlight_group': None
			}])
		# No repository detected at all: segment is suppressed.
		with replace_attr(self.module, 'guess', lambda path, create_watcher: None):
			self.assertEqual(branch(segment_info=segment_info, status_colors=False), None)
		# ignore_statuses: matching statuses count as clean.
		with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: 'U')):
			with replace_attr(self.module, 'tree_status', lambda repo, pl: 'U'):
				self.assertEqual(branch(segment_info=segment_info, status_colors=False, ignore_statuses=['U']), [{
					'highlight_groups': ['branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
				self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['DU']), [{
					'highlight_groups': ['branch_dirty', 'branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
				self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
					'highlight_groups': ['branch_dirty', 'branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
				self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['U']), [{
					'highlight_groups': ['branch_clean', 'branch'],
					'contents': 'tests',
					'divider_highlight_group': None
				}])
	def test_stash(self):
		"""stash() shows the stash count and is hidden when it is zero."""
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		stash = partial(self.module.stash, pl=pl, create_watcher=create_watcher, segment_info={'getcwd': os.getcwd})
		def forge_stash(n):
			# Patch in a dummy repo reporting n stashed changesets.
			return replace_attr(self.module, 'guess', get_dummy_guess(stash=lambda: n, directory='/tmp/tests'))
		with forge_stash(0):
			self.assertEqual(stash(), None)
		with forge_stash(1):
			self.assertEqual(stash(), [{
				'highlight_groups': ['stash'],
				'contents': '1',
				'divider_highlight_group': None
			}])
		with forge_stash(2):
			self.assertEqual(stash(), [{
				'highlight_groups': ['stash'],
				'contents': '2',
				'divider_highlight_group': None
			}])
class TestTime(TestCommon):
	"""Tests for the ``powerline.segments.common.time`` segment module."""
	module_name = 'time'
	def test_date(self):
		"""date() formats via strftime; the fake echoes the format string back."""
		pl = Pl()
		with replace_attr(self.module, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))):
			self.assertEqual(self.module.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_groups': ['date'], 'divider_highlight_group': None}])
			self.assertEqual(self.module.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_groups': ['time', 'date'], 'divider_highlight_group': 'time:divider'}])
			unicode_date = self.module.date(pl=pl, format='\u231a', istime=True)
			expected_unicode_date = [{'contents': '\u231a', 'highlight_groups': ['time', 'date'], 'divider_highlight_group': 'time:divider'}]
			# PyPy3's strftime mishandles non-ASCII format strings; skip
			# rather than fail there (see linked upstream issue).
			if python_implementation() == 'PyPy' and sys.version_info >= (3,):
				if unicode_date != expected_unicode_date:
					raise SkipTest('Dates do not match, see https://bitbucket.org/pypy/pypy/issues/2161/pypy3-strftime-does-not-accept-unicode')
			self.assertEqual(unicode_date, expected_unicode_date)
	def test_fuzzy_time(self):
		"""fuzzy_time() renders spoken-style times; unicode_text swaps punctuation."""
		time = Args(hour=0, minute=45)
		pl = Pl()
		with replace_attr(self.module, 'datetime', Args(now=lambda: time)):
			self.assertEqual(self.module.fuzzy_time(pl=pl), 'quarter to one')
			time.hour = 23
			time.minute = 59
			self.assertEqual(self.module.fuzzy_time(pl=pl), 'round about midnight')
			time.minute = 33
			self.assertEqual(self.module.fuzzy_time(pl=pl), 'twenty-five to twelve')
			time.minute = 60
			self.assertEqual(self.module.fuzzy_time(pl=pl), 'twelve o\'clock')
			time.minute = 33
			self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=False), 'twenty-five to twelve')
			time.minute = 60
			self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=False), 'twelve o\'clock')
			# unicode_text=True uses U+2010 HYPHEN and U+2019 apostrophe.
			time.minute = 33
			self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=True), 'twenty‐five to twelve')
			time.minute = 60
			self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=True), 'twelve o’clock')
class TestSys(TestCommon):
	"""Tests for the ``powerline.segments.common.sys`` segment module."""
	module_name = 'sys'
	def test_uptime(self):
		"""uptime() humanizes seconds, truncating to shorten_len components."""
		pl = Pl()
		with replace_attr(self.module, '_get_uptime', lambda: 259200):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '3d', 'divider_highlight_group': 'background:divider'}])
		with replace_attr(self.module, '_get_uptime', lambda: 93784):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '1d 2h 3m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=4), [{'contents': '1d 2h 3m 4s', 'divider_highlight_group': 'background:divider'}])
		with replace_attr(self.module, '_get_uptime', lambda: 65536):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '18h 12m 16s', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=2), [{'contents': '18h 12m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=1), [{'contents': '18h', 'divider_highlight_group': 'background:divider'}])
		# Unsupported platform: the segment degrades to None.
		def _get_uptime():
			raise NotImplementedError
		with replace_attr(self.module, '_get_uptime', _get_uptime):
			self.assertEqual(self.module.uptime(pl=pl), None)
	def test_system_load(self):
		"""system_load() renders 1/5/15-minute load averages with gradients."""
		pl = Pl()
		with replace_module_module(self.module, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)):
			with replace_attr(self.module, '_cpu_count', lambda: 2):
				self.assertEqual(self.module.system_load(pl=pl), [
					{'contents': '7.5 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '3.5 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
					{'contents': '1.5', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}
				])
				self.assertEqual(self.module.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1), [
					{'contents': '8 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '4 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '2', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}
				])
				# short=True collapses the output to the 1-minute average only.
				self.assertEqual(self.module.system_load(pl=pl, short=True), [
					{'contents': '7.5', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
				])
				self.assertEqual(self.module.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1, short=True), [
					{'contents': '8', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
				])
	def test_cpu_load_percent(self):
		"""cpu_load_percent() formats psutil.cpu_percent() with a gradient."""
		try:
			__import__('psutil')
		except ImportError as e:
			raise SkipTest('Failed to import psutil: {0}'.format(e))
		pl = Pl()
		with replace_module_module(self.module, 'psutil', cpu_percent=lambda **kwargs: 52.3):
			self.assertEqual(self.module.cpu_load_percent(pl=pl), [{
				'contents': '52%',
				'gradient_level': 52.3,
				'highlight_groups': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
			self.assertEqual(self.module.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
				'contents': '52.3%',
				'gradient_level': 52.3,
				'highlight_groups': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
class TestWthr(TestCommon):
	"""Tests for the ``powerline.segments.common.wthr`` segment module."""
	module_name = 'wthr'
	def test_weather(self):
		"""weather() renders condition icon + temperature for mocked API replies.

		``urllib_read`` is the test-suite fake that serves canned weather
		responses, so the expected contents/gradients below are fixed.
		"""
		pl = Pl()
		with replace_attr(self.module, 'urllib_read', urllib_read):
			self.assertEqual(self.module.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			# Gradient level scales with the configured temperature range.
			self.assertEqual(self.module.weather(pl=pl, temp_coldest=0, temp_hottest=100), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 14.0}
			])
			self.assertEqual(self.module.weather(pl=pl, temp_coldest=-100, temp_hottest=-50), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 100}
			])
			# Custom icons may target either alias of the same condition.
			self.assertEqual(self.module.weather(pl=pl, icons={'blustery': 'o'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'o '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, icons={'windy': 'x'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'x '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			# Unit conversion: Fahrenheit and Kelvin.
			self.assertEqual(self.module.weather(pl=pl, unit='F'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '57°F', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, unit='K'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '287K', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, temp_format='{temp:.1e}C'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '1.4e+01C', 'gradient_level': 62.857142857142854}
			])
		with replace_attr(self.module, 'urllib_read', urllib_read):
			# startup()/shutdown() exercise the threaded segment lifecycle,
			# including a per-call location_query override.
			self.module.weather.startup(pl=pl, location_query='Meppen,06,DE')
			self.assertEqual(self.module.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, location_query='Moscow,RU'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_fair_night', 'weather_condition_night', 'weather_conditions', 'weather'], 'contents': 'NIGHT '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '9°C', 'gradient_level': 55.714285714285715}
			])
			self.module.weather.shutdown()
class TestI3WM(TestCase):
	"""Tests for the i3 window-manager segments (``i3wm.workspaces``,
	``i3wm.workspace``, ``i3wm.mode`` and ``i3wm.scratchpad``)."""
	@staticmethod
	def get_workspaces():
		# Fixture: four workspaces on three outputs with varying
		# focused/urgent/visible flags. Returns a fresh iterator per call so
		# each segment invocation can consume it from the start.
		return iter([
			{'name': '1: w1', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': False},
			{'name': '2: w2', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': True},
			{'name': '3: w3', 'output': 'HDMI1', 'focused': False, 'urgent': True, 'visible': True},
			{'name': '4: w4', 'output': 'DVI01', 'focused': True, 'urgent': True, 'visible': True},
		])
	def test_workspaces(self):
		# Checks state-derived highlight groups plus the only_show / strip /
		# output filters, including output taken from segment_info.
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Args(get_workspaces=self.get_workspaces)):
			segment_info = {}
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=None), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['focused', 'urgent']), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible']), [
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# strip=3 removes the leading "N: " prefix from workspace names.
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': 'w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': 'w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['focused', 'urgent'], output='DVI01'), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], output='HDMI1'), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3, output='LVDS1'), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
			])
			# An explicit output= argument takes precedence over
			# segment_info['output']; without it the segment_info value is used.
			segment_info['output'] = 'LVDS1'
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], output='HDMI1'), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
			])
	def test_workspace(self):
		# Single-workspace lookup: by name, with strip, unknown name (None),
		# default (focused workspace), and via segment_info['workspace'].
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Args(get_workspaces=self.get_workspaces)):
			segment_info = {}
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='1: w1'), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
			])
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='3: w3', strip=True), [
				{'contents': 'w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='9: w9'), None)
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# Explicit workspace= wins over segment_info['workspace'].
			segment_info['workspace'] = next(self.get_workspaces())
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='4: w4'), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, strip=True), [
				{'contents': 'w1', 'highlight_groups': ['workspace']},
			])
	def test_mode(self):
		# 'default' mode is hidden (None) unless remapped via names=.
		pl = Pl()
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'default'}), None)
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'test'}), 'test')
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'default'}, names={'default': 'test'}), 'test')
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'test'}, names={'default': 'test', 'test': 't'}), 't')
	def test_scratchpad(self):
		# Stub i3 connection exposing a fixed window tree; windows on the
		# '__i3_scratch' workspace count as hidden ('O'), others visible ('X').
		class Conn(object):
			def get_tree(self):
				return self
			def descendents(self):
				nodes_unfocused = [Args(focused = False)]
				nodes_focused = [Args(focused = True)]
				workspace_scratch = lambda: Args(name='__i3_scratch')
				workspace_noscratch = lambda: Args(name='2: www')
				return [
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_unfocused),
					Args(scratchpad_state='changed', urgent=True, workspace=workspace_noscratch, nodes=nodes_focused),
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_unfocused),
					Args(scratchpad_state=None, urgent=False, workspace=workspace_noscratch, nodes=nodes_unfocused),
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_focused),
					Args(scratchpad_state=None, urgent=True, workspace=workspace_noscratch, nodes=nodes_unfocused),
				]
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Conn()):
			self.assertEqual(i3wm.scratchpad(pl=pl), [
				{'contents': 'O', 'highlight_groups': ['scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:focused', 'scratchpad:visible', 'scratchpad']},
				{'contents': 'O', 'highlight_groups': ['scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:visible', 'scratchpad']},
				{'contents': 'O', 'highlight_groups': ['scratchpad:focused', 'scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:visible', 'scratchpad']},
			])
			# Custom icons keyed by scratchpad_state.
			self.assertEqual(i3wm.scratchpad(pl=pl, icons={'changed': '-', 'fresh': 'o'}), [
				{'contents': 'o', 'highlight_groups': ['scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:focused', 'scratchpad:visible', 'scratchpad']},
				{'contents': 'o', 'highlight_groups': ['scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:visible', 'scratchpad']},
				{'contents': 'o', 'highlight_groups': ['scratchpad:focused', 'scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:visible', 'scratchpad']},
			])
class TestMail(TestCommon):
	"""Placeholder tests for the mail segment module."""
	module_name = 'mail'  # module name picked up by TestCommon — presumably imported in its setup; verify against TestCommon
	def test_email_imap_alert(self):
		# TODO: exercise email_imap_alert once a suitable IMAP fixture exists
		pass
class TestPlayers(TestCommon):
	"""Placeholder tests for the media-players segment module."""
	module_name = 'players'  # module name picked up by TestCommon — presumably imported in its setup; verify against TestCommon
	def test_now_playing(self):
		# TODO: exercise now_playing once a player backend can be stubbed
		pass
class TestBat(TestCommon):
	"""Tests for the battery segment, with _get_battery_status stubbed out.

	The stub returns ``(capacity_percent, ac_online)``; gradient_level is
	expected to be ``100 - capacity`` (86% -> 14) for the plain display.
	"""
	module_name = 'bat'
	def test_battery(self):
		pl = Pl()
		def _get_battery_status(pl):
			# 86% charge, AC offline.
			return 86, False
		with replace_attr(self.module, '_get_battery_status', _get_battery_status):
			self.assertEqual(self.module.battery(pl=pl), [{
				'contents': ' 86%',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# format= receives capacity as a fraction (0.86), not a percent.
			self.assertEqual(self.module.battery(pl=pl, format='{capacity:.2f}'), [{
				'contents': '0.86',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			self.assertEqual(self.module.battery(pl=pl, steps=7), [{
				'contents': ' 86%',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# gamify=True renders an AC-state segment plus full/empty "heart"
			# segments; with the default 5 steps, 86% -> 4 full, 1 empty.
			self.assertEqual(self.module.battery(pl=pl, gamify=True), [
				{
					'contents': ' ',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_offline', 'battery_ac_state', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': 'OOOO',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': 'O',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
			# steps may be given as a string; 10 steps -> 8 full, 2 empty.
			self.assertEqual(self.module.battery(pl=pl, gamify=True, full_heart='+', empty_heart='-', steps='10'), [
				{
					'contents': ' ',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_offline', 'battery_ac_state', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': '++++++++',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': '--',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
	def test_battery_with_ac_online(self):
		# ac_online=True -> the online= string prefixes the percentage.
		pl = Pl()
		def _get_battery_status(pl):
			return 86, True
		with replace_attr(self.module, '_get_battery_status', _get_battery_status):
			self.assertEqual(self.module.battery(pl=pl, online='C', offline=' '), [
				{
					'contents': 'C 86%',
					'highlight_groups': ['battery_gradient', 'battery'],
					'gradient_level': 14,
				}])
	def test_battery_with_ac_offline(self):
		# ac_online=False -> the offline= string prefixes the percentage.
		pl = Pl()
		def _get_battery_status(pl):
			return 86, False
		with replace_attr(self.module, '_get_battery_status', _get_battery_status):
			self.assertEqual(self.module.battery(pl=pl, online='C', offline=' '), [
				{
					'contents': ' 86%',
					'highlight_groups': ['battery_gradient', 'battery'],
					'gradient_level': 14,
				}])
class TestVim(TestCase):
	"""Tests for powerline.segments.vim, run against the fake ``vim_module``
	(a vim emulator inserted on sys.path by setUpClass).

	Most tests use ``vim_module._with(...)`` context managers to temporarily
	set mode, buffer, options or visual-selection position.
	"""
	def test_mode(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'NORMAL')
		self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'i': 'INS'}), 'NORMAL')
		self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'n': 'NORM'}), 'NORM')
		with vim_module._with('mode', 'i') as segment_info:
			self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
		# Trailing NUL (as vim may report) must not break mode detection.
		with vim_module._with('mode', 'i\0') as segment_info:
			self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
		# CTRL-V (blockwise visual) is reported as the raw control character.
		with vim_module._with('mode', chr(ord('V') - 0x40)) as segment_info:
			self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'V-BLCK')
			self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'^V': 'VBLK'}), 'VBLK')
	def test_visual_range(self):
		# Blockwise modes report "cols x lines", linewise "L:n", charwise "C:n".
		pl = Pl()
		vr = partial(self.vim.visual_range, pl=pl)
		vim_module.current.window.cursor = [0, 0]
		try:
			with vim_module._with('mode', 'i') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '')
			with vim_module._with('mode', '^V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			with vim_module._with('mode', '^S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			with vim_module._with('mode', 'V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			with vim_module._with('mode', 'S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			with vim_module._with('mode', 'v') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			with vim_module._with('mode', 's') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
		finally:
			vim_module._close(1)
	def test_modified_indicator(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), None)
		# Editing the buffer marks it modified.
		segment_info['buffer'][0] = 'abc'
		try:
			self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), '+')
			self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info, text='-'), '-')
		finally:
			vim_module._bw(segment_info['bufnr'])
	def test_paste_indicator(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info), None)
		with vim_module._with('options', paste=1):
			self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info), 'PASTE')
			self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info, text='P'), 'P')
	def test_readonly_indicator(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info), None)
		with vim_module._with('bufoptions', readonly=1):
			self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info), 'RO')
			self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info, text='L'), 'L')
	def test_file_scheme(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), None)
		with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
			self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), None)
		with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
			self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), 'zipfile')
	def test_file_directory(self):
		# Covers unicode paths, undecodable bytes (rendered as <ff>),
		# $HOME collapsing to ~, and scheme-prefixed (zipfile:) paths.
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), None)
		with replace_env('HOME', '/home/foo', os.environ):
			with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/’’/')
			with vim_module._with('buffer', b'/tmp/\xFF\xFF/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/<ff><ff>/')
			with vim_module._with('buffer', '/tmp/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/')
				os.environ['HOME'] = '/tmp'
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '~/')
			with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
				# ~-collapsing must not apply inside a scheme-prefixed path.
				os.environ['HOME'] = '/tmp'
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
	def test_file_name(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), None)
		self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True), [
			{'contents': '[No file]', 'highlight_groups': ['file_name_no_file', 'file_name']}
		])
		self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True, no_file_text='X'), [
			{'contents': 'X', 'highlight_groups': ['file_name_no_file', 'file_name']}
		])
		with vim_module._with('buffer', '/tmp/abc') as segment_info:
			self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), 'abc')
		with vim_module._with('buffer', '/tmp/’’') as segment_info:
			self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), '’’')
		with vim_module._with('buffer', b'/tmp/\xFF\xFF') as segment_info:
			self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), '<ff><ff>')
	def test_file_size(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_size(pl=pl, segment_info=segment_info), '0 B')
		with vim_module._with('buffer', os.path.join(os.path.dirname(__file__), 'empty')) as segment_info:
			self.assertEqual(self.vim.file_size(pl=pl, segment_info=segment_info), '0 B')
	def test_file_opts(self):
		# file_format / file_encoding / file_type segments.
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_format(pl=pl, segment_info=segment_info), [
			{'divider_highlight_group': 'background:divider', 'contents': 'unix'}
		])
		self.assertEqual(self.vim.file_encoding(pl=pl, segment_info=segment_info), [
			{'divider_highlight_group': 'background:divider', 'contents': 'utf-8'}
		])
		self.assertEqual(self.vim.file_type(pl=pl, segment_info=segment_info), None)
		with vim_module._with('bufoptions', filetype='python'):
			self.assertEqual(self.vim.file_type(pl=pl, segment_info=segment_info), [
				{'divider_highlight_group': 'background:divider', 'contents': 'python'}
			])
	def test_window_title(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.window_title(pl=pl, segment_info=segment_info), None)
		with vim_module._with('wvars', quickfix_title='Abc'):
			self.assertEqual(self.vim.window_title(pl=pl, segment_info=segment_info), 'Abc')
	def test_line_percent(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		# 100 lines plus the original empty last line -> 101 lines total.
		segment_info['buffer'][0:-1] = [str(i) for i in range(100)]
		try:
			self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info), '1')
			vim_module._set_cursor(50, 0)
			self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info), '50')
			self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': '50', 'highlight_groups': ['line_percent_gradient', 'line_percent'], 'gradient_level': 50 * 100.0 / 101}
			])
		finally:
			vim_module._bw(segment_info['bufnr'])
	def test_line_count(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
		try:
			self.assertEqual(self.vim.line_count(pl=pl, segment_info=segment_info), '100')
			# line_count is cursor-independent.
			vim_module._set_cursor(50, 0)
			self.assertEqual(self.vim.line_count(pl=pl, segment_info=segment_info), '100')
		finally:
			vim_module._bw(segment_info['bufnr'])
	def test_position(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		try:
			segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
			vim_module._set_cursor(49, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info), '50%')
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': '50%', 'highlight_groups': ['position_gradient', 'position'], 'gradient_level': 50.0}
			])
			vim_module._set_cursor(0, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info), 'Top')
			vim_module._set_cursor(97, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Final')
			# When the whole buffer fits on screen the 'all' string is used.
			segment_info['buffer'][0:-1] = [str(i) for i in range(2)]
			vim_module._set_cursor(0, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Todo')
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': 'All', 'highlight_groups': ['position_gradient', 'position'], 'gradient_level': 0.0}
			])
		finally:
			vim_module._bw(segment_info['bufnr'])
	def test_cursor_current(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.line_current(pl=pl, segment_info=segment_info), '1')
		self.assertEqual(self.vim.col_current(pl=pl, segment_info=segment_info), '1')
		# virtcol gradient level is col / textwidth(=80) * 100.
		self.assertEqual(self.vim.virtcol_current(pl=pl, segment_info=segment_info), [{
			'highlight_groups': ['virtcol_current_gradient', 'virtcol_current', 'col_current'], 'contents': '1', 'gradient_level': 100.0 / 80,
		}])
		self.assertEqual(self.vim.virtcol_current(pl=pl, segment_info=segment_info, gradient=False), [{
			'highlight_groups': ['virtcol_current', 'col_current'], 'contents': '1',
		}])
	def test_modified_buffers(self):
		pl = Pl()
		self.assertEqual(self.vim.modified_buffers(pl=pl), None)
	def test_branch(self):
		# VCS branch segment with guess/tree_status stubbed; status_colors and
		# ignore_statuses control the clean/dirty highlight group.
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		branch = partial(self.vim.branch, pl=pl, create_watcher=create_watcher)
		with vim_module._with('buffer', '/foo') as segment_info:
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: None)):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: None):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_clean', 'branch'], 'contents': 'foo'}
					])
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: 'DU')):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: 'DU'):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: 'U')):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: 'U'):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False, ignore_statuses=['U']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['DU']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['U']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_clean', 'branch'], 'contents': 'foo'}
					])
	def test_stash(self):
		# Stash count segment: hidden (None) at 0, shows the count otherwise.
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		with vim_module._with('buffer', '/foo') as segment_info:
			stash = partial(self.vim.stash, pl=pl, create_watcher=create_watcher, segment_info=segment_info)
			def forge_stash(n):
				return replace_attr(self.vcs, 'guess', get_dummy_guess(stash=lambda: n))
			with forge_stash(0):
				self.assertEqual(stash(), None)
			with forge_stash(1):
				self.assertEqual(stash(), [{
					'divider_highlight_group': 'stash:divider',
					'highlight_groups': ['stash'],
					'contents': '1'
				}])
			with forge_stash(2):
				self.assertEqual(stash(), [{
					'divider_highlight_group': 'stash:divider',
					'highlight_groups': ['stash'],
					'contents': '2'
				}])
	def test_file_vcs_status(self):
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		file_vcs_status = partial(self.vim.file_vcs_status, pl=pl, create_watcher=create_watcher)
		with vim_module._with('buffer', '/foo') as segment_info:
			with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
				self.assertEqual(file_vcs_status(segment_info=segment_info), [
					{'highlight_groups': ['file_vcs_status_M', 'file_vcs_status'], 'contents': 'M'}
				])
			with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: None)):
				self.assertEqual(file_vcs_status(segment_info=segment_info), None)
		with vim_module._with('buffer', '/bar') as segment_info:
			# Non-file buffers (buftype=nofile) never show VCS status.
			with vim_module._with('bufoptions', buftype='nofile'):
				with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
					self.assertEqual(file_vcs_status(segment_info=segment_info), None)
	def test_trailing_whitespace(self):
		pl = Pl()
		with vim_module._with('buffer', 'tws') as segment_info:
			trailing_whitespace = partial(self.vim.trailing_whitespace, pl=pl, segment_info=segment_info)
			# Each case is asserted twice to exercise the segment's caching.
			self.assertEqual(trailing_whitespace(), None)
			self.assertEqual(trailing_whitespace(), None)
			vim_module.current.buffer[0] = ' '
			self.assertEqual(trailing_whitespace(), [{
				'highlight_groups': ['trailing_whitespace', 'warning'],
				'contents': '1',
			}])
			self.assertEqual(trailing_whitespace(), [{
				'highlight_groups': ['trailing_whitespace', 'warning'],
				'contents': '1',
			}])
			vim_module.current.buffer[0] = ''
			self.assertEqual(trailing_whitespace(), None)
			self.assertEqual(trailing_whitespace(), None)
	def test_tabnr(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.tabnr(pl=pl, segment_info=segment_info, show_current=True), '1')
		self.assertEqual(self.vim.tabnr(pl=pl, segment_info=segment_info, show_current=False), None)
	def test_tab(self):
		# Emits literal %NT / %T tabline labels, not displayable contents.
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.tab(pl=pl, segment_info=segment_info), [{
			'contents': None,
			'literal_contents': (0, '%1T'),
		}])
		self.assertEqual(self.vim.tab(pl=pl, segment_info=segment_info, end=True), [{
			'contents': None,
			'literal_contents': (0, '%T'),
		}])
	def test_bufnr(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.bufnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['bufnr']))
		self.assertEqual(self.vim.bufnr(pl=pl, segment_info=segment_info, show_current=False), None)
	def test_winnr(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.winnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['winnr']))
		self.assertEqual(self.vim.winnr(pl=pl, segment_info=segment_info, show_current=False), None)
	def test_segment_info(self):
		# tab_modified_indicator: reflects modification state of any buffer in
		# the tab, even when the current buffer itself is unmodified.
		pl = Pl()
		with vim_module._with('tabpage'):
			with vim_module._with('buffer', '1') as segment_info:
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				vim_module.current.buffer[0] = ' '
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_groups': ['tab_modified_indicator', 'modified_indicator'],
				}])
				vim_module._undo()
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				old_buffer = vim_module.current.buffer
				vim_module._new('2')
				segment_info = vim_module._get_segment_info()
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				old_buffer[0] = ' '
				self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), None)
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_groups': ['tab_modified_indicator', 'modified_indicator'],
				}])
	def test_csv_col_current(self):
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		def csv_col_current(**kwargs):
			# Clear the segment's csv cache so every assertion re-parses.
			self.vim.csv_cache and self.vim.csv_cache.clear()
			return self.vim.csv_col_current(pl=pl, segment_info=segment_info, **kwargs)
		buffer = segment_info['buffer']
		try:
			self.assertEqual(csv_col_current(), None)
			buffer.options['filetype'] = 'csv'
			self.assertEqual(csv_col_current(), None)
			buffer[:] = ['1;2;3', '4;5;6']
			vim_module._set_cursor(1, 1)
			self.assertEqual(csv_col_current(), [{
				'contents': '1', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			vim_module._set_cursor(2, 3)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			vim_module._set_cursor(2, 3)
			# Without a header row the column "name" is just the number.
			self.assertEqual(csv_col_current(display_name=True), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (2)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			buffer[:0] = ['Foo;Bar;Baz']
			vim_module._set_cursor(2, 3)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Bar)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			if sys.version_info < (2, 7):
				raise SkipTest('csv module in Python-2.6 does not handle multiline csv files well')
			# Quoted multi-line field: cursor inside it still resolves columns.
			buffer[len(buffer):] = ['1;"bc', 'def', 'ghi', 'jkl";3']
			vim_module._set_cursor(5, 1)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Bar)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			vim_module._set_cursor(7, 6)
			self.assertEqual(csv_col_current(), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Baz)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			self.assertEqual(csv_col_current(name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (B)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=True, name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (B)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=False, name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=False), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}])
		finally:
			vim_module._bw(segment_info['bufnr'])
	@classmethod
	def setUpClass(cls):
		# Put the fake vim module first on sys.path so that
		# powerline.segments.vim binds to the emulator, not real vim.
		sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
		from powerline.segments import vim
		cls.vim = vim
		from powerline.segments.common import vcs
		cls.vcs = vcs
	@classmethod
	def tearDownClass(cls):
		sys.path.pop(0)
class TestPDB(TestCase):
	"""Tests for the PDB prompt segments (current line/file/code-name/context
	and stack depth), driven by hand-built ``segment_info`` dictionaries."""
	def test_current_line(self):
		pl = Pl()
		info = {'curframe': Args(f_lineno=10)}
		self.assertEqual(pdb.current_line(pl=pl, segment_info=info), '10')
	def test_current_file(self):
		pl = Pl()
		def call(**kwargs):
			# Build the fake frame fresh per call, like the segment would see it.
			frame = Args(f_code=Args(co_filename='/tmp/abc.py'))
			return pdb.current_file(pl=pl, segment_info={'curframe': frame}, **kwargs)
		self.assertEqual(call(), 'abc.py')
		self.assertEqual(call(basename=True), 'abc.py')
		self.assertEqual(call(basename=False), '/tmp/abc.py')
	def test_current_code_name(self):
		pl = Pl()
		def call(**kwargs):
			frame = Args(f_code=Args(co_name='<module>'))
			return pdb.current_code_name(pl=pl, segment_info={'curframe': frame}, **kwargs)
		self.assertEqual(call(), '<module>')
	def test_current_context(self):
		pl = Pl()
		def call(**kwargs):
			frame = Args(f_code=Args(co_name='<module>', co_filename='/tmp/abc.py'))
			return pdb.current_context(pl=pl, segment_info={'curframe': frame}, **kwargs)
		self.assertEqual(call(), 'abc.py')
	def test_stack_depth(self):
		pl = Pl()
		def call(**kwargs):
			# Stack of 3 frames, 1 of which existed before debugging started.
			info = {'pdb': Args(stack=[1, 2, 3]), 'initial_stack_length': 1}
			return pdb.stack_depth(pl=pl, segment_info=info, **kwargs)
		self.assertEqual(call(), '2')
		self.assertEqual(call(full_stack=False), '2')
		self.assertEqual(call(full_stack=True), '3')
# Saved working directory, restored by tearDownModule.
old_cwd = None
def setUpModule():
	"""Run tests from this file's directory (fixtures use relative paths)."""
	global old_cwd
	global __file__
	old_cwd = os.getcwd()
	# Absolutize __file__ first so dirname stays valid after chdir.
	__file__ = os.path.abspath(__file__)
	os.chdir(os.path.dirname(__file__))
def tearDownModule():
	"""Restore the working directory saved by setUpModule."""
	global old_cwd
	os.chdir(old_cwd)
if __name__ == '__main__':
	# Delegate to the project's shared test runner.
	from tests import main
	main()
|
S0lll0s/powerline
|
tests/test_segments.py
|
Python
|
mit
| 84,994
|
[
"FEFF"
] |
30a01426d6334af081df527cde440036a950bcf4e3bb9c7faa2d94f6c0faac2a
|
#!/usr/bin/python
"""
Copyright 2010 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import cgi
import Cookie
import MySQLdb
import dbSession
import dbShared
import re
# Determine the URL of this CGI script (empty when not run under a web server)
try:
	url = os.environ['SCRIPT_NAME']
except KeyError:
	url = ''
form = cgi.FieldStorage()
# Read cookies; fall back to form parameters when no cookie header is present
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
	cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
	useCookies = 0
if useCookies:
	try:
		currentUser = cookies['userID'].value
	except KeyError:
		currentUser = ''
	try:
		loginResult = cookies['loginAttempt'].value
	except KeyError:
		loginResult = 'success'
	try:
		sid = cookies['gh_sid'].value
	except KeyError:
		sid = form.getfirst('gh_sid', '')
else:
	currentUser = ''
	loginResult = 'success'
	sid = form.getfirst('gh_sid', '')
# Form inputs: chosen UI theme and free-text in-game info
uiTheme = form.getfirst("uiTheme", '')
inGameInfo = form.getfirst("inGameInfo", '')
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
uiTheme = dbShared.dbInsertSafe(uiTheme)
inGameInfo = dbShared.dbInsertSafe(inGameInfo)
# Resolve the session id to a user (2592000 s = 30 day session lifetime)
logged_state = 0
linkappend = ''
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
	logged_state = 1
	currentUser = sess
	if (useCookies == 0):
		linkappend = 'gh_sid=' + sid
# Validate the inputs, accumulating all error messages before deciding
errstr=''
if (len(uiTheme) < 1):
	errstr = errstr + "That is not a valid theme. \r\n"
if (logged_state == 0):
	errstr = errstr + "You must be logged in to update your theme. \r\n"
if len(inGameInfo) > 255:
	errstr = errstr + "Error: game info is too large (255 characters or less allowed)."
if re.search('[><&]', inGameInfo):
	errstr = errstr + "Error: game info contains illegal characters (no HTML allowed)."
if (errstr != ''):
	result = "Your other info could not be updated because of the following errors:\r\n" + errstr
else:
	# NOTE(review): inputs pass through dbInsertSafe above, but this is still
	# string-built SQL; parameterized queries would be safer.
	conn = dbShared.ghConn()
	cursor = conn.cursor()
	cursor.execute("UPDATE tUsers SET themeName='" + uiTheme + "', inGameInfo='" + inGameInfo + "' WHERE userID='" + currentUser + "';")
	cursor.close()
	conn.close()
	result = "Other Info Updated"
	if useCookies:
		# Mirror the new theme into a cookie valid for one week
		cookies['uiTheme'] = uiTheme
		cookies['uiTheme']['max-age'] = (86400 * 7)
		print cookies
# Emit the CGI response
print "Content-Type: text/html\n"
print result
|
clreinki/GalaxyHarvester
|
udUserOther.py
|
Python
|
agpl-3.0
| 2,904
|
[
"Galaxy"
] |
61e987eb7bc59fb6fcbde0a1a37851ade04c449b4d5e8653966692a68851e242
|
import logging
import os
import pandas
import pysam
import cStringIO as StringIO
import sys
from grocsvs import utilities
from grocsvs.utilities import get_key
class Dataset(object):
    """Base class for per-sample datasets.

    Serialized form is a plain dict carrying a "type" field with the
    concrete subclass name, which deserialize() uses to rebuild it.
    """
    def serialize(self):
        # Shallow-copy so the live instance __dict__ is not mutated; drop the
        # back-reference to the owning sample and record the class name.
        d = self.__dict__.copy()
        d["type"] = self.__class__.__name__
        del d["sample"]
        return d
    @staticmethod
    def deserialize(sample, dict_):
        # Validate the raw input before touching it; exit with a message on
        # malformed configuration rather than raising.
        if not isinstance(dict_, dict):
            print "samples must be of type 'dict', not '{}': '{}'".format(type(dict_).__name__, dict_)
            sys.exit(1)
        dict_ = dict_.copy()
        dataset_types = [TenXDataset, ShortFragDataset, MatePairDataset]
        dataset_types = dict((x.__name__, x) for x in dataset_types)
        type_ = get_key(dict_, "type", error_msg="sample") # just for type-checking
        if not type_ in dataset_types:
            print "ERROR: Sample type must be one of '{}'; got '{}' instead".format(dataset_types.keys(), type_)
            sys.exit(1)
        # Instantiate the matching subclass with the remaining fields.
        dataset_class = dataset_types[dict_.pop("type")]
        dict_["sample"] = sample
        #try:
        return dataset_class(**dict_)
        #except TypeError:
        #    print "MISSING FIELD FOR SAMPLE:", sample.name, dataset_class
        #    print "    Fields provided:", dataset_class.__class__.__name__, dict_
        #    sys.exit(1)
class TenXDataset(Dataset):
    """Dataset backed by a single bam file plus an id (10X-style data,
    per the class name -- TODO confirm)."""
    def __init__(self, **kwdargs):#sample, bam, fragments, phased_fragments, id, sorted_fastqs=None):
        # get_key() pulls required fields out of the kwargs dict and reports
        # a friendly error naming "TenXDataset" when one is missing.
        self.sample = get_key(kwdargs, "sample", None, error_msg="TenXDataset")
        self.bam = os.path.realpath(get_key(kwdargs, "bam", error_msg="TenXDataset"))
        #self.fragments = get_key(kwdargs, "fragments", error_msg="TenXDataset")
        #self.phased_fragments = get_key(kwdargs, "phased_fragments", error_msg="TenXDataset")
        #self.sorted_fastqs = get_key(kwdargs, "sorted_fastqs", default=None, error_msg="TenXDataset")
        self.id = get_key(kwdargs, "id", error_msg="TenXDataset")
        self.validate()
    def validate(self):
        # Fail fast if the configured bam path does not exist on disk.
        assert os.path.exists(self.bam), "missing bam file '{}' for sample '{}' and dataset '{}'".format(
            self.bam, self.sample.name, self.id)
    # @staticmethod
    # def from_longranger_dir(self, longranger_dir):
    #     fragments = os.path.join(longranger_dir,
    #         "PHASER_SVCALLER_CS/PHASER_SVCALLER/_REPORTER/"
    #         "REPORT_SINGLE_PARTITION/fork0/files/fragments.h5")
    #     bam = os.path.join(longranger_dir,
    #         "PHASER_SVCALLER_CS/PHASER_SVCALLER/ATTACH_PHASING/"
    #         "fork0/files/phased_possorted_bam.bam")
    #     phased_fragments = os.path.join(longranger_dir,
    #         "10XSARCOMAC1/PHASER_SVCALLER_CS/PHASER_SVCALLER/"
    #         "_SNPINDEL_PHASER/PHASE_SNPINDELS/fork0/files/"
    #         "fragment_phasing.tsv.gz")
    #     self.validate()
    #     return TenXDataset(bam, fragments, phased_fragments)
    # def load_phased_fragments(self, chrom=None, start=None, end=None):
    #     columns = ["chrom", "start_pos", "end_pos", "phase_set", "ps_start",
    #                "ps_end", "bc", "h0", "h1", "hmix", "unkn"]
    #     try:
    #         tabix = pysam.TabixFile(self.phased_fragments)
    #         s = StringIO.StringIO("\n".join(tabix.fetch(chrom, start, end)))
    #         frags = pandas.read_table(s)
    #         frags.columns = columns
    #     except (IOError, ValueError):
    #         frags = pandas.DataFrame(columns=columns)
    #     return frags
    # def load_fragments(self, chrom=None, start=None, end=None):
    #     tabix = pysam.TabixFile()
    #     try:
    #         fragments = utilities.read_data_frame(self.fragments)
    #         goodbcs = utilities.get_good_barcodes(fragments)
    #         fragments = fragments.loc[fragments["bc"].isin(goodbcs)]
    #         # fragments = fragments.loc[fragments["num_reads"]>5]
    #         if chrom is not None:
    #             fragments = fragments.loc[fragments["chrom"]==chrom]
    #         return fragments
    #     except:
    #         logging.exception("Unable to load fragments from fragments file "
    #                           "'{}'".format(self.fragments))
    #         raise
class ShortFragDataset(Dataset):
    """Dataset consisting of one bam file plus identifying metadata."""
    def __init__(self, sample, bam, id):
        # Keep sample and id as given; canonicalize the bam path.
        self.sample = sample
        self.id = id
        self.bam = os.path.realpath(bam)
class MatePairDataset(Dataset):
    """Mate-pair dataset: one bam file plus identifying metadata."""
    def __init__(self, sample, bam, id):
        # Keep sample and id as given; canonicalize the bam path.
        self.sample = sample
        self.id = id
        self.bam = os.path.realpath(bam)
|
grocsvs/grocsvs
|
src/grocsvs/datasets.py
|
Python
|
mit
| 4,607
|
[
"pysam"
] |
c2ec6fa13ab400c6e4f0872be78f1a08fe9fcc97630019096f917ca11870021b
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyUnicycler(PythonPackage):
    """Unicycler is an assembly pipeline for bacterial genomes. It can
    assemble Illumina-only read sets where it functions as a SPAdes-optimiser.
    It can also assemble long-read-only sets (PacBio or Nanopore) where it
    runs a miniasm+Racon pipeline. For the best possible assemblies, give it
    both Illumina reads and long reads, and it will conduct a hybrid assembly.
    """
    homepage = "https://github.com/rrwick/Unicycler"
    url = "https://github.com/rrwick/Unicycler/archive/v0.4.5.tar.gz"
    version('0.4.7', sha256='a8cf65e46dc2694b0fbd4e9190c73a1f300921457aadfab27a1792b785620d63')
    version('0.4.6', sha256='56f6f358a5d1f8dd0fcd1df04504079fc42cec8453a36ee59ff89295535d03f5')
    version('0.4.5', sha256='67043656b31a4809f8fa8f73368580ba7658c8440b9f6d042c7f70b5eb6b19ae')
    depends_on('python@3.4:', type=('build', 'link', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    # External assembler/polisher tools invoked at run time.
    depends_on('spades', type='run')
    depends_on('pilon', type='run')
    depends_on('jdk', type=('build', 'run'))
    depends_on('bowtie2', type='run')
    depends_on('samtools@1.0:', type=('build', 'link', 'run'))
    depends_on('racon', type=('build', 'link', 'run'))
    depends_on('blast-plus', type='run')
    # Compilers known not to build this package.
    conflicts('%gcc@:4.9.0')
    conflicts('%clang@:3.4.2')
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-unicycler/package.py
|
Python
|
lgpl-2.1
| 1,554
|
[
"BLAST"
] |
8ffc41c1195074f8c0a56d8d58a0e61a865db2a361e13009311946b6d7d24d20
|
import logging
import numpy as np
import networkx as nx
from pgmpy.models import BayesianNetwork
from pgmpy.factors.continuous import LinearGaussianCPD
from pgmpy.factors.distributions import GaussianDistribution
class LinearGaussianBayesianNetwork(BayesianNetwork):
    """
    A Linear Gaussian Bayesian Network is a Bayesian Network, all
    of whose variables are continuous, and where all of the CPDs
    are linear Gaussians.
    An important result is that the linear Gaussian Bayesian Networks
    are an alternative representation for the class of multivariate
    Gaussian distributions.
    """

    def add_cpds(self, *cpds):
        """
        Add linear Gaussian CPD (Conditional Probability Distribution)
        to the Bayesian Network.
        Parameters
        ----------
        cpds : instances of LinearGaussianCPD
            List of LinearGaussianCPDs which will be associated with the model
        Examples
        --------
        >>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors.continuous import LinearGaussianCPD
        >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
        >>> cpd1 = LinearGaussianCPD('x1', [1], 4)
        >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
        >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
        >>> model.add_cpds(cpd1, cpd2, cpd3)
        >>> for cpd in model.cpds:
        ...     print(cpd)
        P(x1) = N(1; 4)
        P(x2| x1) = N(0.5*x1_mu); -5)
        P(x3| x2) = N(-1*x2_mu); 4)
        """
        for cpd in cpds:
            if not isinstance(cpd, LinearGaussianCPD):
                raise ValueError("Only LinearGaussianCPD can be added.")
            # Every variable the CPD mentions must be a node of the graph.
            if set(cpd.variables) - set(cpd.variables).intersection(set(self.nodes())):
                raise ValueError("CPD defined on variable not in the model", cpd)
            # Replace an existing CPD for the same variable, else append.
            for prev_cpd_index in range(len(self.cpds)):
                if self.cpds[prev_cpd_index].variable == cpd.variable:
                    logging.warning(f"Replacing existing CPD for {cpd.variable}")
                    self.cpds[prev_cpd_index] = cpd
                    break
            else:
                self.cpds.append(cpd)

    def get_cpds(self, node=None):
        """
        Returns the cpd of the node. If node is not specified returns all the CPDs
        that have been added till now to the graph
        Parameter
        ---------
        node: any hashable python object (optional)
            The node whose CPD we want. If node not specified returns all the
            CPDs added to the model.
        Returns
        -------
        A list of linear Gaussian CPDs.
        Examples
        --------
        >>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors.continuous import LinearGaussianCPD
        >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
        >>> cpd1 = LinearGaussianCPD('x1', [1], 4)
        >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
        >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
        >>> model.add_cpds(cpd1, cpd2, cpd3)
        >>> model.get_cpds()
        """
        return super(LinearGaussianBayesianNetwork, self).get_cpds(node)

    def remove_cpds(self, *cpds):
        """
        Removes the cpds that are provided in the argument.
        Parameters
        ----------
        *cpds: LinearGaussianCPD object
            A LinearGaussianCPD object on any subset of the variables
            of the model which is to be associated with the model.
        Examples
        --------
        >>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors.continuous import LinearGaussianCPD
        >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
        >>> cpd1 = LinearGaussianCPD('x1', [1], 4)
        >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
        >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
        >>> model.add_cpds(cpd1, cpd2, cpd3)
        >>> for cpd in model.get_cpds():
        ...     print(cpd)
        P(x1) = N(1; 4)
        P(x2| x1) = N(0.5*x1_mu); -5)
        P(x3| x2) = N(-1*x2_mu); 4)
        >>> model.remove_cpds(cpd2, cpd3)
        >>> for cpd in model.get_cpds():
        ...     print(cpd)
        P(x1) = N(1; 4)
        """
        return super(LinearGaussianBayesianNetwork, self).remove_cpds(*cpds)

    def to_joint_gaussian(self):
        """
        The linear Gaussian Bayesian Networks are an alternative
        representation for the class of multivariate Gaussian distributions.
        This method returns an equivalent joint Gaussian distribution.
        Returns
        -------
        GaussianDistribution: An equivalent joint Gaussian
           distribution for the network.
        Reference
        ---------
        Section 7.2, Example 7.3,
        Probabilistic Graphical Models, Principles and Techniques
        Examples
        --------
        >>> from pgmpy.models import LinearGaussianBayesianNetwork
        >>> from pgmpy.factors.continuous import LinearGaussianCPD
        >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
        >>> cpd1 = LinearGaussianCPD('x1', [1], 4)
        >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
        >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
        >>> model.add_cpds(cpd1, cpd2, cpd3)
        >>> jgd = model.to_joint_gaussian()
        >>> jgd.variables
        ['x1', 'x2', 'x3']
        >>> jgd.mean
        array([[ 1. ],
               [-4.5],
               [ 8.5]])
        >>> jgd.covariance
        array([[ 4.,  2., -2.],
               [ 2.,  5., -5.],
               [-2., -5.,  8.]])
        """
        # Process nodes in topological order so every parent's mean and
        # variance are already filled in when a child is reached.
        variables = list(nx.topological_sort(self))
        mean = np.zeros(len(variables))
        covariance = np.zeros((len(variables), len(variables)))

        for node_idx in range(len(variables)):
            cpd = self.get_cpds(variables[node_idx])
            # cpd.mean is [intercept, coeff_1, ..., coeff_k]; the node mean is
            # the intercept plus the coefficient-weighted parent means.
            mean[node_idx] = (
                sum(
                    [
                        coeff * mean[variables.index(parent)]
                        for coeff, parent in zip(cpd.mean, cpd.evidence)
                    ]
                )
                + cpd.mean[0]
            )
            # Diagonal: own variance plus the propagated parent variances.
            covariance[node_idx, node_idx] = (
                sum(
                    [
                        coeff
                        * coeff
                        * covariance[variables.index(parent), variables.index(parent)]
                        for coeff, parent in zip(cpd.mean, cpd.evidence)
                    ]
                )
                + cpd.variance
            )

        # Off-diagonal entries: reuse the symmetric counterpart when already
        # computed, otherwise accumulate through node j's parents.
        for node_i_idx in range(len(variables)):
            for node_j_idx in range(len(variables)):
                if covariance[node_j_idx, node_i_idx] != 0:
                    covariance[node_i_idx, node_j_idx] = covariance[
                        node_j_idx, node_i_idx
                    ]
                else:
                    cpd_j = self.get_cpds(variables[node_j_idx])
                    covariance[node_i_idx, node_j_idx] = sum(
                        [
                            coeff * covariance[node_i_idx, variables.index(parent)]
                            for coeff, parent in zip(cpd_j.mean, cpd_j.evidence)
                        ]
                    )

        return GaussianDistribution(variables, mean, covariance)

    def check_model(self):
        """
        Checks the model for various errors. This method checks for the following
        error -
        * Checks if the CPDs associated with nodes are consistent with their parents.
        Returns
        -------
        check: boolean
            True if all the checks pass.
        """
        for node in self.nodes():
            cpd = self.get_cpds(node=node)
            if isinstance(cpd, LinearGaussianCPD):
                if set(cpd.evidence) != set(self.get_parents(node)):
                    raise ValueError(
                        "CPD associated with %s doesn't have "
                        "proper parents associated with it." % node
                    )
        return True

    def get_cardinality(self, node):
        """
        Cardinality is not defined for continuous variables.
        """
        raise ValueError("Cardinality is not defined for continuous variables.")

    def fit(
        self,
        data,
        estimator=None,
        state_names=None,  # was a mutable default ([]); None is safe and equivalent
        complete_samples_only=True,
        **kwargs,
    ):
        """
        For now, fit method has not been implemented for LinearGaussianBayesianNetwork.
        """
        raise NotImplementedError(
            "fit method has not been implemented for LinearGaussianBayesianNetwork."
        )

    def predict(self, data):
        """
        For now, predict method has not been implemented for LinearGaussianBayesianNetwork.
        """
        raise NotImplementedError(
            "predict method has not been implemented for LinearGaussianBayesianNetwork."
        )

    def to_markov_model(self):
        """
        For now, to_markov_model method has not been implemented for LinearGaussianBayesianNetwork.
        """
        raise NotImplementedError(
            "to_markov_model method has not been implemented for LinearGaussianBayesianNetwork."
        )

    def is_imap(self, JPD):
        """
        For now, is_imap method has not been implemented for LinearGaussianBayesianNetwork.
        """
        raise NotImplementedError(
            "is_imap method has not been implemented for LinearGaussianBayesianNetwork."
        )
|
pgmpy/pgmpy
|
pgmpy/models/LinearGaussianBayesianNetwork.py
|
Python
|
mit
| 9,688
|
[
"Gaussian"
] |
adfc61e4f97d4f33ea5fc46a29efce08ad152e8a23e19aa399b3c26bee1f527f
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-production-job-get-input
# Author : Stuart Paterson
########################################################################
"""
Retrieve input sandbox for DIRAC Job
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
import os
# Usage text: first line of the module docstring plus argument summary.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     ' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
                                     'Arguments:',
                                     ' JobID: DIRAC Job ID' ] ) )
Script.registerSwitch( "D:", "Dir=", "Store the output in this directory" )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
  Script.showHelp()
# Imported after parseCommandLine so the DIRAC framework is configured first.
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
outputDir = None
# Pick up the optional -D/--Dir destination directory.
for sw, v in Script.getUnprocessedSwitches():
  if sw in ( 'D', 'Dir' ):
    outputDir = v
# Fetch the input sandbox of every requested job; collect failures.
for job in parseArguments( args ):
  result = dirac.getInputSandbox( job, outputDir = outputDir )
  if result['OK']:
    if os.path.exists( 'InputSandbox%s' % job ):
      print('Job input sandbox retrieved in InputSandbox%s/' % (job))
  else:
    errorList.append( ( job, result['Message'] ) )
    exitCode = 2
for error in errorList:
  print("ERROR %s: %s" % error)
DIRAC.exit( exitCode )
|
petricm/DIRAC
|
Interfaces/scripts/dirac-wms-job-get-input.py
|
Python
|
gpl-3.0
| 1,578
|
[
"DIRAC"
] |
21b641c2d164a683f6973e984d42e6c5d3a959ccbb06ac4f4444a12329f0ffc7
|
import copy
import random
import numpy as np
from Hypothesis import Hypothesis
from LOTlib.Miscellaneous import self_update
class VectorHypothesis(Hypothesis):
"""Store n-dimensional vectors (defaultly with Gaussian proposals)."""
def __init__(self, value=None, n=1, proposal=None, propose_scale=1.0, propose_n=1):
self.n = n
self.propose_n = propose_n
if value is None:
value = np.random.multivariate_normal(np.array([0.0] * n), proposal)
if proposal is None:
proposal = np.eye(n) * propose_scale
propose_mask = self.get_propose_mask()
proposal = proposal * propose_mask
self.proposal = proposal
Hypothesis.__init__(self, value=value)
self_update(self, locals())
def propose(self):
"""New value is sampled from a normal centered @ old values, w/ proposal as covariance."""
step = np.random.multivariate_normal(self.value, self.proposal)
new_value = copy.copy(self.value)
for i in random.sample(self.proposal.nonzero()[0], self.propose_n):
new_value[i] = step[i]
c = self.__copy__(new_value)
return c, 0.0
def get_propose_mask(self):
"""Default propose mask method."""
return [True] * self.n
def get_propose_idxs(self):
return [i for i, m in enumerate(self.get_propose_mask()) if m]
def compute_gradient(self, data, grad_step=.1):
partials = np.zeros(self.n)
posterior = self.compute_posterior(data)
print '&'*110, ' GRADIENT'
print 'POST: ', posterior
for i in range(self.n):
new_value = copy.copy(self.value)
new_value[i] += grad_step
c = self.__copy__(new_value)
posterior_i = c.compute_posterior(data)
partials[i] = (np.exp(posterior_i) - np.exp(posterior)) / grad_step
print 'POST_I: ', posterior_i
print '&'*110
return partials
def conditional_distribution(self, data, value_index, vals=np.arange(0, 2, .2)):
"""Compute posterior values for this grammar, varying specified value over a specified set.
Args
data(list): List of datums.
rule_name(string): Index of the value we're varying probabilities over.
vals(list): List of float values. E.g. [0,.2,.4, ..., 2.]
Returns:
list: List of [prior, likelihood, posterior], where each item corresponds to an item in the
`vals` argument.
"""
dist = []
old_value = copy.copy(self.value)
for p in vals:
value = copy.copy(self.value)
value[value_index] = p
self.set_value(value)
posterior = self.compute_posterior(data, updateflag=False)
dist.append([self.prior, self.likelihood, posterior])
self.set_value(old_value)
return vals, dist
def __copy__(self, value=None):
"""Copy this GH; shallow copies of value & proposal so we don't have sampling issues."""
if value is None:
value = copy.copy(self.value)
proposal = copy.copy(self.proposal)
c = type(self)()
c.__dict__.update(self.__dict__)
c.proposal = proposal
c.set_value(value)
return c
|
joshrule/LOTlib
|
LOTlib/Hypotheses/VectorHypothesis.py
|
Python
|
gpl-3.0
| 3,324
|
[
"Gaussian"
] |
a5f6c9697603a79e1be17c3df13f5b6981d7134508d4fbd3a13260793d590d68
|
import unittest
import pysal
import numpy as np
from pysal.spreg import probit as PB
from pysal.common import RTOL
class TestBaseProbit(unittest.TestCase):
    """Numerical regression tests for spreg.probit.BaseProbit on columbus."""

    def setUp(self):
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        crime = np.reshape(np.array(db.by_col("CRIME")), (49, 1))
        # Binary response: tracts with CRIME above 40.
        self.y = (crime > 40).astype(float)
        exog = np.array([db.by_col("INC"), db.by_col("HOVAL")]).T
        # BaseProbit expects the constant column to be supplied explicitly.
        self.X = np.hstack((np.ones(self.y.shape), exog))
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        reg = PB.BaseProbit(self.y, self.X, w=self.w)
        # (actual, expected) pairs, all compared with assert_allclose at RTOL.
        checks = [
            (reg.betas, np.array([[3.35381078], [-0.1996531], [-0.02951371]])),
            (reg.predy[0], np.array([0.00174739])),
            (reg.n, 49),
            (reg.k, 3),
            (reg.y[0], np.array([0.])),
            (reg.x[0], np.array([1., 19.531, 80.467003])),
            (reg.vm, np.array([[8.52813879e-01, -4.36272459e-02, -8.05171472e-03], [-4.36272459e-02, 4.11381444e-03, -1.92834842e-04], [-8.05171472e-03, -1.92834842e-04, 3.09660240e-04]])),
            (reg.xmean, np.array([[1.], [14.37493876], [38.43622447]])),
            (reg.predpc, 85.714285714285708),
            (reg.logl, -20.06009093055782),
            (reg.scale, 0.23309310130643665),
            (reg.slopes, np.array([[-0.04653776], [-0.00687944]])),
            (reg.slopes_vm, np.array([[1.77101993e-04, -1.65021168e-05], [-1.65021168e-05, 1.60575016e-05]])),
            (reg.LR[0], 25.317683245671716),
            (reg.Pinkse_error[0], 2.9632385352516728),
            (reg.KP_error[0], 1.6509224700582124),
            (reg.PS_error[0], 2.3732463777623511),
        ]
        for actual, expected in checks:
            np.testing.assert_allclose(actual, expected, RTOL)
class TestProbit(unittest.TestCase):
    """Numerical regression tests for spreg.probit.Probit on columbus."""

    def setUp(self):
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        crime = np.reshape(np.array(db.by_col("CRIME")), (49, 1))
        # Binary response: tracts with CRIME above 40.
        self.y = (crime > 40).astype(float)
        # Raw regressors only -- no constant column added here.
        self.X = np.array([db.by_col("INC"), db.by_col("HOVAL")]).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        reg = PB.Probit(self.y, self.X, w=self.w)
        # (actual, expected) pairs, all compared with assert_allclose at RTOL.
        checks = [
            (reg.betas, np.array([[3.35381078], [-0.1996531], [-0.02951371]])),
            (reg.predy[0], np.array([0.00174739])),
            (reg.n, 49),
            (reg.k, 3),
            (reg.y[0], np.array([0.])),
            (reg.x[0], np.array([1., 19.531, 80.467003])),
            (reg.vm, np.array([[8.52813879e-01, -4.36272459e-02, -8.05171472e-03], [-4.36272459e-02, 4.11381444e-03, -1.92834842e-04], [-8.05171472e-03, -1.92834842e-04, 3.09660240e-04]])),
            (reg.xmean, np.array([[1.], [14.37493876], [38.43622447]])),
            (reg.predpc, 85.714285714285708),
            (reg.logl, -20.06009093055782),
            (reg.scale, 0.23309310130643665),
            (reg.slopes, np.array([[-0.04653776], [-0.00687944]])),
            (reg.slopes_vm, np.array([[1.77101993e-04, -1.65021168e-05], [-1.65021168e-05, 1.60575016e-05]])),
            (reg.LR[0], 25.317683245671716),
            (reg.Pinkse_error[0], 2.9632385352516728),
            (reg.KP_error[0], 1.6509224700582124),
            (reg.PS_error[0], 2.3732463777623511),
        ]
        for actual, expected in checks:
            np.testing.assert_allclose(actual, expected, RTOL)
if __name__ == '__main__':
    # Run all tests in this module when executed directly.
    unittest.main()
|
lanselin/pysal
|
pysal/spreg/tests/test_probit.py
|
Python
|
bsd-3-clause
| 5,150
|
[
"COLUMBUS"
] |
3fa6f90372e4751f56aad77fda6b0b7ebdc22c18e867b3c80a142c250504df94
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# validstring - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""String validation"""
import os.path
from shared.base import invisible_path
def cert_name_format(input_string):
    """Normalize a certificate name to internal form.

    Spaces in certificate names are replaced with underscore internally,
    after title-casing the name.
    """
    titled = input_string.title()
    return '_'.join(titled.split(' '))
def is_valid_email_address(addr, logger):
    """From http://www.secureprogramming.com/?action=view&feature=recipes&recipeid=1

    Walk the address character by character, validating the local part
    (before '@') including quoted-string forms, then the domain part.
    Returns True only when the domain contains at least one dot.
    """
    logger.info("verifying if '%s' is a valid email address" % addr)
    rfc822_specials = '()<>@,;:\\"[]'

    # First we validate the name portion (name@domain)
    c = 0
    while c < len(addr):
        # Quoted-string section: only allowed at start or after '.'/'"'.
        if addr[c] == '"' and (not c or addr[c - 1] == '.' or addr[c
                - 1] == '"'):
            c += 1
            while c < len(addr):
                if addr[c] == '"':
                    break
                if addr[c] == '\\' and addr[c + 1] == ' ':
                    c += 2
                    continue
                # Reject control and non-ASCII characters inside quotes.
                if ord(addr[c]) < 32 or ord(addr[c]) >= 127:
                    return False
                c += 1
            else:
                # Quote never closed before end of string.
                return False
            if addr[c] == '@':
                break
            if addr[c] != '.':
                return False
            c += 1
            continue
        if addr[c] == '@':
            break
        if ord(addr[c]) <= 32 or ord(addr[c]) >= 127:
            return False
        if addr[c] in rfc822_specials:
            return False
        c += 1
    # Local part must be non-empty and must not end with a dot.
    if not c or addr[c - 1] == '.':
        return False

    # Next we validate the domain portion (name@domain)
    domain = c = c + 1
    if domain >= len(addr):
        return False
    count = 0
    while c < len(addr):
        if addr[c] == '.':
            # No leading dot and no consecutive dots in the domain.
            if c == domain or addr[c - 1] == '.':
                return False
            count += 1
        if ord(addr[c]) <= 32 or ord(addr[c]) >= 127:
            return False
        if addr[c] in rfc822_specials:
            return False
        c += 1
    logger.info('%s is a valid email address' % addr)
    # Require at least one dot in the domain.
    return count >= 1
def valid_user_path(path, home_dir, allow_equal=False):
    """Check that path is a valid path inside the user home directory,
    home_dir, and that it does not map to an invisible file or dir.

    With allow_equal=True a path identical to home_dir itself is also
    accepted; by default only strict descendants of home_dir pass. This
    rejects all 'invisible' files like htaccess files. Only use this for
    user-home related paths; validate other paths with valid_dir_input.
    """
    abs_target = os.path.abspath(path)
    # Reject hidden/htaccess-style entries outright.
    if invisible_path(abs_target):
        return False
    abs_home = os.path.abspath(home_dir)
    is_descendant = abs_target.startswith(abs_home + os.sep)
    if allow_equal:
        # Also accept home_dir itself; samefile may raise when either
        # path does not exist, which we treat as "not the same".
        try:
            is_home = os.path.samefile(abs_home, abs_target)
        except Exception:
            is_home = False
        return is_descendant or is_home
    return is_descendant
def valid_dir_input(base, variable):
    """Verify that a user supplied directory component stays within base.

    Guards file-manipulation code against traversal tricks such as '..' in
    variable: base/variable must already be in canonical absolute form and
    must not name an invisible path. Note that base must effectively bound
    the path; variable being a prefix of a sibling dir is handled by the
    os.sep join below.
    """
    candidate = os.path.abspath(base) + os.sep + variable
    # A canonical path is unchanged by abspath; any difference indicates
    # an escape attempt (e.g. '..' segments or redundant separators).
    if os.path.abspath(candidate) != candidate:
        return False
    return not invisible_path(candidate)
|
heromod/migrid
|
mig/shared/validstring.py
|
Python
|
gpl-2.0
| 5,156
|
[
"Brian"
] |
b4ebd18f2ed6c79c396f2267336a5c137c94fc4604298a958ae22188e0101da8
|
"""
LAMMPS (sci-physics/lammps) project within gentoo chroot.
"""
from os import path
from glob import glob
from benchbuild.utils.wrapping import wrap_in_uchroot as wrap, strip_path_prefix
from benchbuild.projects.gentoo.gentoo import GentooGroup
from benchbuild.utils.downloader import Wget
from benchbuild.utils.run import run, uchroot
from plumbum import local
from benchbuild.utils.cmd import tar # pylint: disable=E0401
class Lammps(GentooGroup):
    """
    sci-physics/lammps
    """
    NAME = "gentoo-lammps"
    DOMAIN = "sci-physics"
    # Test inputs are fetched from this location as a tarball.
    test_url = "http://lairosiel.de/dist/"
    test_archive = "lammps.tar.gz"
    def prepare(self):
        # Set up the gentoo chroot, then fetch and unpack the test inputs.
        super(Lammps, self).prepare()
        test_archive = self.test_archive
        test_url = self.test_url + test_archive
        Wget(test_url, test_archive)
        tar("fxz", test_archive)
    def build(self):
        # Emerge lammps inside the uchroot; USE flags disable MPI and docs,
        # producing the serial binary used by run_tests.
        emerge_in_chroot = uchroot()["/usr/bin/emerge"]
        with local.env(USE="-mpi -doc"):
            run(emerge_in_chroot["sci-physics/lammps"])
    def run_tests(self, experiment, run):
        # Wrap the serial binary for the experiment, then feed it every
        # unpacked in.* input file (lmp_serial reads its script from stdin).
        wrap(
            path.join(self.builddir, "usr/bin/lmp_serial"), experiment,
            self.builddir)
        lammps = uchroot()["/usr/bin/lmp_serial"]
        lammps_dir = path.join(self.builddir, "lammps")
        with local.cwd("lammps"):
            tests = glob(path.join(lammps_dir, "in.*"))
            for test in tests:
                run((lammps < strip_path_prefix(test, self.builddir)))
|
simbuerg/benchbuild
|
benchbuild/projects/gentoo/lammps.py
|
Python
|
mit
| 1,483
|
[
"LAMMPS"
] |
7208c74395da8ea676210d9cb9473afbb6b8a5cd2c1f18b8d3360402904d3588
|
from __future__ import absolute_import
# Example problem exercised by this test and the VTK file it should produce;
# TestInput presumably picks these module-level names up -- see tests_basic.
input_name = '../examples/linear_elasticity/material_nonlinearity.py'
output_name = 'test_material_nonlinearity.vtk'
from tests_basic import TestInput
class Test(TestInput):
    # All behaviour is inherited from TestInput.
    pass
|
rc/sfepy
|
tests/test_input_material_nonlinearity.py
|
Python
|
bsd-3-clause
| 223
|
[
"VTK"
] |
d28e1fed88a2bbf6d4a092e85275bf606ae6d5552f21331f61d81d8afdcfa992
|
from math import pi, sqrt, fabs
import numpy as np
from numpy import exp
from gpaw.poisson import PoissonSolver
from gpaw.utilities import cerf, erf
from gpaw.utilities.gauss import Gaussian
from gpaw.fd_operators import FDOperator, laplace
from gpaw.transformers import Transformer
class HelmholtzGaussian(Gaussian):
    """Gaussian source with an analytic solution of the Helmholtz equation."""
    def get_phi(self, k2):
        """Get the solution of the Helmholtz equation for a Gaussian.

        k2 is the squared wave number.
        """
        # This should lead to very big errors
        r = np.sqrt(self.r2)
        k = sqrt(k2)
        sigma = 1. / sqrt(2 * self.a)
        p = sigma * k / sqrt(2)
        i = 1j
        rhop = r / sqrt(2) / sigma + i * p
        rhom = r / sqrt(2) / sigma - i * p
        # (An unused intermediate h = np.sin(k * r) was removed here.)
        # the gpaw-Gaussian is sqrt(4 * pi) times a 3D normalized Gaussian
        return sqrt(4 * pi) * exp(-p**2) / r / 2 * (
            np.cos(k * r) * (cerf(rhop) + cerf(rhom)) +
            i * np.sin(k * r) * (2 + cerf(rhop) - cerf(rhom)))
class ScreenedPoissonGaussian(Gaussian):
    """Analytic screened-Poisson solution for a Gaussian source charge."""

    def get_phi(self, mu2):
        """Get the solution of the screened Poisson equation for a Gaussian.
        The Gaussian is centered to middle of grid-descriptor."""
        mu = sqrt(mu2)
        radius = np.sqrt(self.r2)
        width = 1. / sqrt(2 * self.a)
        var = width**2
        denom = sqrt(2) * width
        arg_minus = (var * mu - radius) / denom
        arg_plus = (var * mu + radius) / denom

        def erfc(values):
            # Complementary error function built on gpaw's erf.
            return 1. - erf(values)

        # the gpaw-Gaussian is sqrt(4 * pi) times a 3D normalized Gaussian
        prefactor = sqrt(4 * pi) * exp(var * mu2 / 2.0) / (2 * radius)
        return prefactor * (exp(-mu * radius) * erfc(arg_minus) -
                            exp(mu * radius) * erfc(arg_plus))
class HelmholtzOperator(FDOperator):
    def __init__(self, gd, scale=1.0, n=1, dtype=float, k2=0.0):
        """Helmholtz operator for a general non-orthorhombic grid.

        gd: GridDescriptor
            Descriptor for grid.
        scale: float
            Scaling factor. Use scale=-0.5 for a kinetic energy operator.
        n: int
            Range of stencil. Stencil has O(h^(2n)) error.
        dtype: float or complex
            Datatype to work on.
        k2: float
            Squared wave number added to the diagonal (Laplace + k2).
        """
        # Order the 13 neighbor grid points (one representative per +/-
        # direction pair of the 3x3x3 neighborhood).
        # NOTE: original code used reshape((3, -3)); numpy treats any single
        # negative entry as the unknown dimension, so -1 is equivalent and
        # idiomatic.
        M_ic = np.indices((3, 3, 3)).reshape((3, -1)).T[-13:] - 1
        # Unit vectors along the grid axes.
        u_cv = gd.h_cv / (gd.h_cv**2).sum(1)[:, np.newaxis]**0.5
        u2_i = (np.dot(M_ic, u_cv)**2).sum(1)
        i_d = u2_i.argsort()
        # Monomial exponents for the six independent second derivatives.
        m_mv = np.array([(2, 0, 0), (0, 2, 0), (0, 0, 2),
                         (0, 1, 1), (1, 0, 1), (1, 1, 0)])
        # Try 3, 4, 5 and 6 directions:
        for D in range(3, 7):
            h_dv = np.dot(M_ic[i_d[:D]], gd.h_cv)
            A_md = (h_dv**m_mv[:, np.newaxis, :]).prod(2)
            # rcond=-1 pins the legacy small-singular-value cutoff (and
            # silences numpy's FutureWarning about the changed default).
            a_d, residual, rank, s = np.linalg.lstsq(
                A_md, [1, 1, 1, 0, 0, 0], rcond=-1)
            if residual.sum() < 1e-14:
                assert rank == D, 'You have a weird unit cell!'
                # D directions was OK
                break
        a_d *= scale
        offsets = [(0, 0, 0)]
        coefs = [laplace[n][0] * a_d.sum()]
        # Diagonal term: add k^2 so the stencil applies (Laplace + k2).
        coefs[0] += k2 * scale
        for d in range(D):
            M_c = M_ic[i_d[d]]
            offsets.extend(np.arange(1, n + 1)[:, np.newaxis] * M_c)
            coefs.extend(a_d[d] * np.array(laplace[n][1:]))
            offsets.extend(np.arange(-1, -n - 1, -1)[:, np.newaxis] * M_c)
            coefs.extend(a_d[d] * np.array(laplace[n][1:]))
        FDOperator.__init__(self, coefs, offsets, gd, dtype)
        self.description = (
            '%d*%d+1=%d point O(h^%d) finite-difference Helmholtz' %
            ((self.npoints - 1) // n, n, self.npoints, 2 * n))
class HelmholtzSolver(PoissonSolver):
    """Solve the Helmholtz or screened Poisson equations.

    The difference between the Helmholtz equation:
        (Laplace + k^2) phi = n
    and the screened Poisson equation:
        (Laplace - mu^2) phi = n
    is only the sign of the added inhomogeneity.  Because of this we can
    use one class to solve both.  So if k2 is greater than zero we'll try
    to solve the Helmholtz equation, otherwise we'll try to solve the
    screened Poisson equation.
    """

    def __init__(self, k2=0.0, nn='M', relax='GS', eps=2e-10):
        # The true Helmholtz case (k2 > 0) is not supported yet.
        assert k2 <= 0, 'Currently only defined for k^2<=0'
        PoissonSolver.__init__(self, nn, relax, eps)
        self.k2 = k2

    def set_grid_descriptor(self, gd):
        # Should probably be renamed initialize
        self.gd = gd
        self.dv = gd.dv
        gd = self.gd
        # -1/(4 pi): prefactor of the Coulomb Green's function.
        scale = -0.25 / pi

        if self.nn == 'M':
            raise ValueError(
                'Helmholtz not defined for Mehrstellen stencil')
        # Finest-level operator; coarser levels are appended below.
        self.operators = [HelmholtzOperator(gd, scale, self.nn, k2=self.k2)]
        self.B = None

        self.interpolators = []
        self.restrictors = []

        level = 0
        self.presmooths = [2]
        self.postsmooths = [1]

        # Weights for the relaxation,
        # only used if 'J' (Jacobi) is chosen as method
        self.weights = [2.0 / 3.0]

        # Build a multigrid hierarchy: coarsen at most four times, or until
        # the grid can no longer be coarsened.
        while level < 4:
            try:
                gd2 = gd.coarsen()
            except ValueError:
                break
            self.operators.append(HelmholtzOperator(gd2, scale, 1,
                                                    k2=self.k2))
            self.interpolators.append(Transformer(gd2, gd))
            self.restrictors.append(Transformer(gd, gd2))
            self.presmooths.append(4)
            self.postsmooths.append(4)
            self.weights.append(1.0)
            level += 1
            gd = gd2

        self.levels = level

        if self.relax_method == 1:
            self.description = 'Gauss-Seidel'
        else:
            self.description = 'Jacobi'
        self.description += ' solver with %d multi-grid levels' % (level + 1)
        self.description += '\nStencil: ' + self.operators[0].description

    def load_gauss(self):
        """Load the gaussians."""
        # Lazily build the reference Gaussian charge and its potential.
        if not hasattr(self, 'rho_gauss'):
            # NOTE(review): __init__ asserts k2 <= 0, so the Helmholtz
            # branch below appears unreachable at present -- confirm before
            # relying on it.
            if self.k2 > 0:
                gauss = HelmholtzGaussian(self.gd)
            else:
                gauss = ScreenedPoissonGaussian(self.gd)
            self.rho_gauss = gauss.get_gauss(0)
            self.phi_gauss = gauss.get_phi(abs(self.k2))
|
robwarm/gpaw-symm
|
gpaw/helmholtz.py
|
Python
|
gpl-3.0
| 6,261
|
[
"GPAW",
"Gaussian"
] |
cf0cea195c8c781e266243b8842130671f56910bfd6432dd994e2524f753da6e
|
# ##WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING #
# Under development #
# pylint: skip-file
# ##WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING###WARNING #
""" Module to upload specified job output files according to the parameters
defined in the production workflow.
"""
from DIRAC import gLogger
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase, GracefulTermination
class UploadOutputs(ModuleBase):
    """Workflow module that uploads the job output files declared by the
    production workflow parameters."""

    #############################################################################

    def __init__(self):
        """ c'tor
        """
        self.log = gLogger.getSubLogger("UploadOutputs")
        super(UploadOutputs, self).__init__(self.log)

        self.outputDataStep = ''
        self.outputData = []
        self.outputList = []

    #############################################################################

    def _resolveInputVariables(self):
        """ The module parameters are resolved here.
        """
        super(UploadOutputs, self)._resolveInputVariables()

        # this comes from Job().setOutputData(). Typical for user jobs
        # (dict.has_key() was replaced with `in`, which also works on Py3)
        if 'OutputData' in self.workflow_commons:
            self.outputData = self.workflow_commons['OutputData']
            if not isinstance(self.outputData, list):
                self.outputData = [i.strip() for i in self.outputData.split(';')]
        # if not present, we use the outputList, which is instead incrementally
        # created based on the single step outputs.  This is more typical for
        # production jobs, that can have many steps linked one after the other
        elif 'outputList' in self.workflow_commons:
            self.outputList = self.workflow_commons['outputList']
        else:
            raise GracefulTermination('Nothing to upload')

        # in case you want to put a mask on the steps
        # TODO: add it to the DIRAC API
        if 'outputDataStep' in self.workflow_commons:
            self.outputDataStep = self.workflow_commons['outputDataStep']

        # this comes from Job().setOutputData(). Typical for user jobs
        if 'OutputSE' in self.workflow_commons:
            specifiedSE = self.workflow_commons['OutputSE']
            if not isinstance(specifiedSE, list):
                # BUGFIX: was assigned to a misspelled attribute `utputSE`,
                # silently dropping the parsed list of storage elements.
                self.outputSE = [i.strip() for i in specifiedSE.split(';')]
            # NOTE(review): when specifiedSE already is a list, outputSE is
            # never set from it -- presumably an oversight; confirm intent.
        else:
            # NOTE(review): self.defaultOutputSE is not defined in this
            # class -- presumably provided by ModuleBase; confirm.
            self.log.verbose('No OutputSE specified, using default value: %s' % (', '.join(self.defaultOutputSE)))
            self.outputSE = []

        # this comes from Job().setOutputData(). Typical for user jobs
        if 'OutputPath' in self.workflow_commons:
            self.outputPath = self.workflow_commons['OutputPath']

    def _initialize(self):
        """ gets the files to upload, check if to upload
        """
        # lfnsList = self.__getOutputLFNs( self.outputData ) or outputList?
        if not self._checkWFAndStepStatus():
            raise GracefulTermination('No output data upload attempted')

    def __getOuputLFNs(self, outputList, *args):
        """ This is really VO-specific.
        It should be replaced by each VO. Setting an LFN here just as an idea, and for testing purposes.
        """
        lfnList = []
        for outputFile in outputList:
            lfnList.append('/'.join([str(x) for x in args]) + outputFile)
        return lfnList

    def _execute(self):
        """ uploads the files
        """
        pass
|
Andrew-McNab-UK/DIRAC
|
Workflow/Modules/UploadOutputs.py
|
Python
|
gpl-3.0
| 3,477
|
[
"DIRAC"
] |
873165088a13ee059158aee4aa3177bb80bb1ebf300a1c764e818f3acdf3928f
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RXvector(RPackage):
    """Memory efficient S4 classes for storing sequences "externally" (behind
    an R external pointer, or on disk)."""

    homepage = "https://bioconductor.org/packages/XVector/"
    url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/XVector_0.16.0.tar.gz"
    # The Bioconductor package page doubles as the version listing.
    list_url = homepage

    version('0.16.0', '839426de12cbb570c85f27f8e7afb144')

    # R package dependencies, needed at both build and run time.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    # This Bioconductor release is pinned to the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@0.16.0')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-xvector/package.py
|
Python
|
lgpl-2.1
| 1,905
|
[
"Bioconductor"
] |
797eed5c57a151612bd77b5de5a7a94d2cfdcfd72591c87df055fc1d1dd6f3cb
|
# Copyright (c) 2016 Paulo Eduardo Rauber
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ^
# / \
# |
# |
#
# License included because this module is a heavily modified version based on
# Paulo's implementation of dynamic t-SNE.
# (https://github.com/paulorauber/thesne)
import math
import numpy as np
import theano
import theano.tensor as T
from sklearn.utils import check_random_state
from scipy.spatial.distance import pdist
from modules.layout_io import save_drawing
epsilon = 1e-16
floath = np.float32
class SigmaTooLowException(Exception):
    """Raised when the perplexity binary search produces invalid sigmas."""
    pass
class NaNException(Exception):
    """Raised when the optimization cost becomes NaN."""
    pass
# Squared Euclidean distance between all pairs of row-vectors
def sqeuclidean_var(X):
    """All-pairs squared Euclidean distances between the rows of X.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which works
    for both Theano tensors and numpy arrays.
    """
    n_rows = X.shape[0]
    sq_norms = (X ** 2).sum(axis=1)
    gram = X.dot(X.T)
    return sq_norms.reshape((n_rows, 1)) + sq_norms.reshape((1, n_rows)) - 2 * gram
# Euclidean distance between all pairs of row-vectors
def euclidean_var(X):
    # Clamp at epsilon before the square root to avoid the
    # non-differentiable point (and NaN gradients) at zero distance.
    return T.maximum(sqeuclidean_var(X), epsilon) ** 0.5
# Conditional probabilities of picking (ordered) pairs in high-dim space.
def p_ij_conditional_var(X, sigma):
    """Conditional probabilities p_{j|i} of picking pairs in high-dim space.

    NOTE(review): `sqdistance = X**2` squares the entries of X directly, so
    X is presumably already a matrix of pairwise distances rather than raw
    data vectors -- confirm against the callers (find_sigma / cost_var pass
    the shared high-dimensional matrix).
    """
    N = X.shape[0]
    sqdistance = X**2
    # Gaussian kernel with a per-row bandwidth sigma_i.
    esqdistance = T.exp(-sqdistance / ((2 * (sigma**2)).reshape((N, 1))))
    # A point is never its own neighbour: zero the diagonal.
    esqdistance_zd = T.fill_diagonal(esqdistance, 0)
    row_sum = T.sum(esqdistance_zd, axis=1).reshape((N, 1))
    return esqdistance_zd / row_sum  # Possibly dangerous
# Symmetrized probabilities of picking pairs in high-dim space.
def p_ij_sym_var(p_ij_conditional):
    """Symmetrized pair probabilities: p_ij = (p_{j|i} + p_{i|j}) / (2N)."""
    n_points = p_ij_conditional.shape[0]
    symmetrized = p_ij_conditional + p_ij_conditional.T
    return symmetrized / (2 * n_points)
# Probabilities of picking pairs in low-dim space (using Student
# t-distribution).
def q_ij_student_t_var(Y):
    """Pair-pick probabilities in the embedding, Student-t (1 dof) kernel."""
    sqdistance = sqeuclidean_var(Y)
    # Kernel value 1/(1 + d^2), with self-pairs (diagonal) zeroed out.
    one_over = T.fill_diagonal(1 / (sqdistance + 1), 0)
    return one_over / one_over.sum()
# Probabilities of picking pairs in low-dim space (using Gaussian).
def q_ij_gaussian_var(Y):
    """Pair-pick probabilities in the embedding, Gaussian kernel variant."""
    sqdistance = sqeuclidean_var(Y)
    # exp(-d^2) with self-pairs (diagonal) zeroed out.
    gauss = T.fill_diagonal(T.exp(-sqdistance), 0)
    return gauss / gauss.sum()
# Per point cost function
def cost_var(X, Y, sigma, Adj, l_kl, l_e, l_c, l_r, r_eps):
    """Per-vertex cost: weighted sum of KL divergence, edge contraction,
    compression and repulsion terms.

    X: high-dimensional representation; Y: 2D layout; sigma: per-point
    bandwidths; Adj: adjacency matrix; l_kl/l_e/l_c/l_r: term weights
    (normalized below to sum to one); r_eps: softening constant for the
    repulsion term.
    """
    N = X.shape[0]
    # Adj is symmetric, so each edge is counted twice.
    num_edges = 0.5 * T.sum(Adj)

    # Used to normalize s.t. the l_*'s sum up to one.
    l_sum = l_kl + l_e + l_c + l_r

    p_ij_conditional = p_ij_conditional_var(X, sigma)
    p_ij = p_ij_sym_var(p_ij_conditional)
    q_ij = q_ij_student_t_var(Y)

    # Clamp probabilities away from zero before logs/ratios.
    p_ij_safe = T.maximum(p_ij, epsilon)
    q_ij_safe = T.maximum(q_ij, epsilon)

    # Kullback-Leibler term
    kl = T.sum(p_ij * T.log(p_ij_safe / q_ij_safe), axis=1)
    # Edge contraction term
    edge_contraction = (1 / (2 * num_edges)) * T.sum(Adj * sqeuclidean_var(Y), axis=1)
    # Compression term
    compression = (1 / (2 * N)) * T.sum(Y**2, axis=1)
    # Repulsion term
    # repulsion = (1 / (2 * N**2)) * T.sum(T.fill_diagonal(1 / (euclidean_var(Y) + r_eps), 0), axis=1)
    repulsion = -(1 / (2 * N**2)) * T.sum(T.fill_diagonal(T.log(euclidean_var(Y) + r_eps), 0), axis=1)

    cost = (l_kl / l_sum) * kl + (l_e / l_sum) * edge_contraction + (l_c / l_sum) * compression + (l_r / l_sum) * repulsion
    return cost
# Binary search on sigma for a given perplexity
def find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, verbose=0):
    """Binary-search per-point bandwidths so every conditional distribution
    attains (approximately) the requested perplexity.

    Updates sigma_shared in place; raises SigmaTooLowException when the
    resulting perplexities contain NaNs.
    """
    X = T.fmatrix('X')
    sigma = T.fvector('sigma')

    # Perplexity = exp(entropy), so the search targets log(perplexity).
    target = np.log(perplexity)

    P = T.maximum(p_ij_conditional_var(X, sigma), epsilon)
    entropy = -T.sum(P * T.log(P), axis=1)

    # Setting update for binary search interval
    sigmin_shared = theano.shared(np.full(N, np.sqrt(epsilon), dtype=floath))
    sigmax_shared = theano.shared(np.full(N, np.inf, dtype=floath))
    sigmin = T.fvector('sigmin')
    sigmax = T.fvector('sigmax')

    # Shrink the interval depending on which side of the target the current
    # entropy falls, independently per point.
    upmin = T.switch(T.lt(entropy, target), sigma, sigmin)
    upmax = T.switch(T.gt(entropy, target), sigma, sigmax)

    givens = {X: X_shared, sigma: sigma_shared, sigmin: sigmin_shared,
              sigmax: sigmax_shared}
    updates = [(sigmin_shared, upmin), (sigmax_shared, upmax)]

    update_intervals = theano.function([], entropy, givens=givens, updates=updates)

    # Setting update for sigma according to search interval
    # (keep doubling sigma while the upper bound is still unbounded).
    upsigma = T.switch(T.isinf(sigmax), sigma * 2, (sigmin + sigmax) / 2.)

    givens = {sigma: sigma_shared, sigmin: sigmin_shared,
              sigmax: sigmax_shared}
    updates = [(sigma_shared, upsigma)]

    update_sigma = theano.function([], sigma, givens=givens, updates=updates)

    for i in range(sigma_iters):
        e = update_intervals()
        update_sigma()
        if verbose:
            print('[find_sigma] Iteration {0}: Perplexities in [{1:.4f}, {2:.4f}].'.format(i + 1, np.exp(e.min()), np.exp(e.max())), end='\r')
    if verbose:
        print('\n[find_sigma] Done! Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(e.min()), np.exp(e.max())))

    # NOTE(review): `e` is unbound if sigma_iters == 0 (NameError).
    if np.any(np.isnan(np.exp(e))):
        raise SigmaTooLowException('Invalid sigmas. The perplexity is probably too low.')
# Receives vectors in Y, and moves co-located vertices in opposite directions,
# to assist in the repulsion of vertices.
def switch_shake(Y, magnitude=1e-5):
    """Nudge coincident rows of Y apart by tiny opposite random offsets.

    Modifies Y in place and also returns it.
    """
    num_points = Y.shape[0]

    # Helpers to map an index into the condensed distance vector (as
    # returned by pdist) back to its (row, col) position in the square
    # distance matrix.
    def _row_of(k, n):
        return int(math.ceil((1 / 2.) * (- (-8 * k + 4 * n**2 - 4 * n - 7)**0.5 + 2 * n - 1) - 1))

    def _elems_before(i, n):
        return i * (n - 1 - i) + (i * (i + 1)) / 2

    def _col_of(k, i, n):
        return int(n - _elems_before(i + 1, n) + k)

    def _square_indices(k, n):
        row = _row_of(k, n)
        return row, _col_of(k, row, n)

    pair_dists = pdist(Y)
    largest = pair_dists.max()
    for flat_idx in np.where(pair_dists <= np.finfo(np.float32).eps)[0]:
        i, j = _square_indices(flat_idx, num_points)
        # v_i and v_j coincide: push them in opposite directions so the
        # repulsion term can act on them.
        offset = np.random.normal(0, largest * magnitude, 2)
        Y[i, :] += offset
        Y[j, :] -= offset
    return Y
# Perform momentum-based gradient descent on the cost function with the given
# parameters. Return the vertex coordinates and per-vertex cost.
def find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, n_epochs,
           initial_lr, final_lr, lr_switch, init_stdev, initial_momentum,
           final_momentum, momentum_switch,
           initial_l_kl, final_l_kl, l_kl_switch,
           initial_l_e, final_l_e, l_e_switch,
           initial_l_c, final_l_c, l_c_switch,
           initial_l_r, final_l_r, l_r_switch,
           r_eps,
           Adj_shared, g=None, save_every=None, output_folder=None, verbose=0):
    """Momentum-based gradient descent on cost_var.

    Every hyperparameter pair (initial_*, final_*) switches value at its
    corresponding *_switch epoch.  Optionally saves a drawing snapshot
    every `save_every` epochs.  Returns (Y, costs): the optimized vertex
    coordinates and the per-vertex cost.
    """
    # Optimization hyperparameters
    initial_lr = np.array(initial_lr, dtype=floath)
    final_lr = np.array(final_lr, dtype=floath)
    initial_momentum = np.array(initial_momentum, dtype=floath)
    final_momentum = np.array(final_momentum, dtype=floath)

    # Hyperparameters used within Theano
    lr = T.fscalar('lr')
    lr_shared = theano.shared(initial_lr)
    momentum = T.fscalar('momentum')
    momentum_shared = theano.shared(initial_momentum)

    # Cost parameters
    initial_l_kl = np.array(initial_l_kl, dtype=floath)
    final_l_kl = np.array(final_l_kl, dtype=floath)
    initial_l_e = np.array(initial_l_e, dtype=floath)
    final_l_e = np.array(final_l_e, dtype=floath)
    initial_l_c = np.array(initial_l_c, dtype=floath)
    final_l_c = np.array(final_l_c, dtype=floath)
    initial_l_r = np.array(initial_l_r, dtype=floath)
    final_l_r = np.array(final_l_r, dtype=floath)

    # Cost parameters used within Theano
    l_kl = T.fscalar('l_kl')
    l_kl_shared = theano.shared(initial_l_kl)
    l_e = T.fscalar('l_e')
    l_e_shared = theano.shared(initial_l_e)
    l_c = T.fscalar('l_c')
    l_c_shared = theano.shared(initial_l_c)
    l_r = T.fscalar('l_r')
    l_r_shared = theano.shared(initial_l_r)

    # High-dimensional observations (connectivities of vertices)
    X = T.fmatrix('X')
    # 2D projection (coordinates of vertices)
    Y = T.fmatrix('Y')
    # Adjacency matrix
    Adj = T.fmatrix('Adj')
    # Standard deviations used for Gaussians to attain perplexity
    sigma = T.fvector('sigma')
    # Y velocities (for momentum-based descent)
    Yv = T.fmatrix('Yv')
    Yv_shared = theano.shared(np.zeros((N, output_dims), dtype=floath))

    # Function for retrieving cost for all individual data points
    costs = cost_var(X, Y, sigma, Adj, l_kl, l_e, l_c, l_r, r_eps)
    # Sum of all costs (scalar)
    cost = T.sum(costs)
    # Gradient of the cost w.r.t. Y
    grad_Y = T.grad(cost, Y)

    # Update step for velocity
    update_Yv = theano.function(
        [], None,
        givens={
            X: X_shared,
            sigma: sigma_shared,
            Y: Y_shared,
            Yv: Yv_shared,
            Adj: Adj_shared,
            lr: lr_shared,
            momentum: momentum_shared,
            l_kl: l_kl_shared,
            l_e: l_e_shared,
            l_c: l_c_shared,
            l_r: l_r_shared
        },
        updates=[
            (Yv_shared, momentum * Yv - lr * grad_Y)
        ]
    )

    # Gradient descent step
    update_Y = theano.function(
        [], [],
        givens={
            Y: Y_shared, Yv: Yv_shared
        },
        updates=[
            (Y_shared, Y + Yv)
        ]
    )

    # Build function to retrieve cost
    get_cost = theano.function(
        [], cost,
        givens={
            X: X_shared,
            sigma: sigma_shared,
            Y: Y_shared,
            Adj: Adj_shared,
            l_kl: l_kl_shared,
            l_e: l_e_shared,
            l_c: l_c_shared,
            l_r: l_r_shared
        }
    )

    # Build function to retrieve per-vertex cost
    get_costs = theano.function(
        [], costs,
        givens={
            X: X_shared,
            sigma: sigma_shared,
            Y: Y_shared,
            Adj: Adj_shared,
            l_kl: l_kl_shared,
            l_e: l_e_shared,
            l_c: l_c_shared,
            l_r: l_r_shared
        }
    )

    # Optimization loop
    for epoch in range(n_epochs):
        # Switch parameter if a switching point is reached.
        if epoch == lr_switch:
            lr_shared.set_value(final_lr)
        if epoch == momentum_switch:
            momentum_shared.set_value(final_momentum)
        if epoch == l_kl_switch:
            l_kl_shared.set_value(final_l_kl)
        if epoch == l_e_switch:
            l_e_shared.set_value(final_l_e)
        if epoch == l_c_switch:
            l_c_shared.set_value(final_l_c)
        if epoch == l_r_switch:
            l_r_shared.set_value(final_l_r)
            if final_l_r != 0:
                # Give a nudge to co-located vertices in the epoch before the
                # repulsion kicks in (otherwise they don't feel any).
                Y_shared.set_value(switch_shake(Y_shared.get_value()))

        # Do update step for velocity
        update_Yv()
        # Do a gradient descent step
        update_Y()

        c = get_cost()
        if np.isnan(float(c)):
            raise NaNException('Encountered NaN for cost.')

        if verbose:
            print('[tsne] Epoch: {0}. Cost: {1:.6f}.'.format(epoch + 1, float(c)), end='\r')

        if output_folder is not None and g is not None and save_every is not None and epoch % save_every == 0:
            # Get per-vertex cost for colour-coding
            cs = get_costs()
            # Save a snapshot
            save_drawing(output_folder, g, Y_shared.get_value().T, 'tsne_snap_' + str(epoch).zfill(5), formats=['jpg'], verbose=False, edge_colors="rgb", draw_vertices=False, opacity=0.3)

    # Get per-vertex cost
    cs = get_costs()

    if verbose:
        print('\n[tsne] Done! ')

    return np.array(Y_shared.get_value()), cs
def tsne(X, perplexity=30, Y=None, output_dims=2, n_epochs=1000,
         initial_lr=10, final_lr=4, lr_switch=None, init_stdev=1e-4,
         sigma_iters=50, initial_momentum=0.5, final_momentum=0.8,
         momentum_switch=250,
         initial_l_kl=None, final_l_kl=None, l_kl_switch=None,
         initial_l_e=None, final_l_e=None, l_e_switch=None,
         initial_l_c=None, final_l_c=None, l_c_switch=None,
         initial_l_r=None, final_l_r=None, l_r_switch=None,
         r_eps=1, random_state=None, Adj=None, g=None,
         save_every=None, snaps_output_folder=None, verbose=1):
    """Graph-layout t-SNE entry point.

    X: high-dimensional representation (one row per vertex); Adj: adjacency
    matrix; Y: optional initial layout (random Gaussian with stddev
    init_stdev when omitted).  Returns (Y, costs): the final coordinates
    and the per-vertex cost from find_Y.
    """
    random_state = check_random_state(random_state)

    N = X.shape[0]

    X_shared = theano.shared(np.asarray(X, dtype=floath))
    sigma_shared = theano.shared(np.ones(N, dtype=floath))

    if Y is None:
        Y = random_state.normal(0, init_stdev, size=(N, output_dims))
    Y_shared = theano.shared(np.asarray(Y, dtype=floath))

    # Find sigmas to attain the given perplexity.
    find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, verbose)

    # Do the optimization to find Y (the vertex coordinates).
    Y, costs = find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, n_epochs,
                      initial_lr, final_lr, lr_switch, init_stdev, initial_momentum,
                      final_momentum, momentum_switch,
                      initial_l_kl, final_l_kl, l_kl_switch,
                      initial_l_e, final_l_e, l_e_switch,
                      initial_l_c, final_l_c, l_c_switch,
                      initial_l_r, final_l_r, l_r_switch,
                      r_eps,
                      Adj, g, save_every,
                      snaps_output_folder, verbose)

    # Return the vertex coordinates and the per-vertex costs.
    return Y, costs
|
HanKruiger/tsnetwork
|
src/modules/thesne.py
|
Python
|
mit
| 14,623
|
[
"Gaussian"
] |
9e7c7d2342f654c67b4e21cea84fdbe85ab7f1f99300340bfee4ea59bba70919
|
# Example: sorting a list of dicts by one or more keys using
# operator.itemgetter.
from operator import itemgetter

rows = [
    {'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
    {'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
    {'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
    {'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]

# itemgetter builds a callable fetching the given key(s) from each dict;
# with several keys it returns a tuple, yielding a lexicographic order.
rows_by_fname = sorted(rows, key=itemgetter('fname'))
rows_by_uid = sorted(rows, key=itemgetter('uid'))
rows_by_lfname = sorted(rows, key=itemgetter('lname', 'fname'))

# print() with a single argument works identically on Python 2 and 3;
# the original used Python-2-only print statements.
print(rows_by_lfname)
print(min(rows, key=itemgetter('uid')))
print(max(rows, key=itemgetter('uid')))
|
giftman/Gifts
|
Python/moveit/cookbook/p13_sort_list_of_dicts_by_key.py
|
Python
|
apache-2.0
| 524
|
[
"Brian"
] |
07608b50f676f6ee76c8ceebec48c553307040d58590100a6436b764a73b113e
|
"""
Demo if adaptive adversary works against feature squeezing.
Embed the diffrentiable filter layers in a model.
Pass in the (average) gradient (part of loss) to an attack algorithm.
Implement the gaussian-noise-iterative method for non-diffrentiable filter layers (bit depth reduction.)
Introduce the randomized feature squeezing (need to verify with legitimate examples, should not harm the accuracy.)
"""
import os
import tensorflow as tf
import numpy as np
import math
# Core: Get the gradient of models for the attack algorithms.
# We will combine the gradient of several models.
from keras.models import Model
from keras.layers import Lambda, Input
def insert_pre_processing_layer_to_model(model, input_shape, func):
    """Prepend a preprocessing Lambda layer to a Keras model.

    Output model: accept [-0.5, 0.5] input range instead of [0,1], output
    logits instead of softmax.  The output model will have three layers in
    abstract: Input, Lambda, TrainingModel.
    """
    # Drop the final softmax by re-wiring the model to its penultimate layer.
    model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    input_tensor = Input(shape=input_shape)
    # `func` transforms the raw input before it reaches the network.
    scaler_layer = Lambda(func, input_shape=input_shape)(input_tensor)
    output_tensor = model_logits(scaler_layer)
    model_new = Model(inputs=input_tensor, outputs=output_tensor)
    return model_new
# maybe_generate_adv_examples(sess, model, x, y, X_test, Y_test_target, attack_name, attack_params, use_cache = x_adv_fpath, verbose=FLAGS.verbose, attack_log_fpath=attack_log_fpath)
def adaptive_attack(sess, model, squeezers, x, y, X_test, Y_test_target, attack_name, attack_params):
    """Stub for an adaptive attack that accounts for feature squeezing.

    NOTE(review): the predictions computed for each squeezed input are
    discarded, so this function currently has no observable effect --
    consistent with the module's 'Under development' warning banner.
    """
    for squeeze_func in squeezers:
        predictions = model(squeeze_func(x))
# tf.contrib.distributions.kl(dist_a, dist_b, allow_nan=False, name=None)
# from .median import median_filter as median_filter_tf
# from .median import median_random_filter as median_random_filter_tf
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils.squeeze import get_squeezer_by_name, reduce_precision_tf
# if FLAGS.dataset_name == "MNIST":
# # squeezers_name = ['median_smoothing_2', 'median_smoothing_3', 'binary_filter']
# squeezers_name = ['median_smoothing_2', 'binary_filter']
# elif FLAGS.dataset_name == "CIFAR-10":
# squeezers_name = ["bit_depth_5", "bit_depth_4", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2', 'median_smoothing_1_3']
# elif FLAGS.dataset_name == "ImageNet":
# squeezers_name = ["bit_depth_5", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2']
def get_tf_squeezer_by_name(name):
    # Convenience wrapper: always fetch the tensorflow variant of a squeezer.
    return get_squeezer_by_name(name, 'tensorflow')
# Per-dataset squeezer configurations (median filters and bit-depth
# reduction) used when building the differentiable attack graph.
tf_squeezers_name_mnist = ['median_filter_2_2', 'bit_depth_1']
tf_squeezers_name_cifar10 = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5', 'bit_depth_4']
tf_squeezers_name_imagenet = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5']
# tf_squeezers = map(get_tf_squeezer_by_name, tf_squeezers_name)
def get_tf_squeezers_by_str(tf_squeezers_str):
    """Parse a comma-separated squeezer-name string into TF squeezer callables.

    Returns a list.  The original returned the result of `map`, which under
    Python 3 is a one-shot iterator and would be silently exhausted if the
    result were iterated more than once (the callers store and reuse it).
    """
    tf_squeezers_name = tf_squeezers_str.split(',')
    return [get_tf_squeezer_by_name(name) for name in tf_squeezers_name]
def kl_tf(x1, x2, eps = 0.000000001):
    """Row-wise KL divergence KL(x1 || x2) between batches of distributions.

    Both inputs are clipped into [eps, 1] to avoid log(0) and division by
    zero.
    """
    x1 = tf.clip_by_value(x1, eps, 1)
    x2 = tf.clip_by_value(x2, eps, 1)
    return tf.reduce_sum(x1 * tf.log(x1/x2), reduction_indices=[1])
def generate_adaptive_carlini_l2_examples(sess, model, x, y, X, Y_target, attack_params, verbose, attack_log_fpath):
    """Front-end for adaptive_CarliniL2: merge user-supplied attack_params
    into the defaults, resolve squeezer names to callables, and launch the
    attack.  Raises NotImplementedError for unknown parameter names.
    """
    # (model, x, y, X, Y_target, tf_squeezers=tf_squeezers, detector_threshold = 0.2):
    # tf_squeezers=tf_squeezers
    eval_dir = os.path.dirname(attack_log_fpath)

    default_params = {
        'batch_size': 100,
        'confidence': 0,
        'targeted': False,
        'learning_rate': 9e-2,
        'binary_search_steps': 9,
        'max_iterations': 5000,
        'abort_early': False,  # TODO: not supported.
        'initial_const': 0.0,
        'detector_threshold': 0.3,
        'uint8_optimized': False,
        'tf_squeezers': [],
        'distance_measure': 'l1',
        'between_squeezers': False,
    }

    if 'tf_squeezers' in attack_params:
        # NOTE(review): this mutates the caller's attack_params dict,
        # replacing the name string with a sequence of callables.
        tf_squeezers_str = attack_params['tf_squeezers']
        tf_squeezers = get_tf_squeezers_by_str(tf_squeezers_str)
        attack_params['tf_squeezers'] = tf_squeezers

    # Validate parameter names against the defaults, then overlay values.
    accepted_params = default_params.keys()
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsuporrted params in Carlini L2: %s" % k)
        else:
            default_params[k] = attack_params[k]

    # assert batch_size <= len(X)
    if 'batch_size' in default_params and default_params['batch_size'] > len(X):
        default_params['batch_size'] = len(X)

    return adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, **default_params)
def adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, batch_size, confidence, targeted, learning_rate, binary_search_steps, max_iterations, abort_early, initial_const, detector_threshold, uint8_optimized, tf_squeezers, distance_measure, between_squeezers):
    """Carlini-Wagner L2 attack adapted to evade feature-squeezing detectors.

    Optimizes adversarial perturbations in tanh space with Adam, binary-searching
    the loss coefficient per example, and adds a penalty that keeps the
    prediction distance between raw and squeezed inputs below ``detector_threshold``.

    :param sess: TF session; :param model: Keras model whose ``layers[-2]`` is
        assumed to be the logits layer — TODO confirm for each architecture.
    :param X: images in [0, 1], shape (N, H, W, C); :param Y_target: one-hot labels.
    :param eval_dir: directory for intermediate ``.npy`` snapshots.
    :param tf_squeezers: iterable of TF squeezing functions.
    :param distance_measure: 'l1', 'kl_f' or 'kl_b'.
    :param between_squeezers: also penalize distances among squeezed predictions.
    :returns: best adversarial images (originals where no attack succeeded).

    Fixes: ``dtype=np.bool`` (alias removed in NumPy 1.24) -> builtin ``bool``;
    typo in the "tf_squeezers" log line.
    """
    model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    # Need a determined batch size for coefficient vectors.
    x = tf.placeholder(shape=X.shape, dtype=tf.float32)
    y = tf.placeholder(shape=Y_target.shape, dtype=tf.float32)
    # Adapted from Warren and Carlini's code
    N0, H0, W0, C0 = X.shape
    # Range [0, 1], initialize as the original images.
    batch_images = X
    # Get the arctanh of the original images; dividing by 0.501 keeps the
    # argument strictly inside (-1, 1) so arctanh stays finite.
    batch_images_tanh = np.arctanh((batch_images - 0.5) / 0.501)
    batch_labels = Y_target
    x_star_tanh = tf.Variable(batch_images_tanh, dtype=tf.float32)
    # Range [0, 1], initialize as the original images.
    x_star = tf.tanh(x_star_tanh) / 2. + 0.5
    # The result is optimized for uint8.
    x_star_uint8 = reduce_precision_tf(x_star, 256)
    # Gradient required.
    y_pred_logits = model_logits(x_star)
    y_pred = model(x_star)
    print("tf_squeezers: %s" % tf_squeezers)
    y_squeezed_pred_list = [model(func(x_star)) for func in tf_squeezers]
    coeff = tf.placeholder(shape=(N0,), dtype=tf.float32)
    l2dist = tf.reduce_sum(tf.square(x_star - x), [1, 2, 3])
    ground_truth_logits = tf.reduce_sum(y * y_pred_logits, 1)
    top_other_logits = tf.reduce_max((1 - y) * y_pred_logits - (y * 10000), 1)
    # Untargeted attack, minimize the ground_truth_logits.
    if targeted is False:
        # if untargeted, optimize for making this class least likely.
        target_penalty = tf.maximum(0.0, ground_truth_logits - top_other_logits + confidence)
    else:
        # if targeted, optimize for making the other class most likely
        target_penalty = tf.maximum(0.0, top_other_logits - ground_truth_logits + confidence)
    # Minimize the sum of L1 score.
    detector_penalty = None
    all_pred_list = [y_pred] + y_squeezed_pred_list
    if between_squeezers:
        print("#Between squeezers")
        # Penalize every pairwise prediction distance among raw + squeezed outputs.
        for i, pred_base in enumerate(all_pred_list):
            for j in range(i + 1, len(all_pred_list)):
                pred_target = all_pred_list[j]
                if distance_measure == "l1":
                    score = tf.reduce_sum(tf.abs(pred_base - pred_target), 1)
                elif distance_measure == 'kl_f':
                    score = kl_tf(pred_base, pred_target)
                elif distance_measure == 'kl_b':
                    score = kl_tf(pred_target, pred_base)
                detector_penalty_sub = tf.maximum(0., score - detector_threshold)
                if detector_penalty is None:
                    detector_penalty = detector_penalty_sub
                else:
                    detector_penalty += detector_penalty_sub
    else:
        # Penalize only raw-vs-squeezed prediction distances.
        for y_squeezed_pred in y_squeezed_pred_list:
            if distance_measure == "l1":
                score = tf.reduce_sum(tf.abs(y_pred - y_squeezed_pred), 1)
            elif distance_measure == 'kl_f':
                score = kl_tf(y_pred, y_squeezed_pred)
            elif distance_measure == 'kl_b':
                score = kl_tf(y_squeezed_pred, y_pred)
            detector_penalty_sub = tf.maximum(0., score - detector_threshold)
            if detector_penalty is None:
                detector_penalty = detector_penalty_sub
            else:
                detector_penalty += detector_penalty_sub
    # There could be different design choices. E.g. add one coefficient for the detector penalty.
    loss = tf.add((target_penalty + detector_penalty) * coeff, l2dist)
    # Minimize loss by updating variables in var_list.
    train_adv_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=[x_star_tanh])
    # The last four global variables are assumed to be the Adam slots created
    # by the minimize() call just above (beta1/beta2 powers + two moments) —
    # NOTE(review): fragile if anything else creates variables in between.
    optimizer_variables = tf.global_variables()[-4:]
    # The result is optimized for uint8. Added by Weilin.
    if uint8_optimized:
        predictions = tf.argmax(model_logits(x_star_uint8), 1)
    else:
        predictions = tf.argmax(model_logits(x_star), 1)
    # For untargeted attacks a *correct* prediction means the attack failed.
    if targeted is False:
        correct_prediction = tf.equal(predictions, tf.argmax(y, 1))
    else:
        correct_prediction = tf.not_equal(predictions, tf.argmax(y, 1))
    # Initialize loss coefficients in log space; rows: current, high bound, low bound.
    coeff_block_log = np.tile([[initial_const], [float('nan')], [float('nan')]], (1, N0))
    coeff_curr_log = coeff_block_log[0]
    coeff_high_log = coeff_block_log[1]
    coeff_low_log = coeff_block_log[2]
    # Collect best adversarial images
    best_l2 = np.zeros((N0,)) + float('nan')
    best_coeff_log = np.zeros((N0,)) + float('nan')
    best_iter = np.zeros((N0,)) + float('nan')
    best_images = np.copy(batch_images)
    for _ in range(binary_search_steps):
        # Reset x_star_tanh and optimizer
        sess.run(tf.variables_initializer([x_star_tanh] + optimizer_variables))
        tf.assert_variables_initialized()
        print(coeff_curr_log)  # %%%
        curr_coeff = np.exp(coeff_curr_log)
        # Initially, all are failed adversarial examples.
        # np.bool was removed in NumPy 1.24; the builtin bool is the replacement.
        all_fail = np.ones((N0,), dtype=bool)
        # Training loop
        improve_count = 0
        # 5000 iterations by default.
        for j in range(max_iterations):
            # Correct prediction means it is failed untargeted attacks.
            xst, adv_fail, l1o, l2d, _ = sess.run([x_star, correct_prediction, detector_penalty, l2dist, train_adv_step], feed_dict={
                x: batch_images,
                y: batch_labels,
                coeff: curr_coeff,
            })
            all_fail = np.logical_and(all_fail, adv_fail)
            for i in range(N0):
                if adv_fail[i] or l1o[i] > 0:
                    continue
                # Save the best successful adversarial examples, with lowest L2.
                if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
                    best_l2[i] = l2d[i]
                    best_coeff_log[i] = coeff_curr_log[i]
                    best_iter[i] = j
                    best_images[i] = xst[i]
                    improve_count += 1
            if j % 100 == 0:
                print("Adv. training iter. {}/{} improved {}".format(j, max_iterations, improve_count))
                improve_count = 0
        # Run it once more, because the last iteration in the for loop doesn't get evaluated.
        xst, adv_fail, l1o, l2d = sess.run([x_star, correct_prediction, detector_penalty, l2dist], feed_dict={
            x: batch_images,
            y: batch_labels,
        })
        for i in range(N0):
            if adv_fail[i] or l1o[i] > 0:
                continue
            if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
                best_l2[i] = l2d[i]
                best_coeff_log[i] = coeff_curr_log[i]
                best_iter[i] = max_iterations
                best_images[i] = xst[i]
                improve_count += 1
        print("Finished training {}/{} improved {}".format(max_iterations, max_iterations, improve_count))
        # Save generated examples and their coefficients
        np.save(eval_dir + '/combined_adv_imgs.npy', best_images)
        np.save(eval_dir + '/combined_adv_coeff_log.npy', best_coeff_log)
        # Update coeff: bisection in log space between known-failing (low)
        # and known-succeeding (high) coefficients, per example.
        for i, (fail, curr, high, low) in enumerate(zip(adv_fail, coeff_curr_log, coeff_high_log, coeff_low_log)):
            if fail:
                # increase to allow more distortion
                coeff_low_log[i] = low = curr
                if math.isnan(high):
                    coeff_curr_log[i] = curr + 2.3
                else:
                    coeff_curr_log[i] = (high + low) / 2
            else:
                # decrease to penalize distortion
                coeff_high_log[i] = high = curr
                if math.isnan(low):
                    coeff_curr_log[i] = curr - 0.69
                else:
                    coeff_curr_log[i] = (high + low) / 2
    np.save(eval_dir + '/combined_coeff_log.npy', coeff_block_log)
    return best_images
|
mzweilin/EvadeML-Zoo
|
attacks/adaptive/adaptive_adversary.py
|
Python
|
mit
| 13,504
|
[
"Gaussian"
] |
91cb7621f8ce05eb72eea888d2340b47af5268c94b560e626730451678dc0a54
|
"""
Create and put Requests to archive files.
**List of operations**
#. Optionally replicate files to SourceSE
#. ArchiveFiles: Create a tarball from input files, upload tarball to TarballSE
#. ReplicateAndRegister Tarball to TargetSE
#. Optionally: Add LFNs to an ArchiveSE
#. Optionally: Check for Tarball Migration
#. Remove all other replicas for these files, or remove all files
#. Remove original replica of Tarball
Will copy all the respective files and place them into tarballs. Then the tarballs are migrated to
another storage element. Once the file is migrated to tape the original files will be
removed. Optionally the original files can be registered in a special archive SE, so that their
metadata is preserved.
**Related Options**
This script only works if the ``ArchiveFiles`` and ``CheckMigration`` RequestHandlers are configured.
To prevent submission of broken requests the script needs to be enabled in the Operations section of the CS
* Operations/DataManagement/ArchiveFiles/Enabled=True
Default values for any of the command line options can also be set in the CS
* Operations/DataManagement/ArchiveFiles/ArchiveSE
* Operations/DataManagement/ArchiveFiles/TarballSE
* Operations/DataManagement/ArchiveFiles/SourceSE
* Operations/DataManagement/ArchiveFiles/MaxFiles
* ...
"""
import os
import DIRAC
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Base.Script import Script
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
sLog = gLogger.getSubLogger("AddArchive")
MAX_SIZE = 2 * 1024 * 1024 * 1024 # 2 GB
MAX_FILES = 2000
class CreateArchiveRequest:
    """Create the request to archive files."""

    def __init__(self):
        """Constructor: register switches, parse the command line and collect LFNs."""
        # Lazily instantiated DIRAC clients; see the fcClient/reqClient properties.
        self._fcClient = None
        self._reqClient = None
        self.switches = {}
        self.requests = []
        self.lfnList = []
        self.metaData = None
        # (shortOption, longOption, documentation) triples for value switches.
        self.options = [
            ("A", "ArchiveSE", "SE for registering archive files at"),
            ("I", "TarballSE", "SE to initially upload tarball"),
            ("P", "Path", "LFN path to folder, all files in the folder will be archived"),
            ("N", "Name", "Name of the Tarball, if not given: Path_Tars/Path_N.tar" " will be used to store tarballs"),
            ("L", "List", "File containing list of LFNs to archive, requires Name to be given"),
            ("", "MaxFiles", "Maximum number to put in one tarball: Default %d" % MAX_FILES),
            ("", "MaxSize", "Maximum number of Bytes to put in one tarball: Default %d" % MAX_SIZE),
            ("S", "SourceSE", "Where to remove the LFNs from"),
            ("T", "TargetSE", "Where to move the Tarball to"),
        ]
        # (shortOption, longOption, documentation) triples for boolean flags.
        self.flags = [
            ("M", "ReplicateTarball", "Replicate the tarball"),
            ("C", "CheckMigration", "Ensure the tarball is migrated to tape before removing any files or replicas"),
            ("D", "RemoveReplicas", "Remove Replicas from non-ArchiveSE"),
            ("U", "RemoveFiles", "Remove Archived files completely"),
            ("R", "RegisterDescendent", "Register the Tarball as a descendent of the archived LFNs"),
            ("", "AllowReplication", "Enable first replicating to Source-SE"),
            ("", "SourceOnly", "Only treat files that are already at the Source-SE"),
            ("X", "Execute", "Put Requests, else dryrun"),
        ]
        self.registerSwitchesAndParseCommandLine()
        # Normalize size/count limits to integers (CS/CLI values may be strings).
        self.switches["MaxSize"] = int(self.switches.setdefault("MaxSize", MAX_SIZE))
        self.switches["MaxFiles"] = int(self.switches.setdefault("MaxFiles", MAX_FILES))
        self.getLFNList()
        self.getLFNMetadata()
        self.lfnChunks = []
        self.replicaSEs = []

    @property
    def fcClient(self):
        """Return FileCatalogClient (created on first access)."""
        if not self._fcClient:
            from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
            self._fcClient = FileCatalog()
        return self._fcClient

    @property
    def reqClient(self):
        """Return RequestClient (created on first access)."""
        if not self._reqClient:
            from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
            self._reqClient = ReqClient()
        return self._reqClient

    @property
    def dryRun(self):
        """Return dry run flag (True unless --Execute was given)."""
        return self.switches["DryRun"]

    @property
    def targetSE(self):
        """Return the list of targetSE."""
        return self.switches["TargetSE"]

    @property
    def sourceSEs(self):
        """Return the list of sourceSEs."""
        return self.switches["SourceSE"]

    @property
    def name(self):
        """Return the name of the Request."""
        return self.switches.get("Name", None)

    @property
    def lfnFolderPath(self):
        """Return the lfn folder path where to find the files of the request."""
        return self.switches.get("Path", None)

    def registerSwitchesAndParseCommandLine(self):
        """Register the default plus additional parameters and parse options.

        Registers ``self.options`` (value switches) and ``self.flags`` (booleans),
        refuses to run unless Operations/DataManagement/ArchiveFiles/Enabled is
        set in the CS, applies CS defaults, then overrides them with any
        command-line switches and post-processes DryRun/SourceSE.
        """
        for short, longOption, doc in self.options:
            Script.registerSwitch(short + ":" if short else "", longOption + "=", doc)
        for short, longOption, doc in self.flags:
            Script.registerSwitch(short, longOption, doc)
            self.switches[longOption] = False
        Script.parseCommandLine()
        if Script.getPositionalArgs():
            # This script takes no positional arguments.
            Script.showHelp(exitCode=1)
        ops = Operations()
        if not ops.getValue("DataManagement/ArchiveFiles/Enabled", False):
            sLog.error('The "ArchiveFiles" operation is not enabled, contact your administrator!')
            DIRAC.exit(1)
        # CS defaults first, so command-line values can override them below.
        for _short, longOption, _doc in self.options:
            defaultValue = ops.getValue("DataManagement/ArchiveFiles/%s" % longOption, None)
            if defaultValue:
                sLog.verbose("Found default value in the CS for %r with value %r" % (longOption, defaultValue))
                self.switches[longOption] = defaultValue
        for _short, longOption, _doc in self.flags:
            defaultValue = ops.getValue("DataManagement/ArchiveFiles/%s" % longOption, False)
            if defaultValue:
                sLog.verbose("Found default value in the CS for %r with value %r" % (longOption, defaultValue))
                self.switches[longOption] = defaultValue
        for switch in Script.getUnprocessedSwitches():
            for short, longOption, doc in self.options:
                if switch[0] == short or switch[0].lower() == longOption.lower():
                    sLog.verbose("Found switch %r with value %r" % (longOption, switch[1]))
                    self.switches[longOption] = switch[1]
                    break
            for short, longOption, doc in self.flags:
                if switch[0] == short or switch[0].lower() == longOption.lower():
                    self.switches[longOption] = True
                    break
        self.checkSwitches()
        self.switches["DryRun"] = not self.switches.get("Execute", False)
        self.switches["SourceSE"] = self.switches.get("SourceSE", "").split(",")

    def getLFNList(self):
        """Get list of LFNs.

        Either read the provided file, or get the files found beneath the provided folder.
        Fills ``self.lfnList``.

        :returns: None
        :raises: RuntimeError, ValueError
        """
        if self.switches.get("List"):
            if os.path.exists(self.switches.get("List")):
                # First whitespace-separated token of each line; deduplicated.
                self.lfnList = list(
                    set([line.split()[0] for line in open(self.switches.get("List")).read().splitlines()])
                )
            else:
                raise ValueError("%s not a file" % self.switches.get("List"))
        elif self.lfnFolderPath:
            path = self.lfnFolderPath
            sLog.debug("Check if %r is a directory" % path)
            isDir = returnSingleResult(self.fcClient.isDirectory(path))
            sLog.debug("Result: %r" % isDir)
            if not isDir["OK"] or not isDir["Value"]:
                sLog.error("Path is not a directory", isDir.get("Message", ""))
                raise RuntimeError("Path %r is not a directory" % path)
            sLog.notice("Looking for files in %r" % path)
            # With SourceOnly, restrict the metadata query to files at the first SourceSE.
            metaDict = {"SE": self.sourceSEs[0]} if self.switches.get("SourceOnly") else {}
            lfns = self.fcClient.findFilesByMetadata(metaDict=metaDict, path=path)
            if not lfns["OK"]:
                sLog.error("Could not find files")
                raise RuntimeError(lfns["Message"])
            self.lfnList = lfns["Value"]
        if self.lfnList:
            sLog.notice("Will create request(s) with %d lfns" % len(self.lfnList))
            if len(self.lfnList) == 1:
                raise RuntimeError("Only 1 file in the list, aborting!")
            return
        raise ValueError('"Path" or "List" need to be provided!')

    def putOrRunRequests(self):
        """Run or put requests.

        In dry-run mode only print a summary; otherwise submit every request to ReqDB.

        :returns: 0 on success, 1 if no request could be submitted
        """
        requestIDs = []
        if self.dryRun:
            sLog.notice("Would have created %d requests" % len(self.requests))
            for reqID, req in enumerate(self.requests):
                sLog.notice("Request %d:" % reqID)
                for opID, op in enumerate(req):
                    sLog.notice("  Operation %d: %s #lfn %d" % (opID, op.Type, len(op)))
            return 0
        for request in self.requests:
            putRequest = self.reqClient.putRequest(request)
            if not putRequest["OK"]:
                sLog.error("unable to put request %r: %s" % (request.RequestName, putRequest["Message"]))
                continue
            requestIDs.append(str(putRequest["Value"]))
            sLog.always("Request %r has been put to ReqDB for execution." % request.RequestName)
        if requestIDs:
            sLog.always("%d requests have been put to ReqDB for execution" % len(requestIDs))
            sLog.always("RequestID(s): %s" % " ".join(requestIDs))
            sLog.always("You can monitor the request status using the command: dirac-rms-request <requestName/ID>")
            return 0
        sLog.error("No requests created")
        return 1

    def checkSwitches(self):
        """Check the switches, set autoName if needed.

        :raises: RuntimeError on inconsistent or missing switch combinations
        """
        if not self.switches.get("SourceSE"):
            raise RuntimeError('Have to set "SourceSE"')
        if not self.switches.get("List") and not self.switches.get("Path"):
            raise RuntimeError('Have to set "List" or "Path"')
        if not self.name and self.lfnFolderPath:
            # Derive the tarball name from the folder path when no Name is given.
            self.switches["AutoName"] = os.path.join(
                os.path.dirname(self.lfnFolderPath), os.path.basename(self.lfnFolderPath) + ".tar"
            )
            sLog.notice("Using %r for tarball" % self.switches.get("AutoName"))
        if self.switches.get("List") and not self.name:
            raise RuntimeError('Have to set "Name" with "List"')
        if self.switches.get("RemoveReplicas") and self.switches.get("ArchiveSE") is None:
            sLog.error("'RemoveReplicas' does not work without 'ArchiveSE'")
            raise RuntimeError("ArchiveSE missing")
        if self.switches.get("RemoveReplicas") and self.switches.get("RemoveFiles"):
            sLog.error("Use either 'RemoveReplicas' or 'RemoveFiles', not both!")
            raise RuntimeError("Too many removal flags")
        if self.switches.get("ReplicateTarball") and not self.switches.get("TargetSE"):
            sLog.error("Have to set 'TargetSE' with 'ReplicateTarball'")
            raise RuntimeError("ReplicateTarball missing TargetSE")

    def splitLFNsBySize(self):
        """Split LFNs into MaxSize chunks of at most MaxFiles length.

        Fills ``self.lfnChunks`` and ``self.replicaSEs``.
        """
        sLog.notice("Splitting files by Size")
        lfnChunk = []
        totalSize = 0
        for lfn, info in self.metaData["Successful"].items():
            # Close the current chunk once either limit has been exceeded.
            if totalSize > self.switches["MaxSize"] or len(lfnChunk) >= self.switches["MaxFiles"]:
                self.lfnChunks.append(lfnChunk)
                sLog.notice("Created Chunk of %s lfns with %s bytes" % (len(lfnChunk), totalSize))
                lfnChunk = []
                totalSize = 0
            lfnChunk.append(lfn)
            totalSize += info["Size"]
        self.lfnChunks.append(lfnChunk)
        sLog.notice("Created Chunk of %s lfns with %s bytes" % (len(lfnChunk), totalSize))
        # All SEs holding any replica of any of the LFNs.
        # NOTE(review): assumes getReplicas succeeds — "OK" is not checked here.
        self.replicaSEs = set(
            [
                seItem
                for se in self.fcClient.getReplicas(self.lfnList)["Value"]["Successful"].values()
                for seItem in se.keys()
            ]
        )

    def run(self):
        """Perform checks and create the request.

        :returns: 0 on success, 1 on validation failure or empty chunk
        """
        if self.switches.get("AutoName"):
            baseArchiveLFN = archiveLFN = self.switches["AutoName"]
            tarballName = os.path.basename(archiveLFN)
        else:
            baseArchiveLFN = archiveLFN = self.name
            tarballName = os.path.basename(archiveLFN)
        baseRequestName = requestName = "Archive_%s" % tarballName.rsplit(".", 1)[0]
        from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
        self.splitLFNsBySize()
        for count, lfnChunk in enumerate(self.lfnChunks):
            if not lfnChunk:
                sLog.error("LFN list is empty!!!")
                return 1
            if len(self.lfnChunks) > 1:
                # Several chunks: make request and tarball names unique per chunk.
                requestName = "%s_%d" % (baseRequestName, count)
                baseName = os.path.split(baseArchiveLFN.rsplit(".", 1)[0])
                archiveLFN = "%s/%s_Tars/%s_%d.tar" % (baseName[0], baseName[1], baseName[1], count)
            self.checkArchive(archiveLFN)
            request = self.createRequest(requestName, archiveLFN, lfnChunk)
            valid = RequestValidator().validate(request)
            if not valid["OK"]:
                sLog.error("putRequest: request not valid", "%s" % valid["Message"])
                return 1
            else:
                self.requests.append(request)
        self.putOrRunRequests()
        return 0

    def addLFNs(self, operation, lfns, addPFN=False):
        """Add lfns to operation.

        :param operation: the operation instance to which the files will be added
        :param list lfns: list of lfns
        :param bool addPFN: if true adds PFN to each File
        """
        if not self.metaData:
            self.getLFNMetadata()
        for lfn in lfns:
            metaDict = self.metaData["Successful"][lfn]
            opFile = File()
            opFile.LFN = lfn
            if addPFN:
                opFile.PFN = lfn
            opFile.Size = metaDict["Size"]
            if "Checksum" in metaDict:
                # should check checksum type, now assuming Adler32 (metaDict['ChecksumType'] = 'AD')
                opFile.Checksum = metaDict["Checksum"]
                opFile.ChecksumType = "ADLER32"
            operation.addFile(opFile)

    def getLFNMetadata(self):
        """Get the metadata for all the LFNs.

        :raises: RuntimeError if metadata could not be read for any LFN
        """
        metaData = self.fcClient.getFileMetadata(self.lfnList)
        error = False
        if not metaData["OK"]:
            sLog.error("Unable to read metadata for lfns: %s" % metaData["Message"])
            raise RuntimeError("Could not read metadata: %s" % metaData["Message"])
        self.metaData = metaData["Value"]
        for failedLFN, reason in self.metaData["Failed"].items():
            sLog.error("skipping %s: %s" % (failedLFN, reason))
            error = True
        if error:
            raise RuntimeError("Could not read all metadata")
        for lfn in self.metaData["Successful"].keys():
            sLog.verbose("found %s" % lfn)

    def createRequest(self, requestName, archiveLFN, lfnChunk):
        """Create the Request.

        Chains the operations: ArchiveFiles, optional tarball replication,
        optional migration check, optional archive-replica registration,
        replica/file removal, and removal of the original tarball replica.
        """
        request = Request()
        request.RequestName = requestName
        self._checkReplicaSites(request, lfnChunk)
        archiveFiles = Operation()
        archiveFiles.Type = "ArchiveFiles"
        archiveFiles.Arguments = DEncode.encode(
            {
                "SourceSE": self.sourceSEs[0],
                "TarballSE": self.switches["TarballSE"],
                "RegisterDescendent": self.switches["RegisterDescendent"],
                "ArchiveLFN": archiveLFN,
            }
        )
        self.addLFNs(archiveFiles, lfnChunk)
        request.addOperation(archiveFiles)
        # Replicate the Tarball, ArchiveFiles will upload it
        if self.switches.get("ReplicateTarball"):
            replicateAndRegisterTarBall = Operation()
            replicateAndRegisterTarBall.Type = "ReplicateAndRegister"
            replicateAndRegisterTarBall.TargetSE = self.targetSE
            opFile = File()
            opFile.LFN = archiveLFN
            replicateAndRegisterTarBall.addFile(opFile)
            request.addOperation(replicateAndRegisterTarBall)
        if self.switches.get("CheckMigration"):
            checkMigrationTarBall = Operation()
            checkMigrationTarBall.Type = "CheckMigration"
            # Check migration where the tarball finally resides.
            migrationTarget = self.targetSE if self.switches.get("ReplicateTarball") else self.switches["TarballSE"]
            checkMigrationTarBall.TargetSE = migrationTarget
            opFile = File()
            opFile.LFN = archiveLFN
            checkMigrationTarBall.addFile(opFile)
            request.addOperation(checkMigrationTarBall)
        # Register Archive Replica for LFNs
        if self.switches.get("ArchiveSE"):
            registerArchived = Operation()
            registerArchived.Type = "RegisterReplica"
            registerArchived.TargetSE = self.switches.get("ArchiveSE")
            self.addLFNs(registerArchived, lfnChunk, addPFN=True)
            request.addOperation(registerArchived)
        # Remove all Other Replicas for LFNs
        if self.switches.get("RemoveReplicas"):
            removeArchiveReplicas = Operation()
            removeArchiveReplicas.Type = "RemoveReplica"
            removeArchiveReplicas.TargetSE = ",".join(self.replicaSEs)
            self.addLFNs(removeArchiveReplicas, lfnChunk)
            request.addOperation(removeArchiveReplicas)
        # Remove all Replicas for LFNs
        if self.switches.get("RemoveFiles"):
            removeArchiveFiles = Operation()
            removeArchiveFiles.Type = "RemoveFile"
            self.addLFNs(removeArchiveFiles, lfnChunk)
            request.addOperation(removeArchiveFiles)
        # Remove Original tarball replica
        if self.switches.get("ReplicateTarball"):
            removeTarballOrg = Operation()
            removeTarballOrg.Type = "RemoveReplica"
            removeTarballOrg.TargetSE = self.sourceSEs[0]
            opFile = File()
            opFile.LFN = archiveLFN
            removeTarballOrg.addFile(opFile)
            request.addOperation(removeTarballOrg)
        return request

    def checkArchive(self, archiveLFN):
        """Check that archiveLFN does not exist yet and is writable.

        :raises RuntimeError: if the tarball already exists
        :raises ValueError: if the path is invalid or not writable
        """
        sLog.notice("Using Tarball: %s" % archiveLFN)
        exists = returnSingleResult(self.fcClient.isFile(archiveLFN))
        sLog.debug("Checking for Tarball existence %r" % exists)
        if exists["OK"] and exists["Value"]:
            raise RuntimeError("Tarball %r already exists" % archiveLFN)
        sLog.debug("Checking permissions for %r" % archiveLFN)
        hasAccess = returnSingleResult(self.fcClient.hasAccess(archiveLFN, "addFile"))
        if not archiveLFN or not hasAccess["OK"] or not hasAccess["Value"]:
            sLog.error("Error checking tarball location: %r" % hasAccess)
            raise ValueError('%s is not a valid path, parameter "Name" must be correct' % archiveLFN)

    def _checkReplicaSites(self, request, lfnChunk):
        """Ensure that all lfns can be found at the SourceSE, otherwise add replication operation to request.

        If SourceOnly is set just reject those LFNs.
        """
        resReplica = self.fcClient.getReplicas(lfnChunk)
        if not resReplica["OK"]:
            sLog.error("Failed to get replica information:", resReplica["Message"])
            raise RuntimeError("Failed to get replica information")
        atSource = []
        notAt = []
        failed = []
        sourceSE = self.sourceSEs[0]
        for lfn, replInfo in resReplica["Value"]["Successful"].items():
            if sourceSE in replInfo:
                atSource.append(lfn)
            else:
                sLog.notice("WARN: LFN %r not found at source, only at: %s" % (lfn, ",".join(replInfo.keys())))
                notAt.append(lfn)
        for lfn, errorMessage in resReplica["Value"]["Failed"].items():
            sLog.error("Failed to get replica info", "%s: %s" % (lfn, errorMessage))
            failed.append(lfn)
        if failed:
            raise RuntimeError("Failed to get replica information")
        sLog.notice("Found %d files to replicate" % len(notAt))
        if not notAt:
            return
        if notAt and self.switches.get("AllowReplication"):
            self._replicateSourceFiles(request, notAt)
        else:
            raise RuntimeError("Not all files are at the Source, exiting")

    def _replicateSourceFiles(self, request, lfns):
        """Create the replicateAndRegisterRequest.

        :param request: The request to add the operation to
        :param lfns: list of LFNs
        """
        registerSource = Operation()
        registerSource.Type = "ReplicateAndRegister"
        registerSource.TargetSE = self.sourceSEs[0]
        self.addLFNs(registerSource, lfns, addPFN=True)
        request.addOperation(registerSource)
@Script()
def main():
    """Build and submit (or dry-run) the archive request, then exit."""
    try:
        archiver = CreateArchiveRequest()
        archiver.run()
    except Exception as e:
        # Full traceback only when the logger is at VERBOSE/DEBUG level.
        verbose = LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE
        if verbose:
            sLog.exception("Failed to create Archive Request")
        else:
            sLog.error("ERROR: Failed to create Archive Request:", str(e))
        exit(1)
    exit(0)


if __name__ == "__main__":
    main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_create_archive_request.py
|
Python
|
gpl-3.0
| 22,616
|
[
"DIRAC"
] |
b82530ad99bd8120dc7d5b0dd8d70c9786221a464018a695bd23dafbb95730e1
|
"""
Hidden Markov model representation.
"""
import numpy as np
from bhmm.util.logger import logger
__author__ = "John D. Chodera, Frank Noe"
__copyright__ = "Copyright 2015, John D. Chodera and Frank Noe"
__credits__ = ["John D. Chodera", "Frank Noe"]
__license__ = "LGPL"
__maintainer__ = "John D. Chodera"
__email__="jchodera AT gmail DOT com"
from msmtools.estimation import count_matrix
class HMM(object):
r""" Hidden Markov model (HMM).
This class is used to represent an HMM. This could be a maximum-likelihood HMM or a sampled HMM from a
Bayesian posterior.
Parameters
----------
Tij : np.array with shape (nstates, nstates), optional, default=None
Row-stochastic transition matrix among states.
output_model : :class:`bhmm.OutputModel`
The output model for the states.
lag : int, optional, default=1
Lag time (optional) used to estimate the HMM. Used to compute relaxation timescales.
Pi : np.array with shape (nstates), optional, default=None
The initial state vector. Required when stationary=False
stationary : bool, optional, default=True
If true, the initial distribution is equal to the stationary distribution of the transition matrix
If false, the initial distribution must be given as Pi
reversible : bool, optional, default=True
If true, the transition matrix is reversible.
Examples
--------
>>> # Gaussian HMM
>>> nstates = 2
>>> Tij = np.array([[0.8, 0.2], [0.5, 0.5]])
>>> from bhmm import GaussianOutputModel
>>> output_model = GaussianOutputModel(nstates, means=[-1, +1], sigmas=[1, 1])
>>> model = HMM(Tij, output_model)
>>> # Discrete HMM
>>> nstates = 2
>>> Tij = np.array([[0.8, 0.2], [0.5, 0.5]])
>>> from bhmm import DiscreteOutputModel
>>> output_model = DiscreteOutputModel([[0.5, 0.1, 0.4], [0.2, 0.3, 0.5]])
>>> model = HMM(Tij, output_model)
"""
    def __init__(self, Tij, output_model, lag=1, Pi=None, stationary=True, reversible=True):
        """Initialize the HMM and compute all derived quantities via :meth:`update`."""
        # set number of states from the transition matrix dimension
        self._nstates = np.array(Tij).shape[0]
        # lag time
        self._lag = lag
        # output model
        self.output_model = output_model
        # hidden state trajectories are optional
        self.hidden_state_trajectories = None
        # parameters
        self._stationary = stationary
        self._reversible = reversible
        # update numbers (sets _Tij, _Pi and the spectral decomposition)
        self.update(Tij, Pi)
    def update(self, Tij, Pi=None):
        r""" Updates the transition matrix and recomputes all derived quantities

        Validates the transition matrix, sets the initial/stationary
        distribution, and attempts an eigendecomposition (cached on success).
        """
        # EMMA imports
        from msmtools import analysis as msmana
        # save a copy of the transition matrix
        self._Tij = np.array(Tij)
        assert msmana.is_transition_matrix(self._Tij), 'Given transition matrix is not a stochastic matrix'
        assert self._Tij.shape[0] == self._nstates, 'Given transition matrix has unexpected number of states '
        # initial / stationary distribution
        if (Pi is not None):
            assert np.all(Pi >= 0), 'Given initial distribution contains negative elements.'
            Pi = np.array(Pi) / np.sum(Pi)  # ensure normalization and make a copy
        if (self._stationary):
            pT = msmana.stationary_distribution(self._Tij)
            if Pi is None:  # stationary and no stationary distribution fixed, so computing it from trans. mat.
                self._Pi = pT
            else:  # stationary but stationary distribution is fixed, so the transition matrix must be consistent
                assert np.allclose(Pi, pT), 'Stationary HMM requested, but given distribution is not the ' \
                                            'stationary distribution of the given transition matrix.'
                self._Pi = Pi
        else:
            if Pi is None:  # no initial distribution given, so use stationary distribution anyway
                self._Pi = msmana.stationary_distribution(self._Tij)
            else:
                self._Pi = Pi
        # reversible
        if self._reversible:
            assert msmana.is_reversible(Tij), 'Reversible HMM requested, but given transition matrix is not reversible.'
        # try to do eigendecomposition by default, because it's very cheap for hidden transition matrices
        from scipy.linalg import LinAlgError
        try:
            if self._reversible:
                self._R, self._D, self._L = msmana.rdl_decomposition(self._Tij, norm='reversible')
                # everything must be real-valued
                self._R = self._R.real
                self._D = self._D.real
                self._L = self._L.real
            else:
                self._R, self._D, self._L = msmana.rdl_decomposition(self._Tij, norm='standard')
            self._eigenvalues = np.diag(self._D)
            self._spectral_decomp_available = True
        except LinAlgError:
            # Degrade gracefully: spectral properties will raise via
            # _assert_spectral_decomposition() instead of failing here.
            logger().warn('Eigendecomposition failed for transition matrix\n' + str(self._Tij) +
                          '\nspectral properties will not be available')
            self._spectral_decomp_available = False
def __repr__(self):
from bhmm.output_models import OutputModel
if issubclass(self.__class__, OutputModel):
outrepr = repr(OutputModel.__repr__(self))
else:
outrepr = repr(self.output_model)
""" Returns a string representation of the HMM """
return "HMM(%d, %s, %s, Pi=%s, stationary=%s, reversible=%s)" % (self._nstates,
repr(self._Tij),
outrepr,
repr(self._Pi),
repr(self._stationary),
repr(self._reversible))
def __str__(self):
""" Returns a human-readable string representation of the HMM """
output = 'Hidden Markov model\n'
output += '-------------------\n'
output += 'nstates: %d\n' % self._nstates
output += 'Tij:\n'
output += str(self._Tij) + '\n'
output += 'Pi:\n'
output += str(self._Pi) + '\n'
output += 'output model:\n'
from bhmm.output_models import OutputModel
if issubclass(self.__class__, OutputModel):
output += str(OutputModel.__str__(self))
else:
output += str(self.output_model)
output += '\n'
return output
def _assert_spectral_decomposition(self):
if not self._spectral_decomp_available:
raise RuntimeError('Trying to access eigenvalues or eigenvectors, but spectral decomposition is not '
'available.')
    @property
    def lag(self):
        r""" Lag time of the model, i.e. the number of observed trajectory steps made by the transition matrix """
        return self._lag
    @property
    def is_reversible(self):
        r""" Whether the HMM transition matrix is reversible """
        return self._reversible
    @property
    def is_stationary(self):
        r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
        of the hidden transition matrix. """
        return self._stationary
    @property
    def nstates(self):
        r""" The number of hidden states """
        return self._nstates
    @property
    def initial_distribution(self):
        r""" The initial distribution of the hidden states (ndarray of length nstates) """
        return self._Pi
@property
def stationary_distribution(self):
r""" The stationary distribution of hidden states.
Raises
------
ValueError if the HMM is not stationary
"""
if self._stationary:
return self._Pi
else:
raise ValueError('HMM is not stationary')
    @property
    def transition_matrix(self):
        r""" The hidden transition matrix (row-stochastic, shape (nstates, nstates)) """
        return self._Tij
@property
def eigenvalues(self):
    r""" Hidden transition matrix eigenvalues

    Returns
    -------
    ts : ndarray(m)
        transition matrix eigenvalues :math:`\lambda_i, i = 1,...,k`., sorted by descending norm.
    """
    # Values are cached by a prior spectral decomposition; fail loudly if
    # that decomposition was never performed.
    self._assert_spectral_decomposition()
    return self._eigenvalues

@property
def eigenvectors_left(self):
    r""" Left eigenvectors of the hidden transition matrix

    Returns
    -------
    L : ndarray(nstates,nstates)
        left eigenvectors in a row matrix. l_ij is the j'th component of the i'th left eigenvector
    """
    self._assert_spectral_decomposition()
    return self._L

@property
def eigenvectors_right(self):
    r""" Right eigenvectors of the hidden transition matrix

    Returns
    -------
    R : ndarray(nstates,nstates)
        right eigenvectors in a column matrix. r_ij is the i'th component of the j'th right eigenvector
    """
    self._assert_spectral_decomposition()
    return self._R
@property
def timescales(self):
    r""" Relaxation timescales of the hidden transition matrix

    Returns
    -------
    ts : ndarray(m)
        relaxation timescales in units of the input trajectory time step,
        defined by :math:`-tau / ln | \lambda_i |, i = 2,...,nstates`, where
        :math:`\lambda_i` are the hidden transition matrix eigenvalues.
    """
    # Local import keeps msmtools an optional dependency at module import time.
    from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales
    ts = _timescales(self._eigenvalues, tau=self._lag)
    # Drop the first timescale, matching the i = 2,...,nstates definition above.
    # NOTE(review): this reads self._eigenvalues without calling
    # _assert_spectral_decomposition() first, unlike the eigen properties.
    return ts[1:]
@property
def lifetimes(self):
    r""" Lifetimes of states of the hidden transition matrix

    Returns
    -------
    l : ndarray(nstates)
        state lifetimes in units of the input trajectory time step,
        defined by :math:`-tau / ln | p_{ii} |, i = 1,...,nstates`, where
        :math:`p_{ii}` are the diagonal entries of the hidden transition matrix.
    """
    # Self-transition probabilities sit on the diagonal of Tij.
    self_transition_probabilities = np.diag(self.transition_matrix)
    return -self._lag / np.log(self_transition_probabilities)
def count_matrix(self, dtype=np.float64):
    # TODO: does this belong here or to the BHMM sampler, or in a subclass containing HMM with data?
    """Compute the transition count matrix from hidden state trajectory.

    Parameters
    ----------
    dtype : numpy.dtype, optional, default=numpy.float64
        The numpy dtype to use for the count matrix.

    Returns
    -------
    C : numpy.array with shape (nstates,nstates)
        C[i,j] is the number of transitions observed from state i to state j

    Raises
    ------
    RuntimeError
        A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it.
    """
    if self.hidden_state_trajectories is None:
        raise RuntimeError('HMM model does not have a hidden state trajectory.')
    # Count transitions at lag 1 over all hidden trajectories; the
    # module-level count_matrix helper returns a sparse matrix.
    C = count_matrix(self.hidden_state_trajectories, 1, nstates=self._nstates)
    # NOTE(review): the `dtype` argument is accepted for backward
    # compatibility but is not applied; the returned dtype is whatever the
    # sparse count matrix produces. TODO: confirm whether callers rely on it.
    return C.toarray()
# def emission_probability(self, state, observation):
# """Compute the emission probability of an observation from a given state.
#
# Parameters
# ----------
# state : int
# The state index for which the emission probability is to be computed.
#
# Returns
# -------
# Pobs : float
# The probability (or probability density, if continuous) of the observation.
#
# TODO
# ----
# * Vectorize
#
# Examples
# --------
#
# Compute the probability of observing an emission of 0 from state 0.
#
# >>> from bhmm import testsystems
# >>> model = testsystems.dalton_model(nstates=3)
# >>> state_index = 0
# >>> observation = 0.0
# >>> Pobs = model.emission_probability(state_index, observation)
#
# """
# return self.output_model.p_o_i(observation, state)
# def log_emission_probability(self, state, observation):
# """Compute the log emission probability of an observation from a given state.
#
# Parameters
# ----------
# state : int
# The state index for which the emission probability is to be computed.
#
# Returns
# -------
# log_Pobs : float
# The log probability (or probability density, if continuous) of the observation.
#
# TODO
# ----
# * Vectorize
#
# Examples
# --------
#
# Compute the log probability of observing an emission of 0 from state 0.
#
# >>> from bhmm import testsystems
# >>> model = testsystems.dalton_model(nstates=3)
# >>> state_index = 0
# >>> observation = 0.0
# >>> log_Pobs = model.log_emission_probability(state_index, observation)
#
# """
# return self.output_model.log_p_o_i(observation, state)
def collect_observations_in_state(self, observations, state_index):
    # TODO: this would work well in a subclass with data
    """Collect a vector of all observations belonging to a specified hidden state.

    Parameters
    ----------
    observations : list of numpy.array
        List of observed trajectories.
    state_index : int
        The index of the hidden state for which corresponding observations are to be retrieved.

    Returns
    -------
    collected_observations : numpy.array with shape (nsamples,)
        The collected vector of observations belonging to the specified hidden state.

    Raises
    ------
    RuntimeError
        A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it.
    """
    if not self.hidden_state_trajectories:
        raise RuntimeError('HMM model does not have a hidden state trajectory.')
    dtype = observations[0].dtype
    # Gather the matching slice of each trajectory, then concatenate once.
    pieces = [np.array([], dtype=dtype)]
    for hidden_traj, obs_traj in zip(self.hidden_state_trajectories, observations):
        matching = np.where(hidden_traj == state_index)[0]
        pieces.append(obs_traj[matching])
    return np.concatenate(pieces)
def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32):
    """Generate a synthetic state trajectory.

    Parameters
    ----------
    nsteps : int
        Number of steps in the synthetic state trajectory to be generated.
    initial_Pi : np.array of shape (nstates,), optional, default=None
        The initial probability distribution, if samples are not to be taken from the intrinsic
        initial distribution.
    start : int
        starting state. Exclusive with initial_Pi
    stop : int
        stopping state. Trajectory will terminate when reaching the stopping state before length number of steps.
    dtype : numpy.dtype, optional, default=numpy.int32
        The numpy dtype to use to store the synthetic trajectory.

    Returns
    -------
    states : np.array of shape (nstates,) of dtype=np.int32
        The trajectory of hidden states, with each element in range(0,nstates).

    Examples
    --------
    Generate a synthetic state trajectory of a specified length.

    >>> from bhmm import testsystems
    >>> model = testsystems.dalton_model()
    >>> states = model.generate_synthetic_state_trajectory(nsteps=100)
    """
    # consistency check
    if initial_Pi is not None and start is not None:
        raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.')
    # Generate first state sample.
    if start is None:
        if initial_Pi is not None:
            # NOTE(review): np.random.choice with size=1 returns a length-1
            # array, not a scalar; msmgen.generate_traj appears to accept
            # this as `start` -- confirm if a scalar is expected.
            start = np.random.choice(range(self._nstates), size=1, p=initial_Pi)
        else:
            start = np.random.choice(range(self._nstates), size=1, p=self._Pi)
    # Generate and return trajectory
    from msmtools import generation as msmgen
    traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1)
    return traj.astype(dtype)
def generate_synthetic_observation(self, state):
    """Generate a synthetic observation from a given state.

    Parameters
    ----------
    state : int
        The index of the state from which the observable is to be sampled.

    Returns
    -------
    observation : float
        The observation from the given state.

    Examples
    --------
    Generate a synthetic observation from a single state.

    >>> from bhmm import testsystems
    >>> model = testsystems.dalton_model()
    >>> observation = model.generate_synthetic_observation(0)
    """
    # Sampling is delegated entirely to the attached output (emission) model.
    return self.output_model.generate_observation_from_state(state)
def generate_synthetic_observation_trajectory(self, length, initial_Pi=None):
    """Generate a synthetic realization of observables.

    Parameters
    ----------
    length : int
        Length of synthetic state trajectory to be generated.
    initial_Pi : np.array of shape (nstates,), optional, default=None
        The initial probability distribution, if samples are not to be taken from equilibrium.

    Returns
    -------
    o_t : np.array of shape (nstates,) of dtype=np.float32
        The trajectory of observations.
    s_t : np.array of shape (nstates,) of dtype=np.int32
        The trajectory of hidden states, with each element in range(0,nstates).

    Examples
    --------
    Generate a synthetic observation trajectory for an equilibrium realization.

    >>> from bhmm import testsystems
    >>> model = testsystems.dalton_model()
    >>> [o_t, s_t] = model.generate_synthetic_observation_trajectory(length=100)
    """
    # Sample the hidden chain first, then emit one observation per hidden state.
    hidden_traj = self.generate_synthetic_state_trajectory(length, initial_Pi=initial_Pi)
    obs_traj = self.output_model.generate_observation_trajectory(hidden_traj)
    return [obs_traj, hidden_traj]
def generate_synthetic_observation_trajectories(self, ntrajectories, length, initial_Pi=None):
    """Generate a number of synthetic realization of observables from this model.

    Parameters
    ----------
    ntrajectories : int
        The number of trajectories to be generated.
    length : int
        Length of synthetic state trajectory to be generated.
    initial_Pi : np.array of shape (nstates,), optional, default=None
        The initial probability distribution, if samples are not to be taken from equilibrium.

    Returns
    -------
    O : list of np.array of shape (nstates,) of dtype=np.float32
        The trajectories of observations
    S : list of np.array of shape (nstates,) of dtype=np.int32
        The trajectories of hidden states

    Examples
    --------
    Generate a number of synthetic trajectories.

    >>> from bhmm import testsystems
    >>> model = testsystems.dalton_model()
    >>> [O, S] = model.generate_synthetic_observation_trajectories(ntrajectories=10, length=100)
    """
    # Each call yields an [observations, states] pair; unzip into parallel lists.
    pairs = [self.generate_synthetic_observation_trajectory(length=length, initial_Pi=initial_Pi)
             for _ in range(ntrajectories)]
    O = [obs for obs, _states in pairs]
    S = [states for _obs, states in pairs]
    return [O, S]
|
marscher/bhmm
|
bhmm/hmm/generic_hmm.py
|
Python
|
lgpl-3.0
| 20,937
|
[
"Gaussian"
] |
f17d44325894695eb3dd70c15b207d67574c046f9788e4865148f865b6ec04aa
|
from __future__ import unicode_literals, division, absolute_import
import logging
import re
import time
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
from sqlalchemy import Table, Column, Integer, String, Unicode, Date, DateTime, Time, or_, func
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema
from flexget import plugin
from flexget import options
from flexget.db_schema import upgrade
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.utils import requests
from flexget.utils.tools import TimedDict
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimplePersistence
from flexget.logger import console
# Schema version 3; older versions cannot be migrated (see upgrade_database).
Base = db_schema.versioned_base('api_trakt', 3)
log = logging.getLogger('api_trakt')
# Production Site -- credentials for FlexGet's registered trakt.tv application.
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api-v2launch.trakt.tv/'
PIN_URL = 'http://trakt.tv/pin/346'
# Stores the last time we checked for updates for shows/movies
updated = SimplePersistence('api_trakt')
# Oauth account authentication
class TraktUserAuth(Base):
    """Per-account OAuth tokens for the trakt API."""
    __tablename__ = 'trakt_user_auth'

    account = Column(Unicode, primary_key=True)  # arbitrary user-chosen account name
    access_token = Column(Unicode)
    refresh_token = Column(Unicode)
    created = Column(DateTime)   # when the token was issued
    expires = Column(DateTime)   # absolute expiry time of the access token

    def __init__(self, account, access_token, refresh_token, created, expires):
        """
        :param created: POSIX timestamp of token creation (seconds since epoch).
        :param expires: token lifetime in seconds; converted here to an absolute datetime.
        """
        self.account = account
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.expires = token_expire_date(expires)
        self.created = token_created_date(created)
def token_expire_date(expires):
    """Return the wall-clock datetime at which a token valid for `expires` seconds runs out."""
    lifetime = timedelta(seconds=expires)
    return datetime.now() + lifetime
def token_created_date(created):
    """Convert a POSIX timestamp (as reported by the trakt API) to a local datetime."""
    creation_time = datetime.fromtimestamp(created)
    return creation_time
def device_auth():
    """Run trakt's OAuth device-code flow.

    Requests a device/user code pair, asks the user (via console) to authorize
    it on the trakt website, then polls until the user approves, denies, or
    the code expires.

    :returns: dict with the token payload from trakt (access_token, refresh_token, ...).
    :raises plugin.PluginError: on denial, expiry, invalid code, or network failure.
    """
    data = {'client_id': CLIENT_ID}
    try:
        r = requests.post(get_api_url('oauth/device/code'), data=data).json()
        device_code = r['device_code']
        user_code = r['user_code']
        expires_in = r['expires_in']
        interval = r['interval']
        console('Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in '
                '{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0))
        log.debug('Polling for user authorization.')
        data['code'] = device_code
        data['client_secret'] = CLIENT_SECRET
        end_time = time.time() + expires_in
        console('Waiting...', end='')
        # stop polling after expires_in seconds
        while time.time() < end_time:
            time.sleep(interval)
            polling_request = requests.post(get_api_url('oauth/device/token'), data=data,
                                            raise_status=False)
            if polling_request.status_code == 200:  # success
                return polling_request.json()
            elif polling_request.status_code == 400:  # pending -- waiting for user
                console('...', end='')
            elif polling_request.status_code == 404:  # not found -- invalid device_code
                raise plugin.PluginError('Invalid device code. Open an issue on Github.')
            elif polling_request.status_code == 409:  # already used -- user already approved
                raise plugin.PluginError('User code has already been approved.')
            elif polling_request.status_code == 410:  # expired -- restart process
                break
            elif polling_request.status_code == 418:  # denied -- user denied code
                raise plugin.PluginError('User code has been denied.')
            elif polling_request.status_code == 429:  # polling too fast
                log.warning('Polling too quickly. Upping the interval. No action required.')
                interval += 1
        # Reached on timeout or on a 410 (expired) break from the loop.
        raise plugin.PluginError('User code has expired. Please try again.')
    except requests.RequestException as e:
        raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e.args[0]))
def token_auth(data):
    """POST `data` to trakt's oauth/token endpoint and return the token payload dict.

    :raises plugin.PluginError: if the HTTP request fails.
    """
    try:
        return requests.post(get_api_url('oauth/token'), data=data).json()
    except requests.RequestException as e:
        raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e.args[0]))
def get_access_token(account, token=None, refresh=False, re_auth=False):
    """
    Gets authorization info from a pin or refresh token.

    :param account: Arbitrary account name to attach authorization to.
    :param unicode token: The pin or refresh token, as supplied by the trakt website.
    :param bool refresh: If True, refresh the access token using refresh_token from db.
    :param bool re_auth: If True, account is re-authorized even if it already exists in db.
    :raises RequestException: If there is a network error while authorizing.
    """
    data = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET
    }
    with Session() as session:
        acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
        if acc and datetime.now() < acc.expires and not refresh and not re_auth:
            # Cached token is still valid; no network round-trip needed.
            return acc.access_token
        else:
            if acc and (refresh or datetime.now() >= acc.expires) and not re_auth:
                # Known account with an expired (or force-refreshed) token.
                log.debug('Using refresh token to re-authorize account %s.', account)
                data['refresh_token'] = acc.refresh_token
                data['grant_type'] = 'refresh_token'
                token_dict = token_auth(data)
            elif token:
                # We are only in here if a pin was specified, so it's safe to use console instead of logging
                console('Warning: PIN authorization has been deprecated. Use Device Authorization instead.')
                data['code'] = token
                data['grant_type'] = 'authorization_code'
                data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob'
                token_dict = token_auth(data)
            else:
                log.debug('No pin specified for an unknown account %s. Attempting to authorize device.', account)
                token_dict = device_auth()
            try:
                access_token = token_dict['access_token']
                refresh_token = token_dict['refresh_token']
                # trakt reports creation as a POSIX timestamp and expiry as a lifetime in seconds.
                created_at = token_dict.get('created_at', time.time())
                expires_in = token_dict['expires_in']
                if acc:
                    acc.access_token = access_token
                    acc.refresh_token = refresh_token
                    acc.created = token_created_date(created_at)
                    acc.expires = token_expire_date(expires_in)
                else:
                    acc = TraktUserAuth(account, access_token, refresh_token, created_at,
                                        expires_in)
                    session.add(acc)
                return access_token
            except requests.RequestException as e:
                # NOTE(review): a missing key in token_dict raises KeyError, which
                # this handler does not catch -- confirm whether that is intended.
                raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e.args[0]))
def make_list_slug(name):
    """Return the slug for use in url for given list name."""
    # Some characters are simply dropped from the url...
    stripped = ''.join(ch for ch in name.lower() if ch not in '!@#$%^*()[]{}/=?+\\|')
    # ...while '&' and spaces are substituted.
    return stripped.replace('&', 'and').replace(' ', '-')
def get_session(account=None, token=None):
    """
    Creates a requests session ready to talk to trakt API with FlexGet's api key.
    Can also add user level authentication if `account` parameter is given.

    :param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
        authenticated for that account.
    """
    session = requests.Session()
    session.headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': 2,
        'trakt-api-key': CLIENT_ID,
    }
    if account:
        # User-level auth: attach a bearer token for the given account.
        access_token = get_access_token(account, token)
        if access_token:
            session.headers.update({'Authorization': 'Bearer %s' % access_token})
    return session
def get_api_url(*endpoint):
    """
    Get the address of a trakt API endpoint.

    :param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
        Multiple parameters can also be specified instead of a single iterable.
    :returns: The absolute url to the specified API endpoint.
    """
    # A single non-string argument is treated as an iterable of path pieces.
    # NOTE(review): `basestring` and `unicode` are Python 2 builtins; this
    # function will NameError on Python 3 unless compat shims are in scope.
    if len(endpoint) == 1 and not isinstance(endpoint[0], basestring):
        endpoint = endpoint[0]
    # Make sure integer portions are turned into strings first too
    url = API_URL + '/'.join(map(unicode, endpoint))
    return url
@upgrade('api_trakt')
def upgrade_database(ver, session):
    """Schema upgrade hook: database versions <= 2 cannot be migrated and must be rebuilt."""
    if ver <= 2:
        raise db_schema.UpgradeImpossible
    return ver
def get_entry_ids(entry):
    """Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
    # The trakt id can come from a movie, show or episode field; first match wins.
    trakt_fields = ('trakt_movie_id', 'trakt_show_id', 'trakt_episode_id')
    other_fields = (('tmdb', 'tmdb_id'), ('tvdb', 'tvdb_id'),
                    ('imdb', 'imdb_id'), ('tvrage', 'tvrage_id'))
    ids = {}
    # First pass uses only already-populated fields; lazy lookups only if that yields nothing.
    for lazy in (False, True):
        ids = {}
        for field in trakt_fields:
            if entry.get(field, eval_lazy=lazy):
                ids['trakt'] = entry[field]
                break
        for key, field in other_fields:
            if entry.get(field, eval_lazy=lazy):
                ids[key] = entry[field]
        if ids:
            break
    return ids
class TraktGenre(Base):
    """A genre tag, shared by shows and movies via the association tables below."""
    __tablename__ = 'trakt_genres'

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Unicode)  # stored with '-' replaced by ' ' (see get_db_genres)
# Many-to-many association tables linking shows/movies to genres.
show_genres_table = Table('trakt_show_genres', Base.metadata,
                          Column('show_id', Integer, ForeignKey('trakt_shows.id')),
                          Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(show_genres_table)

movie_genres_table = Table('trakt_movie_genres', Base.metadata,
                           Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
                           Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(movie_genres_table)
def get_db_genres(genres, session):
    """Takes a list of genres as strings, returns the database instances for them."""
    db_genres = []
    for raw_name in genres:
        # trakt slugs use '-'; normalize to spaces before lookup/insert.
        name = raw_name.replace('-', ' ')
        db_genre = session.query(TraktGenre).filter(TraktGenre.name == name).first()
        if db_genre is None:
            db_genre = TraktGenre(name=name)
            session.add(db_genre)
        db_genres.append(db_genre)
    return db_genres
class TraktActor(Base):
    """An actor, shared between shows and movies via the association tables below."""
    __tablename__ = 'trakt_actors'

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Unicode, nullable=False)
    imdb_id = Column(Unicode)
    trakt_id = Column(Unicode)
    tmdb_id = Column(Unicode)

    def __init__(self, name, trakt_id, imdb_id=None, tmdb_id=None):
        self.name = name
        self.trakt_id = trakt_id
        self.imdb_id = imdb_id
        self.tmdb_id = tmdb_id
# Many-to-many association tables linking shows/movies to actors.
show_actors_table = Table('trakt_show_actors', Base.metadata,
                          Column('show_id', Integer, ForeignKey('trakt_shows.id')),
                          Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(show_actors_table)

movie_actors_table = Table('trakt_movie_actors', Base.metadata,
                           Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
                           Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(movie_actors_table)
def get_db_actors(ident, style):
    """Fetch the cast list for a show/movie from trakt and map it to TraktActor rows.

    :param ident: trakt id (or slug) of the show/movie.
    :param style: 'show' or 'movie'; used to build the API endpoint.
    :return: list of TraktActor instances. An empty list is returned when the
        lookup fails, so callers that slice-assign the result
        (``self._actors[:] = get_db_actors(...)``) keep working.
    """
    actors = []
    url = get_api_url(style + 's', ident, 'people')
    req_session = get_session()
    try:
        results = req_session.get(url).json()
        with Session() as session:
            for result in results.get('cast'):
                person = result.get('person')
                name = person.get('name')
                ids = person.get('ids')
                trakt_id = ids.get('trakt')
                imdb_id = ids.get('imdb')
                tmdb_id = ids.get('tmdb')
                # Reuse an existing row for this actor if we already have one.
                actor = session.query(TraktActor).filter(TraktActor.trakt_id == trakt_id).first()
                if not actor:
                    actor = TraktActor(name, trakt_id, imdb_id, tmdb_id)
                actors.append(actor)
        return actors
    except requests.RequestException as e:
        log.debug('Error searching for actors for trakt id %s', e)
        # BUGFIX: previously returned None here, which made slice-assigning
        # callers raise TypeError; an empty list preserves the best-effort intent.
        return []
def list_actors(actors):
    """Return a dict mapping each actor's trakt id (as str) to its name/imdb/tmdb info."""
    return {
        str(actor.trakt_id): {
            'name': actor.name,
            'imdb_id': str(actor.imdb_id),
            'tmdb_id': str(actor.tmdb_id),
        }
        for actor in actors
    }
class TraktEpisode(Base):
    """Cached metadata for a single episode, as returned by the trakt API."""
    __tablename__ = 'trakt_episodes'

    id = Column(Integer, primary_key=True, autoincrement=False)  # trakt id, assigned by the API
    tvdb_id = Column(Integer)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tvrage_id = Column(Unicode)
    title = Column(Unicode)
    season = Column(Integer)
    number = Column(Integer)      # episode number within the season
    number_abs = Column(Integer)  # absolute episode number within the show
    overview = Column(Unicode)
    first_aired = Column(DateTime)
    updated_at = Column(DateTime)  # last update on trakt's side
    cached_at = Column(DateTime)   # when this row was last refreshed locally
    series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)

    def __init__(self, trakt_episode):
        super(TraktEpisode, self).__init__()
        self.update(trakt_episode)

    def update(self, trakt_episode):
        """Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
        # Guard against overwriting one episode's row with another episode's data.
        if self.id and self.id != trakt_episode['ids']['trakt']:
            raise Exception('Tried to update db ep with different ep data')
        elif not self.id:
            self.id = trakt_episode['ids']['trakt']
        self.imdb_id = trakt_episode['ids']['imdb']
        self.tmdb_id = trakt_episode['ids']['tmdb']
        self.tvrage_id = trakt_episode['ids']['tvrage']
        self.tvdb_id = trakt_episode['ids']['tvdb']
        self.first_aired = None
        if trakt_episode.get('first_aired'):
            self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
        self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
        self.cached_at = datetime.now()
        for col in ['title', 'season', 'number', 'number_abs', 'overview']:
            setattr(self, col, trakt_episode.get(col))

    @property
    def expired(self):
        # TODO should episode have its own expiration function?
        # Episodes currently never expire on their own; refresh is driven by the show.
        return False
class TraktShow(Base):
    """Cached metadata for a show, plus its episodes, as returned by the trakt API."""
    __tablename__ = 'trakt_shows'

    id = Column(Integer, primary_key=True, autoincrement=False)  # trakt id, assigned by the API
    title = Column(Unicode)
    year = Column(Integer)
    slug = Column(Unicode)
    tvdb_id = Column(Integer)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tvrage_id = Column(Unicode)
    overview = Column(Unicode)
    first_aired = Column(DateTime)
    air_day = Column(Unicode)
    air_time = Column(Time)
    runtime = Column(Integer)
    certification = Column(Unicode)
    network = Column(Unicode)
    country = Column(Unicode)
    status = Column(String)  # e.g. 'ended' / 'canceled' -- consulted by `expired`
    rating = Column(Integer)
    votes = Column(Integer)
    language = Column(Unicode)
    aired_episodes = Column(Integer)
    episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
    genres = relation(TraktGenre, secondary=show_genres_table)
    _actors = relation(TraktActor, secondary=show_actors_table)
    updated_at = Column(DateTime)  # last update on trakt's side
    cached_at = Column(DateTime)   # when this row was last refreshed locally

    def __init__(self, trakt_show, session):
        super(TraktShow, self).__init__()
        self.update(trakt_show, session)

    def update(self, trakt_show, session):
        """Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
        # Guard against overwriting one show's row with another show's data.
        if self.id and self.id != trakt_show['ids']['trakt']:
            raise Exception('Tried to update db show with different show data')
        elif not self.id:
            self.id = trakt_show['ids']['trakt']
        self.slug = trakt_show['ids']['slug']
        self.imdb_id = trakt_show['ids']['imdb']
        self.tmdb_id = trakt_show['ids']['tmdb']
        self.tvrage_id = trakt_show['ids']['tvrage']
        self.tvdb_id = trakt_show['ids']['tvdb']
        if trakt_show.get('air_time'):
            self.air_time = dateutil_parse(trakt_show.get('air_time'), ignoretz=True)
        else:
            self.air_time = None
        if trakt_show.get('first_aired'):
            self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
        else:
            self.first_aired = None
        self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
        # NOTE(review): 'runtime' appears twice in this list; the duplicate is
        # harmless (same value set twice) but could be removed.
        for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year', 'air_day',
                    'runtime', 'certification', 'network', 'country', 'status', 'aired_episodes']:
            setattr(self, col, trakt_show.get(col))
        self.genres[:] = get_db_genres(trakt_show.get('genres', []), session)
        self.cached_at = datetime.now()

    def get_episode(self, season, number, only_cached=False):
        """Return the episode row for (season, number), fetching from trakt if missing or stale.

        :raises LookupError: if not cached and `only_cached` is set, or if the remote lookup fails.
        """
        # TODO: Does series data being expired mean all episode data should be refreshed?
        episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first()
        if not episode or self.expired:
            url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full')
            if only_cached:
                raise LookupError('Episode %s %s not found in cache' % (season, number))
            log.debug('Episode %s %s not found in cache, looking up from trakt.', season, number)
            try:
                ses = get_session()
                data = ses.get(url).json()
            except requests.RequestException:
                raise LookupError('Error Retrieving Trakt url: %s' % url)
            if not data:
                raise LookupError('No data in response from trakt %s' % url)
            # Match on trakt id rather than (season, number), in case the episode moved.
            episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
            if episode:
                episode.update(data)
            else:
                episode = TraktEpisode(data)
                self.episodes.append(episode)
        return episode

    @property
    def expired(self):
        """
        :return: True if show details are considered to be expired, ie. need of update
        """
        # TODO stolen from imdb plugin, maybe there's a better way?
        if self.cached_at is None:
            log.debug('cached_at is None: %s', self)
            return True
        refresh_interval = 2
        # if show has been cancelled or ended, then it is unlikely to be updated often
        if self.year and (self.status == 'ended' or self.status == 'canceled'):
            # Make sure age is not negative
            age = max((datetime.now().year - self.year), 0)
            refresh_interval += age * 5
            log.debug('show `%s` age %i expires in %i days', self.title, age, refresh_interval)
        return self.cached_at < datetime.now() - timedelta(days=refresh_interval)

    @property
    def actors(self):
        # Lazily populate actor info from the trakt API on first access.
        if not self._actors:
            self._actors[:] = get_db_actors(self.id, 'show')
        return self._actors

    def __repr__(self):
        return '<name=%s, id=%s>' % (self.title, self.id)
class TraktMovie(Base):
    """Cached metadata for a movie, as returned by the trakt API."""
    __tablename__ = 'trakt_movies'

    id = Column(Integer, primary_key=True, autoincrement=False)  # trakt id, assigned by the API
    title = Column(Unicode)
    year = Column(Integer)
    slug = Column(Unicode)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tagline = Column(Unicode)
    overview = Column(Unicode)
    released = Column(Date)
    runtime = Column(Integer)
    rating = Column(Integer)
    votes = Column(Integer)
    language = Column(Unicode)
    updated_at = Column(DateTime)  # last update on trakt's side
    cached_at = Column(DateTime)   # when this row was last refreshed locally
    genres = relation(TraktGenre, secondary=movie_genres_table)
    _actors = relation(TraktActor, secondary=movie_actors_table)

    def __init__(self, trakt_movie, session):
        super(TraktMovie, self).__init__()
        self.update(trakt_movie, session)

    def update(self, trakt_movie, session):
        """Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
        # Guard against overwriting one movie's row with another movie's data.
        if self.id and self.id != trakt_movie['ids']['trakt']:
            raise Exception('Tried to update db movie with different movie data')
        elif not self.id:
            self.id = trakt_movie['ids']['trakt']
        self.slug = trakt_movie['ids']['slug']
        self.imdb_id = trakt_movie['ids']['imdb']
        self.tmdb_id = trakt_movie['ids']['tmdb']
        for col in ['title', 'overview', 'runtime', 'rating', 'votes', 'language', 'tagline', 'year']:
            setattr(self, col, trakt_movie.get(col))
        # BUGFIX: guard on the incoming API data, not on self.released. The old
        # check `if self.released:` could never be true on a freshly created row
        # (the column starts as None), so the release date was never stored.
        # This mirrors how TraktShow handles 'first_aired'.
        if trakt_movie.get('released'):
            self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True)
        self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
        self.genres[:] = get_db_genres(trakt_movie.get('genres', []), session)
        self.cached_at = datetime.now()

    @property
    def expired(self):
        """
        :return: True if movie details are considered to be expired, ie. need of update
        """
        # TODO stolen from imdb plugin, maybe there's a better way?
        if self.updated_at is None:
            log.debug('updated_at is None: %s', self)
            return True
        refresh_interval = 2
        if self.year:
            # Make sure age is not negative
            age = max((datetime.now().year - self.year), 0)
            # Older movies change rarely, so refresh them less often.
            refresh_interval += age * 5
            log.debug('movie `%s` age %i expires in %i days', self.title, age, refresh_interval)
        return self.cached_at < datetime.now() - timedelta(days=refresh_interval)

    @property
    def actors(self):
        # Lazily populate actor info from the trakt API on first access.
        if not self._actors:
            self._actors[:] = get_db_actors(self.id, 'movie')
        return self._actors
class TraktShowSearchResult(Base):
    """Caches which TraktShow a raw search string resolved to."""
    __tablename__ = 'trakt_show_search_results'

    id = Column(Integer, primary_key=True)
    search = Column(Unicode, unique=True, nullable=False)  # the query as entered
    series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
    series = relation(TraktShow, backref='search_strings')
class TraktMovieSearchResult(Base):
    """Caches which TraktMovie a raw search string resolved to."""
    __tablename__ = 'trakt_movie_search_results'

    id = Column(Integer, primary_key=True)
    search = Column(Unicode, unique=True, nullable=False)  # the query as entered
    movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
    movie = relation(TraktMovie, backref='search_strings')
def split_title_year(title):
    """Splits title containing a year into a title, year pair."""
    # We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
    match = re.search(r'[\s(]([12]\d{3})\)?$', title)
    if not match:
        return title, None
    return title[:match.start()].strip(), int(match.group(1))
@with_session
def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
               tvdb_id=None, tvrage_id=None, session=None):
    """
    Get the cached info for a given show/movie from the database.

    :param style: Either 'show' or 'movie'
    """
    # Map of model column names to the id values we were given; only non-None
    # entries take part in the lookup.
    ids = {
        'id': trakt_id,
        'slug': trakt_slug,
        'tmdb_id': tmdb_id,
        'imdb_id': imdb_id,
    }
    if style == 'show':
        ids['tvdb_id'] = tvdb_id
        ids['tvrage_id'] = tvrage_id
        model = TraktShow
    else:
        model = TraktMovie
    result = None
    if any(ids.values()):
        # Any single matching id is sufficient.
        # NOTE(review): dict.iteritems is Python 2 only; this line breaks on Python 3.
        result = session.query(model).filter(
            or_(getattr(model, col) == val for col, val in ids.iteritems() if val)).first()
    elif title:
        # Fall back to title (+ optional year); a year embedded in the title
        # string is split out and used when no explicit year was given.
        title, y = split_title_year(title)
        year = year or y
        query = session.query(model).filter(model.title == title)
        if year:
            query = query.filter(model.year == year)
        result = query.first()
    return result
def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
              tvdb_id=None, tvrage_id=None):
    """Return the matching media object (dict) from the trakt api.

    Resolution order: explicit trakt id/slug, then other ids (imdb/tmdb/...),
    then a title+year search. Raises LookupError when nothing matches or the
    api request fails.
    """
    # TODO: Better error messages
    # Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24)
    trakt_id = trakt_id or trakt_slug
    req_session = get_session()
    last_search_query = None  # used if no results are found
    last_search_type = None
    if not trakt_id:
        # Try finding trakt_id based on other ids
        ids = {
            'imdb': imdb_id,
            'tmdb': tmdb_id
        }
        if style == 'show':
            ids['tvdb'] = tvdb_id
            ids['tvrage'] = tvrage_id
        # BUGFIX: `iteritems` is Python 2 only; `items` works on both 2 and 3.
        for id_type, identifier in ids.items():
            if not identifier:
                continue
            try:
                last_search_query = identifier
                last_search_type = id_type
                log.debug('Searching with params: %s=%s', id_type, identifier)
                results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json()
            except requests.RequestException as e:
                log.debug('Error searching for trakt id %s', e)
                continue
            for result in results:
                if result['type'] != style:
                    continue
                trakt_id = result[style]['ids']['trakt']
                break
            if trakt_id:
                break
    if not trakt_id and title:
        last_search_query = title
        last_search_type = 'title'
        # Try finding trakt id based on title and year
        if style == 'show':
            parsed_title, y = split_title_year(title)
            y = year or y
        else:
            title_parser = get_plugin_by_name('parsing').instance.parse_movie(title)
            y = year or title_parser.year
            parsed_title = title_parser.name
        try:
            params = {'query': parsed_title, 'type': style, 'year': y}
            log.debug('Searching with params: %s', ', '.join('{}={}'.format(k, v) for (k, v) in params.items()))
            results = req_session.get(get_api_url('search'), params=params).json()
        except requests.RequestException as e:
            raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
        for result in results:
            if year and result[style]['year'] != year:
                continue
            if parsed_title.lower() == result[style]['title'].lower():
                trakt_id = result[style]['ids']['trakt']
                break
        # grab the first result if there is no exact match
        if not trakt_id and results:
            trakt_id = results[0][style]['ids']['trakt']
    if not trakt_id:
        raise LookupError('Unable to find %s="%s" on trakt.' % (last_search_type, last_search_query))
    # Get actual data from trakt
    try:
        return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full'}).json()
    except requests.RequestException as e:
        raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
def update_collection_cache(style_ident, username=None, account=None):
    """Fetch a user's trakt.tv collection and refresh the in-memory cache.

    :param style_ident: 'movies' or 'shows' (trakt endpoint segment).
    :raises plugin.PluginError: when the trakt request fails.
    """
    if account and not username:
        username = 'me'
    endpoint = get_api_url('users', username, 'collection', style_ident)
    req_session = get_session(account=account)
    try:
        data = req_session.get(endpoint).json()
        if not data:
            log.warning('No collection data returned from trakt.')
            return
        cache = get_user_cache(username=username, account=account)['collection'][style_ident]
        log.verbose('Received %d records from trakt.tv %s\'s collection', len(data), username)
        for item in data:
            if style_ident == 'movies':
                trakt_key = item['movie']['ids']['trakt']
                cache[trakt_key] = item['movie']
                cache[trakt_key]['collected_at'] = dateutil_parse(item['collected_at'], ignoretz=True)
            else:
                trakt_key = item['show']['ids']['trakt']
                cache[trakt_key] = item['show']
                cache[trakt_key]['seasons'] = item['seasons']
                cache[trakt_key]['collected_at'] = dateutil_parse(item['last_collected_at'], ignoretz=True)
    except requests.RequestException as e:
        raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_watched_cache(style_ident, username=None, account=None):
    """Fetch a user's trakt.tv watched history and refresh the in-memory cache.

    :param style_ident: 'movies' or 'shows' (trakt endpoint segment).
    :raises plugin.PluginError: when the trakt request fails.
    """
    if account and not username:
        username = 'me'
    endpoint = get_api_url('users', username, 'watched', style_ident)
    req_session = get_session(account=account)
    try:
        data = req_session.get(endpoint).json()
        if not data:
            log.warning('No watched data returned from trakt.')
            return
        cache = get_user_cache(username=username, account=account)['watched'][style_ident]
        log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username)
        for item in data:
            if style_ident == 'movies':
                trakt_key = item['movie']['ids']['trakt']
                cache[trakt_key] = item['movie']
                cache[trakt_key]['watched_at'] = dateutil_parse(item['last_watched_at'], ignoretz=True)
                cache[trakt_key]['plays'] = item['plays']
            else:
                trakt_key = item['show']['ids']['trakt']
                cache[trakt_key] = item['show']
                cache[trakt_key]['seasons'] = item['seasons']
                cache[trakt_key]['watched_at'] = dateutil_parse(item['last_watched_at'], ignoretz=True)
                cache[trakt_key]['plays'] = item['plays']
    except requests.RequestException as e:
        raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def get_user_cache(username=None, account=None):
    """Return (creating nested dicts as needed) the per-user trakt cache.

    The cache is keyed by 'account|username' and always contains the four
    watched/collection x shows/movies sub-dicts.
    """
    identifier = '{}|{}'.format(account, username or 'me')
    user_cache = ApiTrakt.user_cache.setdefault(identifier, {})
    for section in ('watched', 'collection'):
        for style in ('shows', 'movies'):
            user_cache.setdefault(section, {}).setdefault(style, {})
    return user_cache
class ApiTrakt(object):
    """Lookup facade: cached show/movie lookups plus per-user collection and
    watched-history checks against trakt.tv."""

    # Per-user watched/collection data; entries expire after 15 minutes.
    user_cache = TimedDict(cache_time='15 minutes')

    @staticmethod
    @with_session
    def lookup_series(session=None, only_cached=None, **lookup_params):
        """Return a TraktShow for the lookup params, refreshing from trakt as needed.

        :param only_cached: if truthy, never hit the trakt api; raise on cache miss.
        :raises LookupError: when the show cannot be found.
        """
        series = get_cached('show', session=session, **lookup_params)
        title = lookup_params.get('title', '')
        found = None
        if not series and title:
            # Fall back to previously stored search-string -> show mappings.
            found = session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
                                                                title.lower()).first()
            if found and found.series:
                log.debug('Found %s in previous search results as %s', title, found.series.title)
                series = found.series
        if only_cached:
            if series:
                return series
            raise LookupError('Series %s not found from cache' % lookup_params)
        if series and not series.expired:
            return series
        try:
            trakt_show = get_trakt('show', **lookup_params)
        except LookupError as e:
            if series:
                # Api failed but stale cached data exists; prefer it over failing.
                log.debug('Error refreshing show data from trakt, using cached. %s', e)
                return series
            raise
        series = session.query(TraktShow).filter(TraktShow.id == trakt_show['ids']['trakt']).first()
        if series:
            series.update(trakt_show, session)
        else:
            series = TraktShow(trakt_show, session)
            session.add(series)
        if series and title.lower() == series.title.lower():
            return series
        elif series and not found:
            # Remember this search string so future lookups can skip the api.
            if not session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
                                                               title.lower()).first():
                log.debug('Adding search result to db')
                session.add(TraktShowSearchResult(search=title, series=series))
        elif series and found:
            log.debug('Updating search result in db')
            found.series = series
        return series

    @staticmethod
    @with_session
    def lookup_movie(session=None, only_cached=None, **lookup_params):
        """Return a TraktMovie for the lookup params, refreshing from trakt as needed.

        :param only_cached: if truthy, never hit the trakt api; raise on cache miss.
        :raises LookupError: when the movie cannot be found.
        """
        movie = get_cached('movie', session=session, **lookup_params)
        title = lookup_params.get('title', '')
        found = None
        if not movie and title:
            # Fall back to previously stored search-string -> movie mappings.
            found = session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
                                                                 title.lower()).first()
            if found and found.movie:
                log.debug('Found %s in previous search results as %s', title, found.movie.title)
                movie = found.movie
        if only_cached:
            if movie:
                return movie
            raise LookupError('Movie %s not found from cache' % lookup_params)
        if movie and not movie.expired:
            return movie
        try:
            trakt_movie = get_trakt('movie', **lookup_params)
        except LookupError as e:
            if movie:
                # Api failed but stale cached data exists; prefer it over failing.
                log.debug('Error refreshing movie data from trakt, using cached. %s', e)
                return movie
            raise
        movie = session.query(TraktMovie).filter(TraktMovie.id == trakt_movie['ids']['trakt']).first()
        if movie:
            movie.update(trakt_movie, session)
        else:
            movie = TraktMovie(trakt_movie, session)
            session.add(movie)
        if movie and title.lower() == movie.title.lower():
            return movie
        if movie and not found:
            # Remember this search string so future lookups can skip the api.
            if not session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
                                                                title.lower()).first():
                log.debug('Adding search result to db')
                session.add(TraktMovieSearchResult(search=title, movie=movie))
        elif movie and found:
            log.debug('Updating search result in db')
            found.movie = movie
        return movie

    @staticmethod
    def collected(style, trakt_data, title, username=None, account=None):
        """Return whether the item is in the user's trakt collection.

        :param style: 'show', 'episode' or 'movie'.
        :return: True/False, or None when no collection data could be fetched.
        """
        style_ident = 'movies' if style == 'movie' else 'shows'
        cache = get_user_cache(username=username, account=account)
        if not cache['collection'][style_ident]:
            log.debug('No collection found in cache.')
            update_collection_cache(style_ident, username=username, account=account)
            if not cache['collection'][style_ident]:
                log.warning('No collection data returned from trakt.')
                return
        in_collection = False
        cache = cache['collection'][style_ident]
        if style == 'show':
            if trakt_data.id in cache:
                series = cache[trakt_data.id]
                # specials are not included
                number_of_collected_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
                # A show counts as collected once all aired episodes are owned.
                in_collection = number_of_collected_episodes >= trakt_data.aired_episodes
        elif style == 'episode':
            if trakt_data.show.id in cache:
                series = cache[trakt_data.show.id]
                for s in series['seasons']:
                    if s['number'] == trakt_data.season:
                        # extract all episode numbers currently in collection for the season number
                        episodes = [ep['number'] for ep in s['episodes']]
                        in_collection = trakt_data.number in episodes
                        break
        else:
            # Movies: mere presence in the cache means collected.
            if trakt_data.id in cache:
                in_collection = True
        log.debug('The result for entry "%s" is: %s', title,
                  'Owned' if in_collection else 'Not owned')
        return in_collection

    @staticmethod
    def watched(style, trakt_data, title, username=None, account=None):
        """Return whether the item is in the user's trakt watched history.

        :param style: 'show', 'episode' or 'movie'.
        :return: True/False, or None when no watched data could be fetched.
        """
        style_ident = 'movies' if style == 'movie' else 'shows'
        cache = get_user_cache(username=username, account=account)
        if not cache['watched'][style_ident]:
            log.debug('No watched history found in cache.')
            update_watched_cache(style_ident, username=username, account=account)
            if not cache['watched'][style_ident]:
                log.warning('No watched data returned from trakt.')
                return
        watched = False
        cache = cache['watched'][style_ident]
        if style == 'show':
            if trakt_data.id in cache:
                series = cache[trakt_data.id]
                # specials are not included
                number_of_watched_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
                # NOTE(review): uses == here while collected() uses >= — confirm intended.
                watched = number_of_watched_episodes == trakt_data.aired_episodes
        elif style == 'episode':
            if trakt_data.show.id in cache:
                series = cache[trakt_data.show.id]
                for s in series['seasons']:
                    if s['number'] == trakt_data.season:
                        # extract all episode numbers currently in collection for the season number
                        episodes = [ep['number'] for ep in s['episodes']]
                        watched = trakt_data.number in episodes
                        break
        else:
            # Movies: mere presence in the cache means watched.
            if trakt_data.id in cache:
                watched = True
        log.debug('The result for entry "%s" is: %s', title,
                  'Watched' if watched else 'Not watched')
        return watched
def delete_account(account):
    """Delete the stored trakt authorization for ``account``.

    :raises plugin.PluginError: when no such account is stored.
    """
    with Session() as session:
        stored_auth = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
        if stored_auth is None:
            raise plugin.PluginError('Account %s not found.' % account)
        session.delete(stored_auth)
def do_cli(manager, options):
    """Dispatch the `trakt` CLI subcommands: auth, show, refresh, delete."""
    action = options.action

    if action == 'auth':
        if not options.account:
            console('You must specify an account (local identifier) so we know where to save your access token!')
            return
        try:
            get_access_token(options.account, options.pin, re_auth=True)
        except plugin.PluginError as e:
            console('Authorization failed: %s' % e)
        else:
            console('Successfully authorized Flexget app on Trakt.tv. Enjoy!')
        return

    if action == 'show':
        with Session() as session:
            if not options.account:
                # Print all accounts
                stored_auths = session.query(TraktUserAuth).all()
                if not stored_auths:
                    console('No trakt authorizations stored in database.')
                    return
                console('{:-^21}|{:-^28}|{:-^28}'.format('Account', 'Created', 'Expires'))
                for auth in stored_auths:
                    console('{:<21}|{:>28}|{:>28}'.format(
                        auth.account, auth.created.strftime('%Y-%m-%d'), auth.expires.strftime('%Y-%m-%d')))
                return
            # Show a specific account
            stored_auth = session.query(TraktUserAuth).filter(TraktUserAuth.account == options.account).first()
            if stored_auth:
                console('Authorization expires on %s' % stored_auth.expires)
            else:
                console('Flexget has not been authorized to access your account.')
        return

    if action == 'refresh':
        if not options.account:
            console('Please specify an account')
            return
        try:
            get_access_token(options.account, refresh=True)
        except plugin.PluginError as e:
            console('Authorization failed: %s' % e)
        else:
            console('Successfully refreshed your access token.')
        return

    if action == 'delete':
        if not options.account:
            console('Please specify an account')
            return
        try:
            delete_account(options.account)
        except plugin.PluginError as e:
            console('Deletion failed: %s' % e)
        else:
            console('Successfully deleted your access token.')
@event('options.register')
def register_parser_arguments():
    """Register the `trakt` CLI command and its auth/show/refresh/delete actions."""
    acc_text = 'local identifier which should be used in your config to refer these credentials'
    # Register subcommand
    parser = options.register_command('trakt', do_cli, help='view and manage trakt authentication.')
    # Set up our subparsers
    subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action')
    auth_parser = subparsers.add_parser('auth', help='authorize Flexget to access your Trakt.tv account')
    auth_parser.add_argument('account', metavar='<account>', help=acc_text)
    # PIN is optional on the command line; the auth flow can prompt for it.
    auth_parser.add_argument('pin', metavar='<pin>', help='get this by authorizing FlexGet to use your trakt account '
                                                          'at %s' % PIN_URL, nargs='?')
    show_parser = subparsers.add_parser('show', help='show expiration date for Flexget authorization(s) (don\'t worry, '
                                                     'they will automatically refresh when expired)')
    show_parser.add_argument('account', metavar='<account>', nargs='?', help=acc_text)
    refresh_parser = subparsers.add_parser('refresh', help='manually refresh your access token associated with your'
                                                           ' --account <name>')
    refresh_parser.add_argument('account', metavar='<account>', help=acc_text)
    delete_parser = subparsers.add_parser('delete', help='delete the specified <account> name from local database')
    delete_parser.add_argument('account', metavar='<account>', help=acc_text)
@event('plugin.register')
def register_plugin():
    # Expose this module to FlexGet as the `api_trakt` plugin (plugin API v2).
    plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
|
antivirtel/Flexget
|
flexget/plugins/api_trakt.py
|
Python
|
mit
| 43,235
|
[
"VisIt"
] |
bba9c662fbc41560969c574b84c748ccde1895cd4d3cda9c84daf8d1ed9b14d1
|
from __future__ import print_function, division
import os
from os.path import join
import tempfile
import shutil
import io
from io import BytesIO
try:
from subprocess import STDOUT, CalledProcessError, check_output
except ImportError:
pass
from sympy.core.compatibility import unicode, u_decode
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from .latex import latex
from sympy.utilities.decorator import doctest_depends_on
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
                    disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
            filename=None, outputbuffer=None, preamble=None, dvioptions=None,
            outputTexFile=None, **latex_settings):
    r"""
    View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.

    If the expr argument is an expression, it will be exported to LaTeX and
    then compiled using the available TeX distribution. The first argument,
    'expr', may also be a LaTeX string. The function will then run the
    appropriate viewer for the given output format or use the user defined
    one. By default png output is generated.

    By default pretty Euler fonts are used for typesetting (they were used to
    typeset the well known "Concrete Mathematics" book). For that to work, you
    need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
    texlive-fonts-extra package). If you prefer default AMS fonts or your
    system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
    argument.

    To use viewer auto-detection, let's say for 'png' output, issue

    >>> from sympy import symbols, preview, Symbol
    >>> x, y = symbols("x,y")

    >>> preview(x + y, output='png')

    This will choose 'pyglet' by default. To select a different one, do

    >>> preview(x + y, output='png', viewer='gimp')

    The 'png' format is considered special. For all other formats the rules
    are slightly different. As an example we will take 'dvi' output format. If
    you would run

    >>> preview(x + y, output='dvi')

    then 'view' will look for available 'dvi' viewers on your system
    (predefined in the function, so it will try evince, first, then kdvi and
    xdvi). If nothing is found you will need to set the viewer explicitly.

    >>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')

    This will skip auto-detection and will run user specified
    'superior-dvi-viewer'. If 'view' fails to find it on your system it will
    gracefully raise an exception.

    You may also enter 'file' for the viewer argument. Doing so will cause
    this function to return a file object in read-only mode, if 'filename'
    is unset. However, if it was set, then 'preview' writes the generated
    file to this filename instead.

    There is also support for writing to a BytesIO like object, which needs
    to be passed to the 'outputbuffer' argument.

    >>> from io import BytesIO
    >>> obj = BytesIO()
    >>> preview(x + y, output='png', viewer='BytesIO',
    ...         outputbuffer=obj)

    The LaTeX preamble can be customized by setting the 'preamble' keyword
    argument. This can be used, e.g., to set a different font size, use a
    custom documentclass or import certain set of LaTeX packages.

    >>> preamble = "\\documentclass[10pt]{article}\n" \
    ...            "\\usepackage{amsmath,amsfonts}\\begin{document}"
    >>> preview(x + y, output='png', preamble=preamble)

    If the value of 'output' is different from 'dvi' then command line
    options can be set ('dvioptions' argument) for the execution of the
    'dvi'+output conversion tool. These options have to be in the form of a
    list of strings (see subprocess.Popen).

    Additional keyword args will be passed to the latex call, e.g., the
    symbol_names flag.

    >>> phidd = Symbol('phidd')
    >>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})

    For post-processing the generated TeX File can be written to a file by
    passing the desired filename to the 'outputTexFile' keyword
    argument. To write the TeX code to a file named
    "sample.tex" and run the default png viewer to display the resulting
    bitmap, do

    >>> preview(x + y, outputTexFile="sample.tex")

    """
    # Viewers handled in-process rather than by launching an external program.
    special = [ 'pyglet' ]

    if viewer is None:
        if output == "png":
            viewer = "pyglet"
        else:
            # sorted in order from most pretty to most ugly
            # very discussable, but indeed 'gv' looks awful :)
            # TODO add candidates for windows to list
            candidates = {
                "dvi": [ "evince", "okular", "kdvi", "xdvi" ],
                "ps": [ "evince", "okular", "gsview", "gv" ],
                "pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
            }

            try:
                # Pick the first candidate viewer present on the PATH.
                for candidate in candidates[output]:
                    path = find_executable(candidate)
                    if path is not None:
                        viewer = path
                        break
                else:
                    raise SystemError(
                        "No viewers found for '%s' output format." % output)
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
    else:
        if viewer == "file":
            if filename is None:
                SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
                    "specified filename", deprecated_since_version="0.7.3",
                    useinstead="viewer=\"file\" and filename=\"desiredname\"",
                    issue=7018).warn()
        elif viewer == "StringIO":
            # 'StringIO' is a deprecated alias for the 'BytesIO' viewer.
            SymPyDeprecationWarning(feature="The preview() viewer StringIO",
                useinstead="BytesIO", deprecated_since_version="0.7.4",
                issue=7083).warn()
            viewer = "BytesIO"
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"StringIO\"")
        elif viewer == "BytesIO":
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"BytesIO\"")
        elif viewer not in special and not find_executable(viewer):
            raise SystemError("Unrecognized viewer: %s" % viewer)

    if preamble is None:
        actual_packages = packages + ("amsmath", "amsfonts")
        if euler:
            actual_packages += ("euler",)
        package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
                                             for p in actual_packages])

        preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s

\begin{document}
""" % (package_includes)
    else:
        if len(packages) > 0:
            raise ValueError("The \"packages\" keyword must not be set if a "
                             "custom LaTeX preamble was specified")
    # The rendered LaTeX body is substituted into the '%s' placeholder below.
    latex_main = preamble + '\n%s\n\n' + r"\end{document}"

    if isinstance(expr, str):
        latex_string = expr
    else:
        latex_string = latex(expr, mode='inline', **latex_settings)

    try:
        # All intermediate files live in a scratch directory that is removed
        # in the finally-block at the bottom.
        workdir = tempfile.mkdtemp()

        with io.open(join(workdir, 'texput.tex'), 'w', encoding='utf-8') as fh:
            fh.write(unicode(latex_main) % u_decode(latex_string))

        if outputTexFile is not None:
            shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)

        if not find_executable('latex'):
            raise RuntimeError("latex program is not installed")

        try:
            # Avoid showing a cmd.exe window when running this
            # on Windows
            if os.name == 'nt':
                creation_flag = 0x08000000 # CREATE_NO_WINDOW
            else:
                creation_flag = 0 # Default value
            check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
                          'texput.tex'],
                         cwd=workdir,
                         stderr=STDOUT,
                         creationflags=creation_flag)
        except CalledProcessError as e:
            raise RuntimeError(
                "'latex' exited abnormally with the following output:\n%s" %
                e.output)

        if output != "dvi":
            # Convert the DVI produced by latex into the requested format.
            defaultoptions = {
                "ps": [],
                "pdf": [],
                "png": ["-T", "tight", "-z", "9", "--truecolor"],
                "svg": ["--no-fonts"],
            }

            commandend = {
                "ps": ["-o", "texput.ps", "texput.dvi"],
                "pdf": ["texput.dvi", "texput.pdf"],
                "png": ["-o", "texput.png", "texput.dvi"],
                "svg": ["-o", "texput.svg", "texput.dvi"],
            }

            if output == "svg":
                cmd = ["dvisvgm"]
            else:
                cmd = ["dvi" + output]
            if not find_executable(cmd[0]):
                raise RuntimeError("%s is not installed" % cmd[0])
            try:
                if dvioptions is not None:
                    cmd.extend(dvioptions)
                else:
                    cmd.extend(defaultoptions[output])
                cmd.extend(commandend[output])
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)

            try:
                # Avoid showing a cmd.exe window when running this
                # on Windows
                if os.name == 'nt':
                    creation_flag = 0x08000000 # CREATE_NO_WINDOW
                else:
                    creation_flag = 0 # Default value
                check_output(cmd, cwd=workdir, stderr=STDOUT,
                             creationflags=creation_flag)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s' exited abnormally with the following output:\n%s" %
                    (' '.join(cmd), e.output))

        src = "texput.%s" % (output)

        if viewer == "file":
            if filename is None:
                # Deprecated path: hand the rendered bytes back in a buffer.
                buffer = BytesIO()
                with open(join(workdir, src), 'rb') as fh:
                    buffer.write(fh.read())
                return buffer
            else:
                shutil.move(join(workdir,src), filename)
        elif viewer == "BytesIO":
            with open(join(workdir, src), 'rb') as fh:
                outputbuffer.write(fh.read())
        elif viewer == "pyglet":
            try:
                from pyglet import window, image, gl
                from pyglet.window import key
            except ImportError:
                raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")

            if output == "png":
                from pyglet.image.codecs.png import PNGImageDecoder
                img = image.load(join(workdir, src), decoder=PNGImageDecoder())
            else:
                raise SystemError("pyglet preview works only for 'png' files.")

            # Margin (pixels) around the rendered image in the preview window.
            offset = 25

            config = gl.Config(double_buffer=False)
            win = window.Window(
                width=img.width + 2*offset,
                height=img.height + 2*offset,
                caption="sympy",
                resizable=False,
                config=config
            )

            win.set_vsync(False)

            try:
                def on_close():
                    win.has_exit = True

                win.on_close = on_close

                def on_key_press(symbol, modifiers):
                    # Q or Escape closes the preview window.
                    if symbol in [key.Q, key.ESCAPE]:
                        on_close()

                win.on_key_press = on_key_press

                def on_expose():
                    gl.glClearColor(1.0, 1.0, 1.0, 1.0)
                    gl.glClear(gl.GL_COLOR_BUFFER_BIT)

                    img.blit(
                        (win.width - img.width) / 2,
                        (win.height - img.height) / 2
                    )

                win.on_expose = on_expose

                # Minimal manual event loop; exits when the window is closed
                # or a close key is pressed.
                while not win.has_exit:
                    win.dispatch_events()
                    win.flip()
            except KeyboardInterrupt:
                pass

            win.close()
        else:
            try:
                # Avoid showing a cmd.exe window when running this
                # on Windows
                if os.name == 'nt':
                    creation_flag = 0x08000000 # CREATE_NO_WINDOW
                else:
                    creation_flag = 0 # Default value
                check_output([viewer, src], cwd=workdir, stderr=STDOUT,
                             creationflags=creation_flag)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s %s' exited abnormally with the following output:\n%s" %
                    (viewer, src, e.output))
    finally:
        try:
            shutil.rmtree(workdir) # delete directory
        except OSError as e:
            if e.errno != 2: # code 2 - no such file or directory
                raise
|
wxgeo/geophar
|
wxgeometrie/sympy/printing/preview.py
|
Python
|
gpl-2.0
| 13,192
|
[
"VisIt"
] |
d6861cbe6a7850a76928086a76284abd722df3974608a87b2ea69c1d61870e94
|
# csv2json.py
#
# Copyright 2009 Brian Gershon -- briang at webcollective.coop
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://djangosnippets.org/snippets/1680/
import sys
import getopt  # NOTE(review): historically imported by this script; unused.
import csv
# stdlib json replaces the third-party simplejson dependency; output is
# identical for these plain list/dict/str/number values.
import json
from os.path import dirname, join


def csv_rows_to_fixture(rows, model_name):
    """Convert CSV rows (first row = header) into Django-fixture dicts.

    Column 0 of each data row is the primary key; the remaining columns
    become the ``fields`` mapping, keyed by the header row. Values that
    parse as numbers are stored as int/float. (This fixes the original dead
    ``float`` branch: it was guarded by ``str.isdigit()``, after which
    ``int()`` can never raise, so floats were never converted.) All other
    values are stored as stripped strings.

    :param rows: list of CSV rows (lists of strings); may be empty.
    :param model_name: dotted model label, e.g. 'core.Category'.
    :return: list of ``{"pk": ..., "model": ..., "fields": {...}}`` dicts.
    """
    if not rows:
        return []
    header_row = rows[0]
    entries = []
    for row in rows[1:]:
        pk = row[0]
        fields = {}
        for i in range(len(row) - 1):
            active_field = row[i + 1]
            # convert numeric strings into actual numbers by converting to
            # either int or float; fall back to a stripped string
            try:
                value = int(active_field)
            except ValueError:
                try:
                    value = float(active_field)
                except ValueError:
                    value = active_field.strip()
            fields[header_row[i + 1]] = value
        entries.append({"pk": int(pk), "model": model_name, "fields": fields})
    return entries


def main():
    """Command-line entry point: ``csv2json.py input_file_name model_name``."""
    try:
        script, input_file_name, model_name = sys.argv
    except ValueError:
        print("\nRun via:\n\n%s input_file_name model_name" % sys.argv[0])
        # print("\ne.g. %s airport.csv app_airport.Airport" % sys.argv[0])
        print("\ne.g. %s categorias.csv core.Category" % sys.argv[0])
        print(
            "\nNote: input_file_name should be a path relative to where this script is.")
        sys.exit()

    # BUG FIX: the original concatenated dirname(__file__) + input_file_name
    # without a path separator, which produced a wrong path whenever the
    # script was invoked with a directory prefix; join() is always correct.
    in_file = join(dirname(__file__), input_file_name)
    out_file = in_file + ".json"

    print("Converting %s from CSV to JSON as %s" % (in_file, out_file))

    # Context managers guarantee the files are closed even on error.
    with open(in_file, 'r') as f:
        rows = list(csv.reader(f))
    entries = csv_rows_to_fixture(rows, model_name)
    with open(out_file, 'w') as fo:
        fo.write("%s" % json.dumps(entries, indent=4))


if __name__ == '__main__':
    main()
|
rg3915/django-example
|
myproject/fixtures/csv2json.py
|
Python
|
mit
| 2,352
|
[
"Brian"
] |
210023c4da00126358a10b0fd1b7cd1a0b90e53c0686bc53cfad82bcda4608b9
|
from __future__ import print_function
import sys
import os
import numpy as np
import pylab as plt
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.plotutils import PlotSequence, plothist, loghist
from astrometry.util.util import Tan
from legacypipe.survey import LegacySurveyData, imsave_jpeg, get_rgb
from tractor import *
from legacypipe.forced_photom_decam import run_forced_phot
def main():
    """Synthetic test of forced photometry with positional derivatives.

    Renders a Gaussian point source at a sequence of true RA/Dec offsets on
    a noisy 50x50 image, runs forced photometry on a catalog pinned at the
    origin with derivs=True, and plots how the fitted flux-derivative ratios
    and fluxes behave as a function of the true offset.
    """
    W = H = 50
    pixscale = 0.262 / 3600.
    band = 'r'
    # WCS centered at (RA, Dec) = (0, 0); RA pixel scale is negative as usual.
    truewcs = Tan(0., 0., W/2., H/2., -pixscale, 0., 0., pixscale,
                  float(W), float(H))

    # Forced-photometry catalog: a single unit-flux source fixed at the origin.
    src = PointSource(RaDecPos(0., 0.,),
                      NanoMaggies(**{band: 1.}))
    src.symmetric_derivs = True
    forced_cat = [src]
    sig1 = 0.25  # per-pixel noise sigma
    flux = 100.
    psf_sigma = 2.0  # Gaussian PSF sigma, in pixels
    # Gaussian point-source norm, used for the S/N estimate below.
    psfnorm = 1./(2. * np.sqrt(np.pi) * psf_sigma)
    nsigma = flux * psfnorm / sig1
    print('S/N:', nsigma)
    v = psf_sigma**2  # NOTE(review): appears unused below

    # Create a pixelized PSF model by rendering the Gaussian on a stamp
    xx,yy = np.meshgrid(np.arange(-12,13), np.arange(-12,13))
    pp = np.exp(-0.5 * (xx**2 + yy**2) / psf_sigma**2)
    pp /= np.sum(pp)
    psf = PixelizedPSF(pp)

    # Blank image; the data array is replaced per-offset inside the loop.
    tim = Image(data=np.zeros((H,W), np.float32),
                inverr=np.ones((H,W), np.float32) * 1./sig1,
                wcs=ConstantFitsWcs(truewcs),
                photocal=LinearPhotoCal(1., band=band),
                sky=ConstantSky(0.),
                psf=psf)
    tim.band = band
    tim.sig1 = sig1

    # True source offsets, in pixels: pure-RA track and a diagonal track.
    #dra_pix = np.linspace(-5, 5, 21)
    dra_pix = np.linspace(-2, 2, 21)
    ddec_pix = np.zeros_like(dra_pix)
    dra2_pix = np.linspace(-5, 5, 21)
    ddec2_pix = -0.5 * dra2_pix

    ps = PlotSequence('pm')

    # Three cases: RA-only offsets at full flux, at half flux, and diagonal.
    for dras,ddecs,srcflux in [(dra_pix * pixscale, ddec_pix * pixscale, flux),
                               (dra_pix * pixscale, ddec_pix * pixscale, flux/2),
                               (dra2_pix * pixscale, ddec2_pix * pixscale, flux),]:
        FF = []
        slicex = []
        slicey = []
        residx = []
        residy = []
        for dra,ddec in zip(dras,ddecs):
            # Simulate: true source at the offset position, plus Gaussian noise.
            src = PointSource(RaDecPos(0.+dra, 0.+ddec),
                              NanoMaggies(**{band: srcflux}))
            tr = Tractor([tim], [src])
            truemod = tr.getModelImage(0)
            noise = np.random.normal(size=truemod.shape) * sig1
            tim.data = truemod + noise
            # Fit: forced photometry of the catalog fixed at the origin.
            F = run_forced_phot(forced_cat, tim, ceres=False, derivs=True, do_apphot=False, fixed_also=True) #, ps=ps)
            #print('Src:', forced_cat)
            t = Tractor([tim], forced_cat)
            m = t.getModelImage(0)
            mh,mw = m.shape
            # Central row/column cuts of the fitted model and its residual.
            slicex.append(m[mh//2,:])
            slicey.append(m[:,mw//2])
            residx.append((m - truemod)[mh//2,:])
            residy.append((m - truemod)[:,mw//2])
            #F.about()
            F.true_dra = dra + np.zeros(len(F))
            F.true_ddec = ddec + np.zeros(len(F))
            FF.append(F)
        F = merge_tables(FF)

        # Flux-derivative ratio (scaled to arcsec) vs. true offset.
        plt.clf()
        plt.plot(F.true_dra * 3600., F.flux_dra / F.flux * 3600., 'b.', label='RA')
        plt.plot(F.true_ddec * 3600., F.flux_ddec / F.flux * 3600., 'g.', label='Dec')
        mx = max(max(np.abs(F.true_dra)), max(np.abs(F.true_ddec)))
        mx *= 3600.
        plt.plot([-mx,mx],[-mx,mx], 'k-', alpha=0.1)
        plt.xlabel('True offset (arcsec)')
        plt.ylabel('Flux deriv / Flux * 3600 (arcsec)')
        plt.legend()
        ps.savefig()

        # Fitted flux vs. true offset, with and without derivative terms.
        plt.clf()
        plt.plot(np.hypot(F.true_dra, F.true_ddec) * 3600., F.flux, 'b.',
                 label='Flux (dra/dec)')
        plt.plot(np.hypot(F.true_dra, F.true_ddec) * 3600., F.flux_fixed,
                 'g.', label='Flux (fixed)')
        plt.xlabel('True offset (arcsec)')
        plt.ylabel('Flux')
        ps.savefig()

        # Model slices, color-graded from first to last offset.
        plt.clf()
        N = len(slicex)
        cc = float(N-1)
        for i,s in enumerate(slicex):
            #plt.plot(s + i*10, 'b-')
            rgb = (0,i/cc,1.-i/cc)
            #print('rgb', rgb)
            plt.plot(s, '-', color=rgb, alpha=0.5)
        ps.savefig()

        # Residual slices with the same color grading.
        plt.clf()
        N = len(residx)
        cc = float(N-1)
        for i,s in enumerate(residx):
            rgb = (0,i/cc,1.-i/cc)
            plt.plot(s, '-', color=rgb, alpha=0.5)
        ps.savefig()


if __name__ == '__main__':
    main()
|
legacysurvey/pipeline
|
py/legacyanalysis/forced-phot-proper-motion.py
|
Python
|
gpl-2.0
| 4,407
|
[
"Gaussian"
] |
d8d8d1521feeb58e0bd134950b9ac4c434812ac8a78c26cb81392d85f3a17006
|
#!/usr/bin/env python3
from LoLIM.stationTimings.autoCorrelator3_plotPulse import processed_data_dir, plot_stations, plot_one_station, plot_one_station_allData, plot_stations_AllData
## these lines are anachronistic and should be fixed at some point
from LoLIM import utilities
# Hard-coded data locations for the analysis machine this script was run on.
utilities.default_raw_data_loc = "/exp_app2/appexp1/lightning_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"

# Identifier of the lightning-flash recording being analysed.
timeID = "D20180921T194259.023Z"
processed_data_folder = processed_data_dir(timeID)

# Fitted per-station timing offsets, in seconds. The trailing comments record
# how far each value moved from the previous guess (all sub-nanosecond).
known_station_delays = {
    'CS001' : 2.22594112593e-06 , ## diff to guess: -2.03808220893e-11
    'CS003' : 1.40486809199e-06 , ## diff to guess: 1.38253423356e-12
    'CS004' : 4.30660422831e-07 , ## diff to guess: -6.23906907784e-12
    'CS006' : 4.34184493916e-07 , ## diff to guess: 2.14758738162e-12
    'CS007' : 4.01114093971e-07 , ## diff to guess: 4.13871757537e-12
    'CS011' : -5.8507310781e-07 , ## diff to guess: 2.48144545939e-12
    'CS013' : -1.81346936224e-06 , ## diff to guess: 6.68830181871e-12
    'CS017' : -8.4374268431e-06 , ## diff to guess: 1.77463333928e-11
    'CS024' : 2.31979457875e-06 , ## diff to guess: -1.9906186238e-11
    'CS026' : -9.23248915119e-06 , ## diff to guess: 3.03730975849e-11
    'CS030' : -2.74190858905e-06 , ## diff to guess: 1.39799986061e-11
    'CS032' : -1.5759048054e-06 , ## diff to guess: -3.19342188075e-11
    'CS101' : -8.16744875658e-06 , ## diff to guess: 5.33916553344e-11
    'CS103' : -2.85149677531e-05 , ## diff to guess: 5.70630260641e-11
    'CS201' : -1.04838101677e-05 , ## diff to guess: 1.54806624706e-11
    'RS205' : 7.00165432704e-06 , ## diff to guess: -1.95632200609e-10
    'RS208' : 6.87005906191e-06 , ## diff to guess: -9.82176687537e-10
    'CS301' : -7.21061332925e-07 , ## diff to guess: -4.32337696006e-11
    'CS302' : -5.36006158172e-06 , ## diff to guess: -1.02567927779e-10
    'CS501' : -9.60769807486e-06 , ## diff to guess: 3.95090180021e-11
    'RS503' : 6.92169621854e-06 , ## diff to guess: 8.61637302128e-11
    'RS210' : 6.77016324355e-06 , ## diff to guess: -1.95983879239e-09
    'RS307' : 6.84081748485e-06 , ## diff to guess: -1.40219978817e-09
    'RS406' : 6.96855897059e-06 , ## diff to guess: 1.61461113386e-10
    'RS407' : 7.03053077398e-06 , ## diff to guess: 4.46846181079e-10
    'RS508' : 6.99307140404e-06 , ## diff to guess: 8.81735114658e-10
    }
#fname = processed_data_folder + "/pulse_finding/potSource_0.h5"
#loc = [ -25383.6550768 , -8163.11570266 , 0.175016843601 , 0.984046325655 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_1.h5"
#loc =[ -24428.0016061 , -7410.53707223 , 0.175016843552 , 0.984337125506 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []#['146009078']
#fname = processed_data_folder + "/pulse_finding/potSource_2.h5"
#loc = [ -36964.5478698 , -8415.53036896 , -0.421147762007 , 0.984916477788 ]
#polarization = 1
#stations_to_skip = ['CS006']
#bad_antennas = []#['146009078']
#fname = processed_data_folder + "/pulse_finding/potSource_3.h5"
#loc = [ 1531.90572559 , 24244.2591802 , 308.6369871 , 0.985483408847 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_4.h5"
#loc = [ -33510.2161972 , -10657.5606301 , 3938.93020211 , 0.986665863365 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = ['146009078']
#fname = processed_data_folder + "/pulse_finding/potSource_5.h5"
#loc =[ -36910.0475907 , -13662.6502376 , 5228.5489493 , 0.986750427195 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []
#
#fname = processed_data_folder + "/pulse_finding/potSource_6.h5"
#loc = [ -33420.7732969 , -10238.6593789 , 4203.17649729 , 0.987354008232 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = ['001011094']
#fname = processed_data_folder + "/pulse_finding/potSource_7.h5"
#loc = [ -40324.8612313 , -10036.5721567 , 5780.00522225 , 0.987596492976 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = ['147011094', '147001014']
#fname = processed_data_folder + "/pulse_finding/potSource_8.h5"
#loc = [ -39940.661778 , -9889.297566 , 7277.26681281 , 0.988907427765 ]
#polarization = 0
#stations_to_skip = ['RS210']
#bad_antennas = []
fname = processed_data_folder + "/pulse_finding/potSource_9.h5"
loc = [ -39149.3745702 , -9738.89193919 , 7595.47885902 , 0.989187161303 ]
polarization = 0
stations_to_skip = ['RS307']
bad_antennas = ['017009078', '130009078', '130001014']
#fname = processed_data_folder + "/pulse_finding/potSource_11.h5"
#loc = [ -39327.9164621 , -9756.00581782 , 7253.49060651 , 0.989722699084 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_15.h5"
#loc = [ -38264.4688068 , -9377.01594644 , 7248.94353376 , 0.991015075495 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_17.h5"
#loc = [ -37145.1550538 , -11173.7953293 , 8830.13213504 , 0.982066435114 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_18.h5"
#loc = [ -37726.5052238 , -11708.9547491 , 7838.73790662 , 0.981013140205 ]
#polarization = 0
#stations_to_skip = []
#bad_antennas = []
#fname = processed_data_folder + "/pulse_finding/potSource_20.h5"
#loc = [ -38575.0533521 , -10117.1002307 , 7045.42564838 , 0.979045191678 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = ['147003030', '147009078']
#fname = processed_data_folder + "/pulse_finding/potSource_21.h5"
#loc = [ -34461.9254663 , -13164.2950072 , -266.300418148 , 0.979027680169 ]
#polarization = 1
#stations_to_skip = []
#bad_antennas = []
stations_to_skip += ['RS305', 'RS306']
bad_antennas += ['147001014']
plot_stations(timeID,
polarization=polarization, ##0 is even. 1 is odd
input_file_name=fname,
source_XYZT = loc,
known_station_delays = known_station_delays,
stations = "all", ## all, RS, or CS
referance_station="CS002",
min_antenna_amplitude=10,
### next three lines only needed to make plot very pretty
skip_stations=stations_to_skip,
antennas_to_exclude = bad_antennas,
plot_peak_time=True,
plot_real=False,
seperation_factor=1)#0.25)
#plot_stations_AllData(timeID,
# input_file_name=fname,
# source_XYZT = loc,
# known_station_delays = known_station_delays,
# stations = "RS", ## all, RS, or CS
# referance_station="CS002",
# min_antenna_amplitude=10,
# ### next three lines only needed to make plot very pretty
# skip_stations=stations_to_skip,
# antennas_to_exclude = bad_antennas,
# seperation_factor=1)#0.25)
plot_one_station(timeID,
polarization=polarization, ##0 is even. 1 is odd
input_file_name=fname,
source_XYZT = loc,
known_station_delays = known_station_delays,
station ='RS307',
referance_station="CS002",
min_antenna_amplitude=10,
plot_real=True)
#plot_one_station_allData(timeID,
# input_file_name=fname,
# source_XYZT = loc,
# known_station_delays = known_station_delays,
# station = "CS002",
# referance_station="CS002",
# min_antenna_amplitude=10)
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/stationTimings/examples/run_plotPulse.py
|
Python
|
mit
| 7,557
|
[
"Brian"
] |
d65753455e9012c7b372e43368194a76d1dc338cf5145945dfd611e91f44b72c
|
### All lines that are commented out (and some that aren't) are optional ###
DB_ENGINE = 'sqlite:///db.sqlite'
#DB_ENGINE = 'mysql://user:pass@localhost/monocle'
#DB_ENGINE = 'postgresql://user:pass@localhost/monocle
AREA_NAME = 'SLC' # the city or region you are scanning
LANGUAGE = 'EN' # ISO 639-1 codes EN, DE, ES, FR, IT, JA, KO, PT, or ZH for Pokémon/move names
MAX_CAPTCHAS = 100 # stop launching new visits if this many CAPTCHAs are pending
SCAN_DELAY = 10 # wait at least this many seconds before scanning with the same account
SPEED_UNIT = 'miles' # valid options are 'miles', 'kilometers', 'meters'
SPEED_LIMIT = 19.5 # limit worker speed to this many SPEED_UNITs per hour
# The number of simultaneous workers will be these two numbers multiplied.
# On the initial run, workers will arrange themselves in a grid across the
# rectangle you defined with MAP_START and MAP_END.
# The rows/columns will also be used for the dot grid in the console output.
# Provide more accounts than the product of your grid to allow swapping.
GRID = (4, 4) # rows, columns
# the corner points of a rectangle for your workers to spread out over before
# any spawn points have been discovered
MAP_START = (40.7913, -111.9398)
MAP_END = (40.7143, -111.8046)
# do not visit spawn points outside of your MAP_START and MAP_END rectangle
# the boundaries will be the rectangle created by MAP_START and MAP_END, unless
STAY_WITHIN_MAP = True
# ensure that you visit within this many meters of every part of your map during bootstrap
# lower values are more thorough but will take longer
BOOTSTRAP_RADIUS = 120
GIVE_UP_KNOWN = 75 # try to find a worker for a known spawn for this many seconds before giving up
GIVE_UP_UNKNOWN = 60 # try to find a worker for an unknown point for this many seconds before giving up
SKIP_SPAWN = 90 # don't even try to find a worker for a spawn if the spawn time was more than this many seconds ago
# How often should the mystery queue be reloaded (default 90s)
# this will reduce the grouping of workers around the last few mysteries
#RESCAN_UNKNOWN = 90
# filename of accounts CSV
ACCOUNTS_CSV = 'accounts.csv'
# the directory that the pickles folder, socket, CSV, etc. will go in
# defaults to working directory if not set
#DIRECTORY = None
#######RAIDS#########
#
RAID_LEVEL_MIN = 1
#
#####################
# MUST MATCH YOUR PGSCOUT CONFIG.JSON. Will encounter based on ENCOUNTER_IDs above.
# If encounter fails, worker.py will revert to the original worker with the sighting and encounter
# but will not return any move/IV data (so your hooks dont get improper info)
#PGSCOUT_PORT ='4242'
# Set the connection timeout to wait on a response from PGScout. Default is 36 seconds.
# Timeout will be connection dependent, proxy dependent, etc. I recommend keeping it at the default.
# Going too high will certainly guarantee a response from a Scout but will lead to greater inefficiency
# and instability for Monocle
#PGSCOUT_TIMEOUT = 36
# Limit the number of simultaneous logins to this many at a time.
# Lower numbers will increase the amount of time it takes for all workers to
# get started but are recommended to avoid suddenly flooding the servers with
# accounts and arousing suspicion.
SIMULTANEOUS_LOGINS = 4
# Limit the number of workers simulating the app startup process simultaneously.
SIMULTANEOUS_SIMULATION = 10
# Immediately select workers whose speed are below (SPEED_UNIT)p/h instead of
# continuing to try to find the worker with the lowest speed.
# May increase clustering if you have a high density of workers.
GOOD_ENOUGH = 0.1
# Seconds to sleep after failing to find an eligible worker before trying again.
SEARCH_SLEEP = 2.5
## alternatively define a Polygon to use as boundaries (requires shapely)
## if BOUNDARIES is set, STAY_WITHIN_MAP will be ignored
## more information available in the shapely manual:
## http://toblerity.org/shapely/manual.html#polygons
#from shapely.geometry import Polygon
#BOUNDARIES = Polygon(((40.799609, -111.948556), (40.792749, -111.887341), (40.779264, -111.838078), (40.761410, -111.817908), (40.728636, -111.805293), (40.688833, -111.785564), (40.689768, -111.919389), (40.750461, -111.949938)))
# key for Bossland's hashing server, otherwise the old hashing lib will be used
#HASH_KEY = '9d87af14461b93cb3605' # this key is fake
# Skip PokéStop spinning and egg incubation if your request rate is too high
# for your hashing subscription.
# e.g.
# 75/150 hashes available 35/60 seconds passed => fine
# 70/150 hashes available 30/60 seconds passed => throttle (only scan)
# value: how many requests to keep as spare (0.1 = 10%), False to disable
#SMART_THROTTLE = 0.1
# Swap the worker that has seen the fewest Pokémon every x seconds
# Defaults to whatever will allow every worker to be swapped within 6 hours
#SWAP_OLDEST = 300 # 5 minutes
# Only swap if it's been active for more than x minutes
#MINIMUM_RUNTIME = 10
### these next 6 options use more requests but look more like the real client
APP_SIMULATION = True # mimic the actual app's login requests
COMPLETE_TUTORIAL = True # complete the tutorial process and configure avatar for all accounts that haven't yet
INCUBATE_EGGS = True # incubate eggs if available
## encounter Pokémon to store IVs.
## valid options:
# 'all' will encounter every Pokémon that hasn't been already been encountered
# 'some' will encounter Pokémon if they are in ENCOUNTER_IDS or eligible for notification
# 'notifying' will encounter Pokémon that are eligible for notifications
# None will never encounter Pokémon
ENCOUNTER = None
#ENCOUNTER_IDS = (3, 6, 9, 45, 62, 71, 80, 85, 87, 89, 91, 94, 114, 130, 131, 134)
# PokéStops
SPIN_POKESTOPS = True # spin all PokéStops that are within range
SPIN_COOLDOWN = 300 # spin only one PokéStop every n seconds (default 300)
# minimum number of each item to keep if the bag is cleaned
# bag cleaning is disabled if this is not present or is commented out
''' # triple quotes are comments, remove them to use this ITEM_LIMITS example
ITEM_LIMITS = {
1: 20, # Poké Ball
2: 50, # Great Ball
3: 100, # Ultra Ball
101: 0, # Potion
102: 0, # Super Potion
103: 0, # Hyper Potion
104: 40, # Max Potion
201: 0, # Revive
202: 40, # Max Revive
701: 20, # Razz Berry
702: 20, # Bluk Berry
703: 20, # Nanab Berry
704: 20, # Wepar Berry
705: 20, # Pinap Berry
}
'''
# Update the console output every x seconds
REFRESH_RATE = 0.75 # 750ms
# Update the seen/speed/visit/speed stats every x seconds
STAT_REFRESH = 5
# sent with GET_PLAYER requests, should match your region
PLAYER_LOCALE = {'country': 'US', 'language': 'en', 'timezone': 'America/Denver'}
# retry a request after failure this many times before giving up
MAX_RETRIES = 3
# number of seconds before timing out on a login request
LOGIN_TIMEOUT = 2.5
# add spawn points reported in cell_ids to the unknown spawns list
#MORE_POINTS = False
# Set to True to kill the scanner when a newer version is forced
#FORCED_KILL = False
# exclude these Pokémon from the map by default (only visible in trash layer)
TRASH_IDS = (
16, 19, 21, 29, 32, 41, 46, 48, 50, 52, 56, 74, 77, 96, 111, 133,
161, 163, 167, 177, 183, 191, 194
)
# include these Pokémon on the "rare" report
RARE_IDS = (3, 6, 9, 45, 62, 71, 80, 85, 87, 89, 91, 94, 114, 130, 131, 134)
from datetime import datetime
REPORT_SINCE = datetime(2017, 2, 17) # base reports on data from after this date
# used for altitude queries and maps in reports
#GOOGLE_MAPS_KEY = 'OYOgW1wryrp2RKJ81u7BLvHfYUA6aArIyuQCXu4' # this key is fake
REPORT_MAPS = True # Show maps on reports
#ALT_RANGE = (1250, 1450) # Fall back to altitudes in this range if Google query fails
## Round altitude coordinates to this many decimal places
## More precision will lead to larger caches and more Google API calls
## Maximum distance from coords to rounded coords for precisions (at Lat40):
## 1: 7KM, 2: 700M, 3: 70M, 4: 7M
#ALT_PRECISION = 2
## Automatically resolve captchas using 2Captcha key.
#CAPTCHA_KEY = '1abc234de56fab7c89012d34e56fa7b8'
## the number of CAPTCHAs an account is allowed to receive before being swapped out
#CAPTCHAS_ALLOWED = 3
## Get new accounts from the CAPTCHA queue first if it's not empty
#FAVOR_CAPTCHA = True
# allow displaying the live location of workers on the map
MAP_WORKERS = True
# filter these Pokemon from the map to reduce traffic and browser load
#MAP_FILTER_IDS = [161, 165, 16, 19, 167]
# unix timestamp of last spawn point migration, spawn times learned before this will be ignored
LAST_MIGRATION = 1481932800 # Dec. 17th, 2016
# Treat a spawn point's expiration time as unknown if nothing is seen at it on more than x consecutive visits
FAILURES_ALLOWED = 2
## Map data provider and appearance, previews available at:
## https://leaflet-extras.github.io/leaflet-providers/preview/
#MAP_PROVIDER_URL = '//{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
#MAP_PROVIDER_ATTRIBUTION = '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors'
# set of proxy addresses and ports
# SOCKS requires aiosocks to be installed
#PROXIES = {'http://127.0.0.1:8080', 'https://127.0.0.1:8443', 'socks5://127.0.0.1:1080'}
# convert spawn_id to integer for more efficient DB storage, set to False if
# using an old database since the data types are incompatible.
#SPAWN_ID_INT = True
# Bytestring key to authenticate with manager for inter-process communication
#AUTHKEY = b'm3wtw0'
# Address to use for manager, leave commented if you're not sure.
#MANAGER_ADDRESS = r'\\.\pipe\monocle' # must be in this format for Windows
#MANAGER_ADDRESS = 'monocle.sock' # the socket name for Unix systems
#MANAGER_ADDRESS = ('127.0.0.1', 5002) # could be used for CAPTCHA solving and live worker maps on remote systems
# Store the cell IDs so that they don't have to be recalculated every visit.
# Enabling will (potentially drastically) increase memory usage.
#CACHE_CELLS = False
# Only for use with web_sanic (requires PostgreSQL)
#DB = {'host': '127.0.0.1', 'user': 'monocle_role', 'password': 'pik4chu', 'port': '5432', 'database': 'monocle'}
# Disable to use Python's event loop even if uvloop is installed
#UVLOOP = True
# The number of coroutines that are allowed to run simultaneously.
#COROUTINES_LIMIT = GRID[0] * GRID[1]
### FRONTEND CONFIGURATION
LOAD_CUSTOM_HTML_FILE = False # File path MUST be 'templates/custom.html'
LOAD_CUSTOM_CSS_FILE = False # File path MUST be 'static/css/custom.css'
LOAD_CUSTOM_JS_FILE = False # File path MUST be 'static/js/custom.js'
#FB_PAGE_ID = None
#TWITTER_SCREEN_NAME = None # Username withouth '@' char
#DISCORD_INVITE_ID = None
#TELEGRAM_USERNAME = None # Username withouth '@' char
## Variables below will be used as default values on frontend
FIXED_OPACITY = False # Make marker opacity independent of remaining time
SHOW_TIMER = False # Show remaining time on a label under each pokemon marker
### OPTIONS BELOW THIS POINT ARE ONLY NECESSARY FOR NOTIFICATIONS ###
NOTIFY = False # enable notifications
# create images with Pokémon image and optionally include IVs and moves
# requires cairo and ENCOUNTER = 'notifying' or 'all'
TWEET_IMAGES = True
# IVs and moves are now dependant on level, so this is probably not useful
IMAGE_STATS = False
# As many hashtags as can fit will be included in your tweets, these will
# be combined with landmark-specific hashtags (if applicable).
HASHTAGS = {AREA_NAME, 'Monocle', 'PokemonGO'}
#TZ_OFFSET = 0 # UTC offset in hours (if different from system time)
# the required number of seconds remaining to notify about a Pokémon
TIME_REQUIRED = 600 # 10 minutes
### Only set either the NOTIFY_RANKING or NOTIFY_IDS, not both!
# The (x) rarest Pokémon will be eligible for notification. Whether a
# notification is sent or not depends on its score, as explained below.
NOTIFY_RANKING = 90
# Pokémon to potentially notify about, in order of preference.
# The first in the list will have a rarity score of 1, the last will be 0.
#NOTIFY_IDS = (130, 89, 131, 3, 9, 134, 62, 94, 91, 87, 71, 45, 85, 114, 80, 6)
# Sightings of the top (x) will always be notified about, even if below TIME_REQUIRED
# (ignored if using NOTIFY_IDS instead of NOTIFY_RANKING)
ALWAYS_NOTIFY = 14
# Always notify about the following Pokémon even if their time remaining or scores are not high enough
#ALWAYS_NOTIFY_IDS = {89, 130, 144, 145, 146, 150, 151}
# Never notify about the following Pokémon, even if they would otherwise be eligible
#NEVER_NOTIFY_IDS = TRASH_IDS
# Override the rarity score for particular Pokémon
# format is: {pokemon_id: rarity_score}
#RARITY_OVERRIDE = {148: 0.6, 149: 0.9}
# Ignore IV score and only base decision on rarity score (default if IVs not known)
#IGNORE_IVS = False
# Ignore rarity score and only base decision on IV score
#IGNORE_RARITY = False
# The Pokémon score required to notify goes on a sliding scale from INITIAL_SCORE
# to MINIMUM_SCORE over the course of FULL_TIME seconds following a notification
# Pokémon scores are an average of the Pokémon's rarity score and IV score (from 0 to 1)
# If NOTIFY_RANKING is 90, the 90th most common Pokémon will have a rarity of score 0, the rarest will be 1.
# IV score is the IV sum divided by 45 (perfect IVs).
FULL_TIME = 1800 # the number of seconds after a notification when only MINIMUM_SCORE will be required
INITIAL_SCORE = 0.7 # the required score immediately after a notification
MINIMUM_SCORE = 0.4 # the required score after FULL_TIME seconds have passed
### The following values are fake, replace them with your own keys to enable
### notifications, otherwise exclude them from your config
### You must provide keys for at least one service to use notifications.
#PB_API_KEY = 'o.9187cb7d5b857c97bfcaa8d63eaa8494'
#PB_CHANNEL = 0 # set to the integer of your channel, or to None to push privately
#TWITTER_CONSUMER_KEY = '53d997264eb7f6452b7bf101d'
#TWITTER_CONSUMER_SECRET = '64b9ebf618829a51f8c0535b56cebc808eb3e80d3d18bf9e00'
#TWITTER_ACCESS_KEY = '1dfb143d4f29-6b007a5917df2b23d0f6db951c4227cdf768b'
#TWITTER_ACCESS_SECRET = 'e743ed1353b6e9a45589f061f7d08374db32229ec4a61'
## Telegram bot token is the one Botfather sends to you after completing bot creation.
## Chat ID can be two different values:
## 1) '@channel_name' for channels
## 2) Your chat_id if you will use your own account. To retrieve your ID, write to your bot and check this URL:
## https://api.telegram.org/bot<BOT_TOKEN_HERE>/getUpdates
#TELEGRAM_BOT_TOKEN = '123456789:AA12345qT6QDd12345RekXSQeoZBXVt-AAA'
#TELEGRAM_CHAT_ID = '@your_channel'
#WEBHOOKS = {'http://127.0.0.1:4000'}
##### Referencing landmarks in your tweets/notifications
#### It is recommended to store the LANDMARKS object in a pickle to reduce startup
#### time if you are using queries. An example script for this is in:
#### scripts/pickle_landmarks.example.py
#from pickle import load
#with open('pickles/landmarks.pickle', 'rb') as f:
# LANDMARKS = load(f)
### if you do pickle it, just load the pickle and omit everything below this point
#from monocle.landmarks import Landmarks
#LANDMARKS = Landmarks(query_suffix=AREA_NAME)
# Landmarks to reference when Pokémon are nearby
# If no points are specified then it will query OpenStreetMap for the coordinates
# If 1 point is provided then it will use those coordinates but not create a shape
# If 2 points are provided it will create a rectangle with its corners at those points
# If 3 or more points are provided it will create a polygon with vertices at each point
# You can specify the string to search for on OpenStreetMap with the query parameter
# If no query or points is provided it will query with the name of the landmark (and query_suffix)
# Optionally provide a set of hashtags to be used for tweets about this landmark
# Use is_area for neighborhoods, regions, etc.
# When selecting a landmark, non-areas will be chosen first if any are close enough
# the default phrase is 'in' for areas and 'at' for non-areas, but can be overriden for either.
### replace these with well-known places in your area
## since no points or query is provided, the names provided will be queried and suffixed with AREA_NAME
#LANDMARKS.add('Rice Eccles Stadium', shortname='Rice Eccles', hashtags={'Utes'})
#LANDMARKS.add('the Salt Lake Temple', shortname='the temple', hashtags={'TempleSquare'})
## provide two corner points to create a square for this area
#LANDMARKS.add('City Creek Center', points=((40.769210, -111.893901), (40.767231, -111.888275)), hashtags={'CityCreek'})
## provide a query that is different from the landmark name so that OpenStreetMap finds the correct one
#LANDMARKS.add('the State Capitol', shortname='the Capitol', query='Utah State Capitol Building')
### area examples ###
## query using name, override the default area phrase so that it says 'at (name)' instead of 'in'
#LANDMARKS.add('the University of Utah', shortname='the U of U', hashtags={'Utes'}, phrase='at', is_area=True)
## provide corner points to create a polygon of the area since OpenStreetMap does not have a shape for it
#LANDMARKS.add('Yalecrest', points=((40.750263, -111.836502), (40.750377, -111.851108), (40.751515, -111.853833), (40.741212, -111.853909), (40.741188, -111.836519)), is_area=True)
|
FreddyPoGo/Monocle
|
config.example.py
|
Python
|
mit
| 17,810
|
[
"VisIt"
] |
9361d6e4837b84fe9cf85157841da1f616ff3aba58a9d4f629d22c63a07f2f2c
|
import os
import sys
import re
import bdsf as bdsm # bdsm it is and bdsm it shall remain
import numpy
import Tigger
import tempfile
import astropy.io.fits as pyfits
import yaml
import shlex
import shutil
import glob
import subprocess
from astLib.astWCS import WCS
from Tigger.Models import SkyModel, ModelClasses
## Driver: read the cab configuration, split parameters between
## bdsm.process_image() and Image.write_catalog(), run the source finder,
## and (optionally) prepare a dummy Tigger model for catalog conversion.
## Paths are handed in by the container environment.
CONFIG = os.environ['CONFIG']
INPUT = os.environ['INPUT']
OUTPUT = os.environ['OUTPUT']
MSDIR = os.environ['MSDIR']
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
## Parameters that belong to write_catalog() rather than process_image().
write_catalog = ['bbs_patches', 'bbs_patches_mask',
                 'catalog_type', 'clobber', 'correct_proj', 'format',
                 'incl_chan', 'incl_empty', 'srcroot', 'port2tigger', 'outfile']
img_opts = {}
write_opts = {}
# Spectral fitting parameters
freq0 = None
spi_do = False
# Initialise explicitly: previously this name was only bound inside the loop
# below, so a missing 'multi_chan_beam' parameter caused a NameError later.
multi_chan_beam = None
## Route each supplied parameter to the right option dictionary.
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    if name in ['multi_chan_beam']:
        multi_chan_beam = value
        continue
    if name in write_catalog:
        write_opts[name] = value
    elif name in ['freq0', 'frequency']:
        freq0 = value
    else:
        img_opts[name] = value
    if name == 'spectralindex_do':
        spi_do = value
img_opts.pop('freq0', None)
## No reference frequency given: fall back to the CRVAL of the image
## header's FREQ axis (the last matching axis wins, as before).
if freq0 is None:
    with pyfits.open(img_opts['filename']) as hdu:
        hdr = hdu[0].header
    for i in range(1, hdr['NAXIS']+1):  # range, not py2-only xrange
        if hdr['CTYPE{0:d}'.format(i)].startswith('FREQ'):
            freq0 = hdr['CRVAL{0:d}'.format(i)]
## For spectral-index fitting with per-channel beams, collect the
## (BMAJ, BMIN, BPA) triplet of every numbered BMAJ<n> header key.
if spi_do and multi_chan_beam:
    with pyfits.open(img_opts['filename']) as hdu:
        hdr = hdu[0].header
    beams = []
    # Get a sequence of BMAJ with digit suffix from the image header keys
    bmaj_ind = [key for key in hdr.keys()
                if key.startswith('BMAJ') and key[-1].isdigit()]
    for bmaj in bmaj_ind:
        ind = bmaj.split('BMAJ')[-1]
        beam = [hdr['{0:s}{1:s}'.format(b, ind)]
                for b in 'BMAJ BMIN BPA'.split()]
        beams.append(tuple(beam))
    # parse beam info to pybdsm
    img_opts['beam_spectrum'] = beams
image = img_opts.pop('filename')
filename = os.path.basename(image)
outfile = write_opts.pop('outfile')
## Run the finder; the finally block always removes configured junk from the
## writable volumes, whether or not processing succeeded.
try:
    img = bdsm.process_image(image, **img_opts)
    port2tigger = write_opts.pop('port2tigger', True)
    if port2tigger:
        # The Tigger conversion below reads the catalog back as FITS.
        write_opts['format'] = 'fits'
    img.write_catalog(outfile=outfile, **write_opts)
finally:
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
if not port2tigger:
    sys.exit(0)
# convert to Gaul file to Tigger LSM
# First make dummy tigger model
tfile = tempfile.NamedTemporaryFile(suffix='.txt')
tfile.flush()
prefix = os.path.splitext(outfile)[0]
tname_lsm = prefix + ".lsm.html"
with open(tfile.name, "w") as stdw:
    stdw.write("#format:name ra_d dec_d i emaj_s emin_s pa_d\n")
model = Tigger.load(tfile.name)
tfile.close()
def tigger_src(src, idx):
    """Convert one PyBDSM catalog row into a Tigger sky-model source.

    src: catalog row supporting item access by column name.
    idx: running index used to build the source name ("SRC<idx>").
    Returns a SkyModel.Source. Raises RuntimeError when the module-level
    spi_do flag is set but no reference frequency (freq0) is available.
    """
    def _rad_pair(val_col, err_col):
        # Catalog angles are in degrees; Tigger wants radians.
        return map(numpy.deg2rad, (src[val_col], src[err_col]))

    name = "SRC%d" % idx
    flux = ModelClasses.Polarization(
        src["Total_flux"], 0, 0, 0, I_err=src["E_Total_flux"])
    ra, ra_err = _rad_pair("RA", "E_RA")
    dec, dec_err = _rad_pair("DEC", "E_DEC")
    pos = ModelClasses.Position(ra, dec, ra_err=ra_err, dec_err=dec_err)
    ex, ex_err = _rad_pair("DC_Maj", "E_DC_Maj")
    ey, ey_err = _rad_pair("DC_Min", "E_DC_Min")
    pa, pa_err = _rad_pair("PA", "E_PA")

    # Zero deconvolved axes indicate a point source: no shape object.
    shape = ModelClasses.Gaussian(
        ex, ey, pa, ex_err=ex_err, ey_err=ey_err,
        pa_err=pa_err) if ex and ey else None

    source = SkyModel.Source(name, pos, flux, shape=shape)
    # Record peak flux (and error) as extra attributes; point sources fall
    # back to the total flux so the attribute is never null.
    if shape:
        source.setAttribute("I_peak", src["Peak_flux"])
        source.setAttribute("I_peak_err", src["E_peak_flux"])
    else:
        source.setAttribute("I_peak", src["Total_flux"])
        source.setAttribute("I_peak_err", src["E_Total_flux"])

    if spi_do:
        # freq0 fixes the reference frequency of the spectral index; without
        # it the spectrum cannot be attached to the source.
        if not freq0:
            raise RuntimeError("No start frequency (freq0) provided.")
        spi, spi_err = (src['Spec_Indx'], src['E_Spec_Indx'])
        source.spectrum = ModelClasses.SpectralIndex(spi, freq0)
        source.setAttribute('spi_error', spi_err)
    return source
## Read the FITS catalog written above and append every row to the dummy
## Tigger model as a source.
with pyfits.open(outfile) as hdu:
    data = hdu[1].data
for i, src in enumerate(data):
    model.sources.append(tigger_src(src, i))
## Use the image WCS centre as the sky-model phase centre (converted to
## radians, as Tigger expects).
wcs = WCS(image)
centre = wcs.getCentreWCSCoords()
model.ra0, model.dec0 = map(numpy.deg2rad, centre)
model.save(tname_lsm)
# Rename sources using the default cluster/CORPAT naming of tigger-convert.
_runc = "tigger-convert %s --rename -f" % tname_lsm
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    ## Always remove configured junk from the writable volumes, even when
    ## the conversion fails.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
|
SpheMakh/Stimela
|
stimela/cargo/cab/pybdsm/src/run.py
|
Python
|
gpl-2.0
| 5,616
|
[
"Gaussian"
] |
4b2f1ff5720b3c0258e98731e272a934f5f55178f46ef4b02ae94bcb2882f774
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Configuration file for CV Repository."""
# Optional dependencies: fall back gracefully when not installed.
try:
    import sphinx_rtd_theme
except ImportError:
    sphinx_rtd_theme = None
try:
    from sphinxcontrib import spelling
except ImportError:
    spelling = None
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Enable the spell checker only when sphinxcontrib-spelling is available.
if spelling is not None:
    extensions.append('sphinxcontrib.spelling')
# Add any paths that contain templates here, relative to this directory.
# NOTE(review): templates_path pointing at '_static' (not '_templates') is
# unusual -- confirm this is intended.
templates_path = ['_static']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = """Brian Moss"""
copyright = '2020, Brian Moss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): an e-mail address stored in `version` looks unintentional --
# confirm whether this is deliberately abused for display purposes.
version = 'kallimachos@gmail.com'
# The full version, including alpha/beta/rc tags.
# release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'samples', 'README.rst', 'common/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Prefer the Read the Docs theme when available; fall back to the default.
if sphinx_rtd_theme:
    html_theme = 'sphinx_rtd_theme'
else:
    html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'navigation_depth': 1,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/profile_pic_square.png'
# The name of an image file (relative to this directory) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/profile_pic_square.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
    'css_files': [
        '_static/theme_overrides.css', # overrides wide tables in RTD theme
        ],
    }
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cv'
# this will change the 'paragraph' character to '#'
html_add_permalinks = '#'
# Prolog prepended to every reST file: defines the |br| line-break substitution.
# NOTE(review): docutils normally requires a blank line between a "raw::"
# directive line and its content -- confirm this prolog renders as intended.
rst_prolog = """.. |br| raw:: html

   <br />
"""
|
kallimachos/cv
|
doc/conf.py
|
Python
|
gpl-3.0
| 6,294
|
[
"Brian"
] |
9ea327a93267a96e182caa746ed639bdbd5e9fb8e2a774f6a5370f7d5c383cc3
|
# Copyright 2017 Bateared Collie
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import tempfile

import numpy as np

import rsf.api as sfapi
import stk.hyperCubeUtility as hu
from vtk.util import numpy_support
def ToHyperCubeData(MdgscrInput, name="HyperCubeData"):
    '''
    @summary: Converts a Madagascar input object to vtkHyperCube
    @param MdgscrInput: input data cube
    @type MdgscrInput: rsf.api.Input
    @param name: name for the data set
    @type name: string

    ** Example Usage **

    >>> import stk.m8rExt as md
    >>> cube=md.ToHyperCubeData(Input)
    '''
    # Hoist the array view out of the loop: the original re-evaluated
    # MdgscrInput.__array__() on every iteration of the while condition.
    array = MdgscrInput.__array__()
    ndims = len(array.shape)

    # Collect spacing ("d1","d2",...) and origin ("o1","o2",...) for every
    # axis that is declared in the RSF header ("n<axis>" present).
    delta = []
    origin = []
    axis = 1
    while axis <= ndims and MdgscrInput.int("n" + str(axis)) is not None:
        delta.append(MdgscrInput.float("d" + str(axis)))
        origin.append(MdgscrInput.float("o" + str(axis)))
        axis += 1

    cube = hu.hyperCubeGenerate(array=array,
                                delta=delta,
                                origin=origin,
                                name=name,
                                )
    return cube
def M8rToHyperCubeData(m8rfile, name="HyperCubeData"):
    '''
    @summary: Converts a Madagascar m8r Object to vtkHyperCube
    @param m8rfile: input data cube (note this is destroyed)
    @type m8rfile: m8r.File
    @param name: name for the data set
    @type name: string

    ** Example Usage **

    >>> import stk.m8rExt as md
    >>> grid = sf.math(output="sin(x1)*cos(x2)",
                       n1=101,n2=101,
                       d1=0.2,d2=0.2,
                       o1=10,o2=-10.)[0]
    >>> cube=md.M8rToHyperCubeData(grid)
    '''
    MdgscrInput = sfapi.Input(m8rfile.__str__())
    # Bug fix: forward the caller's *name* argument instead of the
    # hard-coded default, which silently ignored it.
    return ToHyperCubeData(MdgscrInput, name=name)
def ToMadagascar(hypercube, filename=None,
                 name="HyperCubeData"):
    '''
    @summary: Generates a Madagascar Input Object from vtkHyperCube
    @param hypercube: input vtkHyperCube object
    @type hypercube: vtkHyperCube
    @param filename: filename for output (if not given a temp file is used)
    @type filename: string
    @param name: scalar array name to take from vtkTracePanelData object
    @type name: string

    ** Example Usage **

    >>> import stk.mdExt as md
    >>> mdObj = md.ToMadagascar(cube)
    '''
    if filename is None:
        # mkstemp() returns an open OS-level file descriptor as well as the
        # path; close it so the handle is not leaked (sfapi.Output reopens
        # the file by name).
        fd, filename = tempfile.mkstemp(suffix=".rsf")
        os.close(fd)

    # "out" (not "file") so we do not shadow the builtin
    out = sfapi.Output(filename)

    # Write one (n, d, o) header triplet per axis.
    naxis = hypercube.GetNDimensions()
    # np.int was removed in NumPy >= 1.24; the builtin int is the exact
    # equivalent of the old alias.
    dims = np.zeros([naxis], dtype=int)
    hypercube.GetFullDimensions(dims)
    for i, nn in enumerate(dims):
        out.put("n" + str(i + 1), nn)
        out.put("d" + str(i + 1), hypercube.GetAxisSpacing(i))
        out.put("o" + str(i + 1), hypercube.GetAxisOrigin(i))

    # Dump the named scalar array as the RSF payload.
    data = numpy_support.vtk_to_numpy(hypercube.GetPointData().GetScalars(name))
    out.write(data)
    out.close()
    return out
|
batearedcollie/seisTK
|
stk/mdExt/hyperCube.py
|
Python
|
bsd-3-clause
| 4,492
|
[
"VTK"
] |
0ce11d39b742c76d694426b637a11893860a1f9d8db43e13c20ca703568167a1
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Forms in OpenLP are made up of two classes. One class holds all the graphical
elements, like buttons and lists, and the other class holds all the functional
code, like slots and loading and saving.
The first class, commonly known as the **Dialog** class, is typically named
``Ui_<name>Dialog``. It is a slightly modified version of the class that the
``pyuic4`` command produces from Qt4's .ui file. Typical modifications will be
converting most strings from "" to u'' and using OpenLP's ``translate()``
function for translating strings.
The second class, commonly known as the **Form** class, is typically named
``<name>Form``. This class is the one which is instantiated and used. It uses
dual inheritance to inherit from (usually) QtGui.QDialog and the Ui class
mentioned above, like so::
class AuthorsForm(QtGui.QDialog, Ui_AuthorsDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
This allows OpenLP to use ``self.object`` for all the GUI elements while keeping
them separate from the functionality, so that it is easier to recreate the GUI
from the .ui files later if necessary.
"""
from mediafilesform import MediaFilesForm
from authorsform import AuthorsForm
from topicsform import TopicsForm
from songbookform import SongBookForm
from editverseform import EditVerseForm
from editsongform import EditSongForm
from songmaintenanceform import SongMaintenanceForm
from songimportform import SongImportForm
from songexportform import SongExportForm
|
marmyshev/transitions
|
openlp/plugins/songs/forms/__init__.py
|
Python
|
gpl-2.0
| 3,653
|
[
"Brian"
] |
e9a9cd8dd6967fc0ea957b922b7fd7491ba1f7118afd52679e049911ad25f17c
|
# -*- coding: utf-8 -*-
#
# evaluate_tsodyks2_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Example of the tsodyks2_synapse in NEST
---------------------------------------------
This synapse model implements synaptic short-term depression and short-term f
according to [1] and [2]. It solves Eq (2) from [1] and modulates U according
This connection merely scales the synaptic weight, based on the spike history
parameters of the kinetic model. Thus, it is suitable for all types of synapt
that is current or conductance based.
The parameter A_se from the publications is represented by the
synaptic weight. The variable x in the synapse properties is the
factor that scales the synaptic weight.
Parameters
~~~~~~~~~~~
The following parameters can be set in the status dictionary:
* U double - probability of release increment (U1) [0,1], default=0.
* u double - Maximum probability of release (U_se) [0,1], default=0.
* x double - current scaling factor of the weight, default=U
* tau_rec double - time constant for depression in ms, default=800 ms
* tau_fac double - time constant for facilitation in ms, default=0 (off)
Notes
~~~~~~~
Under identical conditions, the tsodyks2_synapse produces slightly lower
peak amplitudes than the tsodyks_synapse. However, the qualitative behavior
is identical.
This compares the two synapse models.
References
~~~~~~~~~~~
.. [1] Tsodyks, M. V., & Markram, H. (1997). The neural code between
neocortical depends on neurotransmitter release probability. PNAS,
94(2), 719-23.
.. [2] Fuhrmann, G., Segev, I., Markram, H., & Tsodyks, M. V. (2002). Coding of
information by activity-dependent synapses. Journal of
neurophysiology, 8
.. [3] Maass, W., & Markram, H. (2002). Synapses as dynamic memory buffers.
Neural Networks, 15(2), 155-161.
http://dx.doi.org/10.1016/S0893-6080(01)00144-7
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import nest.voltage_trace
nest.ResetKernel()

###############################################################################
# Parameter set for synaptic depression

dep_params = {"U": 0.67, "u": 0.67, 'x': 1.0, "tau_rec": 450.0,
              "tau_fac": 0.0, "weight": 250.}

###############################################################################
# Parameter set for synaptic facilitation

fac_params = {"U": 0.1, "u": 0.1, 'x': 1.0, "tau_fac": 1000.,
              "tau_rec": 100., "weight": 250.}

###############################################################################
# Now we assign the parameter set to the synapse models. Both models receive
# equal values so that their responses can be compared directly.
# Consistency fix: t1_params now really configures tsodyks_synapse and
# t2_params configures tsodyks2_synapse, matching the comments (the two
# dicts are copies, so the simulation outcome is unchanged).

t1_params = fac_params        # for tsodyks_synapse
t2_params = t1_params.copy()  # for tsodyks2_synapse

nest.SetDefaults("tsodyks_synapse", t1_params)
nest.SetDefaults("tsodyks2_synapse", t2_params)
nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3.})

###############################################################################
# Create three neurons.

neuron = nest.Create("iaf_psc_exp", 3)

###############################################################################
# Neuron one produces spikes. Neurons 2 and 3 receive the spikes via the two
# synapse models.

nest.Connect([neuron[0]], [neuron[1]], syn_spec="tsodyks_synapse")
nest.Connect([neuron[0]], [neuron[2]], syn_spec="tsodyks2_synapse")

###############################################################################
# Now create two voltmeters to record the responses.

voltmeter = nest.Create("voltmeter", 2)
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})

###############################################################################
# Connect the voltmeters to the neurons.

nest.Connect([voltmeter[0]], [neuron[1]])
nest.Connect([voltmeter[1]], [neuron[2]])

###############################################################################
# Now simulate the standard STP protocol: a burst of spikes (strong DC
# input), followed by a pause and a recovery response.

nest.SetStatus([neuron[0]], "I_e", 376.0)
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 0.0)
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 376.0)
nest.Simulate(500.0)

###############################################################################
# Finally, generate voltage traces. Both are shown in the same plot and
# should be almost completely overlapping.

nest.voltage_trace.from_device([voltmeter[0]])
nest.voltage_trace.from_device([voltmeter[1]])
|
terhorstd/nest-simulator
|
pynest/examples/evaluate_tsodyks2_synapse.py
|
Python
|
gpl-2.0
| 5,079
|
[
"NEURON"
] |
7b760788cdb26189e241fbc2330a0e8a7a405fa9d219ee043b6a0771e8875803
|
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#
# Example adapted from test_HEXABLOCK.py: builds a spherical hexahedral grid
# with HEXABLOCK and (optionally) saves it to a VTK file.
# NOTE: Python 2 syntax (print statement) -- runs under the SALOME py2 stack.
import os
import GEOM
import SALOMEDS
import hexablock
print "test grille spherique..."
# A fresh HEXABLOCK document to hold the model.
doc = hexablock.addDocument()
# Sphere centre and the direction vector of the grid.
orig = doc.addVertex(0, 0, 0)
direction = doc.addVector(1, 1, 1)
# Number of hexahedral layers.
n = 2
k = 0.8 # thins the layers (or thickens them: if < 1 or > 1)
grid = doc.makeSpherical(orig, direction, n, k)
file_name = os.path.join(os.environ['TMP'], 'grille_spherique.vtk')
# Saving the VTK output is currently disabled:
#### grid.saveVtk(file_name)
print "...test grille spherique OK"
|
FedoraScientific/salome-hexablock
|
doc/pyplots/test_make_spher_grid.py
|
Python
|
lgpl-2.1
| 1,410
|
[
"VTK"
] |
9d915edbf843975a82ad57bd7561364514c978d4f3d76bd0fd7c200461d1295b
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Python modules repository
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import ast
import imp
import json
import logging
import os
import cohorte
import cohorte.repositories
from cohorte.repositories.beans import Artifact, Version
import cohorte.version
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \
Invalidate, Validate
from pelix.utilities import is_string
# ######### added by: Bassem D.
# #########
# Pelix
# Repository beans
# ------------------------------------------------------------------------------
# Bundle version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Module(Artifact):
    """
    A single Python module seen as a repository artifact
    """
    def __init__(self, name, version, imports, filename):
        """
        Sets up the module description

        :param name: Name of the module
        :param version: Version of the module (as a string)
        :param imports: List of names of imported modules
        :param filename: Path to the .py file
        :raise ValueError: Invalid argument
        """
        Artifact.__init__(self, "python", name, version, filename)

        # Names of all the modules imported by this one
        self.all_imports = imports

    def imports(self, artifact):
        """
        Tests whether this module might import the given artifact

        :param artifact: Another artifact
        :return: True if this module imports the given one
        """
        # Cross-language imports are impossible: the languages must match
        # and the artifact's name must appear among our imports
        return artifact.language == self.language \
            and artifact.name in self.all_imports
# ------------------------------------------------------------------------------
class AstVisitor(ast.NodeVisitor):
    """
    AST visitor to extract the imports and the version of a module
    """
    # pylint: disable=invalid-name
    def __init__(self, module_name, is_package):
        """
        Sets up the visitor

        :param module_name: The module name
        :param is_package: Whether the name is a package name
        """
        ast.NodeVisitor.__init__(self)
        # Names of the imported modules (absolute names)
        self.imports = set()
        # Version string found in __version__ / __version_info__, if any
        self.version = None
        self.module_parts = module_name.split(".")
        # Drop module name, keeping only packages' names
        if not is_package:
            self.module_parts = self.module_parts[:-1]
        self.module_name = module_name

    def generic_visit(self, node):
        """
        Custom default visit method that avoids visiting further than the
        module level.
        """
        if type(node) is ast.Module:
            ast.NodeVisitor.generic_visit(self, node)

    def resolve_relative_import_from(self, node):
        """
        Converts a relative import (import .module) into an absolute one

        :param node: An ImportFrom AST node
        :return: The absolute module name
        """
        if node.level > 0:
            # Relative import: climb node.level - 1 packages up
            if node.level == 1:
                parent = '.'.join(self.module_parts)
            else:
                parent = '.'.join(self.module_parts[:-node.level + 1])

            if node.module:
                # from .module import ...
                return '.'.join((parent, node.module))
            else:
                # from . import ...
                return parent
        else:
            # Absolute import
            return node.module

    def visit_Import(self, node):
        """
        Found an "import"
        """
        for alias in node.names:
            self.imports.add(alias.name)

    def visit_ImportFrom(self, node):
        """
        Found a "from ... import ..."
        """
        imported = self.resolve_relative_import_from(node)
        self.imports.add(imported)

    def visit_Assign(self, node):
        """
        Found an assignment: keep the first __version__/__version_info__
        value encountered
        """
        field = getattr(node.targets[0], 'id', None)
        if not self.version \
                and field in ('__version__', '__version_info__'):
            try:
                version_parsed = ast.literal_eval(node.value)
                if isinstance(version_parsed, (tuple, list)):
                    # Bug fix: join the string form of each component.
                    # ".".join(str(version_parsed)) iterated over the
                    # *characters* of "(1, 2, 3)" instead of the numbers.
                    self.version = ".".join(
                        str(part) for part in version_parsed)
                else:
                    self.version = str(version_parsed)
            except ValueError:
                # Not a literal value: ignore it
                pass
def _extract_module_info(filename, module_name, is_package):
    """
    Extract the version and the imports from the given Python file

    :param filename: Path to the file to parse
    :param module_name: The fully-qualified module name
    :param is_package: Whether the name is a package name
    :return: A (version, imports) tuple -- imports is a set of module names
    :raise ValueError: Unreadable or unparsable file
    """
    try:
        # Python 3: open() accepts an "encoding" keyword argument
        with open(filename,encoding="utf8") as filep:
            source = filep.read()
    except (OSError, IOError,TypeError) as ex:
        # TypeError: Python 2's builtin open() has no "encoding" keyword;
        # fall back to io.open(), which exists on both versions
        try:
            import io
            with io.open(filename,encoding="utf8") as filep:
                source = filep.read()
        except (OSError, IOError) as ex2:
            _logger.exception(ex2)
            raise ValueError("Error reading {0}: {1}".format(filename, ex))
    # Walk the AST once to collect __version__ and the imported names
    visitor = AstVisitor(module_name, is_package)
    try:
        module = ast.parse(source, filename, 'exec')
    except (ValueError, SyntaxError, TypeError) as ex:
        raise ValueError("Error parsing {0}: {1}".format(filename, ex))
    visitor.visit(module)
    return visitor.version, visitor.imports
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-artifacts-python-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS)
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python")
class PythonModuleRepository(object):
    """
    Represents a repository of Python modules
    """
    def __init__(self):
        """
        Sets up the repository
        """
        self._language = "python"

        # Name -> [Modules]
        self._modules = {}

        # Directory name -> Package name
        self._directory_package = {}

        # File -> Module
        self._files = {}

    def __contains__(self, item):
        """
        Tests if the given item is in the repository

        :param item: Item to be tested (artifact, module name or file name)
        :return: True if the item is in the repository
        """
        if isinstance(item, Artifact):
            # Test artifact language
            if item.language != "python":
                return False

            # Test if the name is in the modules
            return item.name in self._modules
        elif item in self._modules:
            # Item matches a module name
            return True
        else:
            # Test the file name, as given and resolved
            for name in (item, os.path.realpath(item)):
                if name in self._files:
                    return True

        # No match
        return False

    def __len__(self):
        """
        Length of a repository <=> number of individual artifacts
        """
        return sum(len(modules) for modules in self._modules.values())

    def __add_module(self, module, registry=None):
        """
        Adds a module to the registry

        :param module: A Module object
        :param registry: Registry where to store the module
        """
        if registry is None:
            registry = self._modules

        # Add the module to the registry, keeping the versions list sorted
        # in descending order
        modules_list = registry.setdefault(module.name, [])
        if module not in modules_list:
            modules_list.append(module)
            modules_list.sort(reverse=True)

        # Associate the file name with the module
        self._files[module.file] = module

    @staticmethod
    def __compute_name(root, filename):
        """
        Computes the module name of the given file by looking for
        '__init__.py' files in its parent directories

        :param root: Path to the python package base of the file
        :param filename: Path of the module file
        :return: The Python name of the module, and a boolean indicating
                 whether the name is a package name
        :raise ValueError: Invalid directory name
        """
        # Subtract the root part
        filename = os.path.relpath(filename, root)

        # Drop extension
        filename = os.path.splitext(filename)[0]
        name_parts = filename.split(os.path.sep)

        # A package is materialized by its __init__ file
        is_package = name_parts[-1] == "__init__"
        if is_package:
            name_parts = name_parts[:-1]

        return ".".join(name_parts), is_package

    @staticmethod
    def __test_import(name):
        """
        Tries to import the given module, using imp.find_module().

        NOTE(review): "imp" is deprecated in Python 3 in favour of importlib;
        kept for Python 2 compatibility.

        :param name: A module name
        :return: True if the module can be imported
        """
        try:
            # find_module() uses a path-like name, not a dotted one
            path_name = name.replace('.', os.sep)
            result = imp.find_module(path_name)
        except ImportError:
            # Module not found
            return False
        else:
            # Module found: close the file opened by find_module(), if any
            if result[0] is not None:
                result[0].close()
            return True

    def add_file(self, root, filename):
        """
        Adds a Python file to the repository

        :param root: Path to the python package base of the added file
        :param filename: A Python full-path file name
        :raise ValueError: Unreadable file
        """
        # Compute the real name of the Python file
        realfile = os.path.realpath(filename)
        if realfile in self._files:
            # Already read it: ignore
            return

        if os.path.basename(filename).startswith('.'):
            # Hidden file: ignore
            return

        # Compute the complete module name
        name, is_package = self.__compute_name(root, filename)

        # Parse the file
        version, imports = _extract_module_info(realfile, name, is_package)

        # Store the module
        self.__add_module(Module(name, version, imports, realfile))

    @staticmethod
    def __is_module(dirname):
        """
        Tests whether the given directory contains a valid Python package,
        i.e. an __init__.py file.

        :param dirname: The directory's name
        :return: True if the directory contains a valid python package,
                 False otherwise.
        """
        init_file = os.path.join(dirname, "__init__.py")
        return os.path.exists(init_file)

    def add_directory(self, dirname):
        """
        Recursively adds all .py modules found in the given directory into
        the repository

        :param dirname: A path to a directory
        """
        for root, dirnames, filenames in os.walk(dirname, followlinks=True):
            # Only handle files directly in the base directory or in a valid
            # Python package.
            # NOTE(review): os.walk still descends into non-package
            # sub-directories (dirnames is not pruned) even though the
            # original comment claimed otherwise -- confirm whether pruning
            # (dirnames[:] = []) is the intended behaviour before changing.
            if not os.path.samefile(dirname, root) \
                    and not self.__is_module(root):
                continue

            for filename in filenames:
                if os.path.splitext(filename)[1] == '.py':
                    fullname = os.path.join(root, filename)
                    try:
                        self.add_file(dirname, fullname)
                    except ValueError as ex:
                        _logger.warning("Error analyzing %s: %s",
                                        fullname, ex)

    def clear(self):
        """
        Clears the repository content
        """
        self._modules.clear()
        self._files.clear()
        self._directory_package.clear()

    def get_artifact(self, name=None, version=None, filename=None,
                     registry=None):
        """
        Retrieves a module from the repository

        :param name: The module name (mutually exclusive with filename)
        :param version: The module version (None or '0.0.0' for any),
                        ignored if filename is used
        :param filename: The module file name (mutually exclusive with name)
        :param registry: Registry where to look for the module
        :return: The first matching module
        :raise ValueError: If the module can't be found
        """
        if registry is None:
            registry = self._modules

        if filename:
            # Use the file name (direct search)
            module = self._files.get(filename)
            if module:
                # Found it
                return module

            for bundle_file in self._files:
                # Search by file base name
                if os.path.basename(bundle_file) == filename:
                    return self._files[bundle_file]

            if not name:
                # Not found by file name, and no name to look for
                raise ValueError("Module file not found: {0}"
                                 .format(filename))

        if isinstance(name, Module):
            # Got a module bean directly
            module = name
            if module in registry:
                return module
            else:
                # Use the module name and version
                name = module.name
                version = module.version

        matching = registry.get(name, None)
        if not matching:
            raise ValueError('Module {0} not found.'.format(name))

        # Return the first (highest) version that satisfies the request
        for module in matching:
            if module.version.matches(version):
                return module

        raise ValueError('Module {0} not found for version {1}'
                         .format(name, version))

    def get_language(self):
        """
        Retrieves the language of the artifacts stored in this repository
        """
        return self._language

    def resolve_installation(self, artifacts, system_artifacts=None):
        """
        Returns all the artifacts that must be installed in order to have
        the given modules resolved.

        :param artifacts: A list of module names to be installed
        :param system_artifacts: Modules considered as available
        :return: A tuple: (modules, dependencies, missing artifacts, [])
        """
        # Name -> Module for this resolution
        local_modules = {}

        # Module -> [Modules]
        dependencies = {}

        # Missing elements
        missing_modules = set()

        # Consider system modules already installed
        if system_artifacts:
            for module in system_artifacts:
                if is_string(module):
                    if module in self._modules:
                        module = self._modules[module]
                    else:
                        module = Module(str(module), None, None, None)

                if isinstance(module, Module):
                    # Only accept modules
                    self.__add_module(module, local_modules)

        # Resolution loop
        to_install = [self.get_artifact(name) for name in artifacts]
        i = 0
        while i < len(to_install):
            # Loop control
            module = to_install[i]
            i += 1

            # Add the current module
            self.__add_module(module, local_modules)
            dependencies[module] = []

            # Resolve imports
            for imported in module.all_imports:
                # Find the module: local registry first, then the global one
                provider = None
                registry = None
                for registry in (local_modules, self._modules):
                    try:
                        provider = self.get_artifact(imported, None, None,
                                                     registry)
                        # Found one
                        break
                    except ValueError:
                        # Try the next registry
                        pass
                else:
                    # No provider found in the repositories: try a plain
                    # Python import; report the module as missing when that
                    # fails too.
                    if not self.__test_import(imported):
                        # Totally unknown module
                        missing_modules.add(imported)

                    # In both cases there is no artifact to install for this
                    # import: resolve the next one.
                    # (Bug fix: a system-importable module used to fall
                    # through with provider=None and crash __add_module.)
                    continue

                if registry is self._modules:
                    # The provider was found in the global registry: make it
                    # part of this resolution
                    self.__add_module(provider, local_modules)

                # Store the dependency exactly once
                # (bug fix: the provider used to be appended twice)
                dependencies[module].append(provider)

                # The new module will be resolved later
                if provider not in to_install:
                    # We'll have to resolve it
                    to_install.append(provider)

        return to_install, dependencies, missing_modules, []

    def walk(self):
        """
        Walks through the known artifacts
        """
        for modules in self._modules.values():
            for module in modules:
                yield module

    # ######### added by: Bassem D.
    def load_cache(self):
        """
        Loads the cache from system file to memory

        :return: True if the cache was successfully loaded
        """
        use_cache = os.environ.get('COHORTE_USE_CACHE')
        if use_cache and use_cache.lower() == "true":
            try:
                with open('cache.js') as input_file:
                    cache = json.load(input_file)
                if cache:
                    _logger.info("loading repository from cache...")
                    # Recreate the module beans
                    for module in cache["modules"]:
                        name = module["name"]
                        version = Version(module["version"])
                        filename = module["filename"]
                        module_bean = Module(name, version, [], filename)
                        self.__add_module(module_bean, self._modules)

                    # Restore directory -> package associations
                    for directory in cache["directories"]:
                        self._directory_package[directory["dir_name"]] \
                            = directory["pkg_name"]
                    return True
            except (IOError, ValueError, KeyError):
                # Unreadable or malformed cache file: fall back to a full
                # file-system scan (KeyError added for truncated caches)
                return False

        # No cache
        return False

    def save_cache(self):
        """
        Saves the cache from memory to system file
        """
        use_cache = os.environ.get('COHORTE_USE_CACHE')
        if use_cache and use_cache.lower() == "true":
            # dump modules
            _logger.info("Dumping cache info...")

            # Name -> [Modules]
            cache_modules = [
                {"name": module.name, "version": str(module.version),
                 "language": module.language, "filename": module.file}
                for modules in self._modules.values()
                for module in modules]

            # Directory name -> Package name
            cache_directories = [
                {"dir_name": dir_name,
                 "pkg_name": self._directory_package[dir_name]}
                for dir_name in self._directory_package]

            # Write cache
            cache = {"modules": cache_modules,
                     "directories": cache_directories}
            with open('cache.js', 'w') as outfile:
                json.dump(cache, outfile, indent=4)
    # #########

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        # ######### added by: Bassem D.
        # Try to load the repository from the cache file; scan the file
        # system only when that fails, then refresh the cache.
        status = self.load_cache()
        if not status:
            _logger.info("Loading repository from file system...")
            # #########

            # Home/Base repository
            for key in (cohorte.PROP_BASE, cohorte.PROP_HOME):
                repository = os.path.join(context.get_property(key), "repo")
                self.add_directory(repository)

            # Python path directories
            python_path = os.getenv("PYTHONPATH", None)
            if python_path:
                for path in python_path.split(os.pathsep):
                    self.add_directory(path)

            # ######### added by: Bassem D.
            self.save_cache()
            # #########

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        self.clear()
|
isandlaTech/cohorte-runtime
|
python/cohorte/repositories/python/modules.py
|
Python
|
apache-2.0
| 21,352
|
[
"VisIt"
] |
98387aaea4bed52908fd8eeef250525cb46ad445723f1d4e883568877a98e66f
|
"""
Courseware views functions
"""
import logging
import urllib
import json
from collections import defaultdict
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access
from courseware.courses import get_courses, get_course, get_studio_url, get_course_with_access, sort_by_announcement
from courseware.category import get_primary_course, get_Intermediate_course, get_senior_course
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from student.models import UserTestGroup, CourseEnrollment
from student.views import single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.tabs import CourseTabList, StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from opaque_keys import InvalidKeyError
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor.enrollment import uses_shib
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
    """
    Return the names of the UserTestGroups the given user belongs to.

    TODO (vshnayder): This is not used. When we have a new plan for groups,
    adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60  # cache expiration, in seconds

    names = cache.get(cache_key)
    # Kill caching on dev machines -- we switch groups a lot
    if settings.DEBUG:
        names = None

    if names is None:
        names = [group.name
                 for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, names, one_hour)

    return names
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
    """
    Render "find courses" page. The course selection work is done in
    courseware.courses.
    """
    course_list = sort_by_announcement(
        get_courses(request.user, request.META.get('HTTP_HOST')))
    return render_to_response("courseware/courses.html",
                              {'courses': course_list})
def render_accordion(request, course, chapter, section, field_data_cache):
    """
    Draws navigation bar. Takes current position in accordion as
    parameter.

    If chapter and section are '' or None, renders a default accordion.

    course, chapter, and section are the url_names.

    Returns the html string
    """
    # grab the table of contents
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    toc = toc_for_course(user, request, course, chapter, section, field_data_cache)

    # Template helpers (e.g. urllib) are merged in alongside the view data.
    context = dict(template_imports)
    context.update({
        'toc': toc,
        'course_id': course.id.to_deprecated_string(),
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    })
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
    """
    Get the xmodule.position's display item of an xmodule that has a position and
    children. If xmodule has no position or is out of bounds, return the first
    child with children extending down to content_depth.

    For example, if chapter_one has no position set, with two child sections,
    section-A having no children and section-B having a discussion unit,
    `get_current_child(chapter, min_depth=1)` will return section-B.

    Returns None only if there are no children at all (or xmodule has no
    ``position`` attribute).
    """
    def _get_default_child_module(child_modules):
        """Returns the first child of xmodule, subject to min_depth."""
        if not child_modules:
            return None
        # Treat a missing or non-positive min_depth as "no depth requirement".
        # The previous expression `not min_depth > 0` relied on Python 2's
        # None-to-int comparison; under Python 3 it raises TypeError.
        if min_depth is None or min_depth <= 0:
            return child_modules[0]
        content_children = [
            child for child in child_modules
            if child.has_children_at_depth(min_depth - 1)
        ]
        return content_children[0] if content_children else None

    if not hasattr(xmodule, 'position'):
        return None

    children = xmodule.get_display_items()
    if xmodule.position is None:
        return _get_default_child_module(children)

    # position is 1-indexed.
    pos = xmodule.position - 1
    if 0 <= pos < len(children):
        return children[pos]
    if children:
        # module has a set position, but the position is out of range.
        # return default child.
        return _get_default_child_module(children)
    return None
def redirect_to_course_position(course_module, content_depth):
    """
    Return a redirect to the user's current place in the course.

    If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the users's first time, redirects to COURSE/CHAPTER,
    and the view will find the current section and display a message
    about reusing the stored position.

    If there is no current position in the course or chapter, then selects
    the first child.
    """
    url_kwargs = {'course_id': course_module.id.to_deprecated_string()}

    chapter = get_current_child(course_module, min_depth=content_depth)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")
    url_kwargs['chapter'] = chapter.url_name

    # A stored course position means we only redirect to the chapter level.
    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=url_kwargs))

    # Relying on default of returning first child
    section = get_current_child(chapter, min_depth=content_depth - 1)
    if section is None:
        raise Http404("No section found when loading current position in course")
    url_kwargs['section'] = section.url_name

    return redirect(reverse('courseware_section', kwargs=url_kwargs))
def save_child_position(seq_module, child_name):
    """
    Persist the 1-indexed position of the named child on seq_module.

    child_name: url_name of the child
    """
    for index, child in enumerate(seq_module.get_display_items(), start=1):
        if child.location.name != child_name:
            continue
        # Only save if position changed
        if index != seq_module.position:
            seq_module.position = index
            # Save this new position to the underlying KeyValueStore
            seq_module.save()
def chat_settings(course, user):
    """
    Returns a dict containing the settings required to connect to a
    Jabber chat server and room.

    Returns None (after logging a warning) when JABBER_DOMAIN is unset.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    jabber_id = "{USER}@{DOMAIN}".format(USER=user.username, DOMAIN=domain)
    return {
        'domain': domain,

        # Jabber doesn't like slashes, so replace with dashes
        'room': "{ID}_class".format(ID=course.id.replace('/', '-')),

        'username': jabber_id,

        # TODO: clearly this needs to be something other than the username
        #       should also be something that's not necessarily tied to a
        #       particular course
        'password': jabber_id,
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if they
    are invalid.

    If section is not specified, displays the accordion opened to the right chapter.

    If neither chapter or section are specified, redirects to user's most recent
    chapter, or the first chapter if this is the user's first visit.

    Arguments:

     - request    : HTTP request
     - course_id  : course id (str: ORG/course/URL_NAME)
     - chapter    : chapter url_name (str)
     - section    : section url_name (str)
     - position   : position in module, eg of <sequential> module (str)

    Returns:

     - HTTPresponse
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Re-fetch the user with groups prefetched so later access checks don't
    # trigger one extra DB query each.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    course = get_course_with_access(user, 'load', course_key, depth=2)
    staff_access = has_access(user, 'staff', course)
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
        return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

    masq = setup_masquerade(request, staff_access)

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key, user, course, depth=2)

        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course_key)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

        studio_url = get_studio_url(course_key, 'course')

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'studio_url': studio_url,
            'masquerade': masq,
            'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            'reverifications': fetch_reverify_banner_info(request, course_key),
        }

        has_content = course.has_children_at_depth(CONTENT_DEPTH)
        if not has_content:
            # Show empty courseware for a course with no units
            return render_to_response('courseware/courseware.html', context)
        elif chapter is None:
            # passing CONTENT_DEPTH avoids returning 404 for a course with an
            # empty first section and a second section with content
            return redirect_to_course_position(course_module, CONTENT_DEPTH)

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False

        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor is not None:
            # Remember the chapter the user is viewing on the course module.
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masq as student: no chapter %s' % chapter)
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            raise Http404

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
                    log.debug('staff masq as student: no section %s' % section)
                    return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
                raise Http404

            ## Allow chromeless operation
            if section_descriptor.chrome:
                chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    context['disable_tabs'] = True

            if section_descriptor.default_tab:
                context['default_tab'] = section_descriptor.default_tab

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                course_key, user, section_descriptor, depth=None)

            # Verify that position a string is in fact an int
            if position is not None:
                try:
                    int(position)
                except ValueError:
                    raise Http404("Position {} is not an integer!".format(position))

            section_module = get_module_for_descriptor(
                request.user,
                request,
                section_descriptor,
                section_field_data_cache,
                course_key,
                position
            )

            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter
            save_child_position(chapter_module, section)
            context['fragment'] = section_module.render(STUDENT_VIEW)
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            studio_url = get_studio_url(course_key, 'course')
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user
                raise Http404
            prev_section_url = reverse('courseware_section', kwargs={
                'course_id': course_key.to_deprecated_string(),
                'chapter': chapter_descriptor.url_name,
                'section': prev_section.url_name
            })
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'studio_url': studio_url,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        # Doesn't bar Unicode characters from URL, but if Unicode characters do
        # cause an error it is a graceful failure.
        if isinstance(e, UnicodeEncodeError):
            raise Http404("URL contains Unicode characters")

        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception(
                u"Error in index view: user={user}, course={course}, chapter={chapter}"
                u" section={section} position={position}".format(
                    user=user,
                    course=course,
                    chapter=chapter,
                    section=section,
                    position=position
                ))
            try:
                # Best-effort friendly error page instead of a raw 500.
                result = render_to_response('courseware/courseware-error.html', {
                    'staff_access': staff_access,
                    'course': course
                })
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
    """
    This entry point allows for a shorter version of a jump to where just the id of the element is
    passed in. This assumes that id is unique within the course_id namespace
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    matches = modulestore().get_items(course_key, qualifiers={'name': module_id})
    referer = request.META.get("HTTP_REFERER", "")

    if not matches:
        raise Http404(
            u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
                module_id, course_id, referer
            ))
    if len(matches) > 1:
        # Ambiguous id: warn, then fall through to the first hit.
        log.warning(
            u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
                module_id, course_id, referer, matches[0].location.to_deprecated_string()
            ))

    return jump_to(request, course_id, matches[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
    """
    Show the page that contains a specific location.

    If the location is invalid or not in any class, return a 404.

    Otherwise, delegates to the index view to figure out whether this user
    has access, and what they should see.
    """
    try:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except InvalidKeyError:
        raise Http404(u"Invalid course_key or usage_key")

    try:
        (course_key, chapter, section, position) = path_to_location(modulestore(), usage_key)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(usage_key))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(usage_key))

    # choose the appropriate view (and provide the necessary args) based on the
    # args provided by the redirect.
    # Rely on index to do all error handling and access control.
    redirect_kwargs = {'course_id': course_key.to_deprecated_string()}
    if chapter is None:
        return redirect('courseware', **redirect_kwargs)

    redirect_kwargs['chapter'] = chapter
    if section is None:
        return redirect('courseware_chapter', **redirect_kwargs)

    redirect_kwargs['section'] = section
    if position is None:
        return redirect('courseware_section', **redirect_kwargs)

    redirect_kwargs['position'] = position
    return redirect('courseware_position', **redirect_kwargs)
@ensure_csrf_cookie
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = has_access(request.user, 'staff', course)
    masquerade = setup_masquerade(request, staff_access)  # allow staff to toggle masquerade on info page
    reverifications = fetch_reverify_banner_info(request, course_key)

    return render_to_response('courseware/info.html', {
        'request': request,
        'course_id': course_key.to_deprecated_string(),
        'cache': None,
        'course': course,
        'staff_access': staff_access,
        'masquerade': masquerade,
        'studio_url': get_studio_url(course_key, 'course_info'),
        'reverifications': reverifications,
    })
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
    """
    Display the courses tab with the given name.

    Assumes the course_id is in a valid format.
    """
    try:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    except InvalidKeyError:
        raise Http404

    course = get_course_with_access(request.user, 'load', course_key)

    # 404 for an unknown tab slug or a tab that produced no content.
    tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
    if tab is None:
        raise Http404

    contents = get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404

    return render_to_response('courseware/static_tab.html', {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
    })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    return render_to_response('courseware/syllabus.html', {
        'course': course,
        'staff_access': has_access(request.user, 'staff', course),
    })
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False
    """
    # Anonymous / missing users are never registered.
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
    """
    Display the course's about page.

    Assumes the course_id is in a valid format.
    """
    # When the marketing site is enabled (per-microsite or globally), the
    # LMS about page is not served.
    if microsite.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    ):
        raise Http404

    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'see_exists', course_key)
    registered = registered_for_course(course, request.user)
    staff_access = has_access(request.user, 'staff', course)
    studio_url = get_studio_url(course_key, 'settings/details')

    # Users who can load the course are sent to the info page; others stay
    # on the about page.
    if has_access(request.user, 'load', course):
        course_target = reverse('info', args=[course.id.to_deprecated_string()])
    else:
        course_target = reverse('about_course', args=[course.id.to_deprecated_string()])

    show_courseware_link = (has_access(request.user, 'load', course) or
                            settings.FEATURES.get('ENABLE_LMS_MIGRATION'))

    # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
    registration_price = 0
    in_cart = False
    reg_then_add_to_cart_link = ""
    if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
        settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
        registration_price = CourseMode.min_course_price_for_currency(course_key,
                                                                      settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
        if request.user.is_authenticated():
            cart = shoppingcart.models.Order.get_cart_for_user(request.user)
            in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key)

        reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
            reg_url=reverse('register_user'), course_id=course.id.to_deprecated_string())

    # Used to provide context to message to student if enrollment not allowed
    can_enroll = has_access(request.user, 'enroll', course)
    invitation_only = course.invitation_only
    is_course_full = CourseEnrollment.is_course_full(course)

    # Register button should be disabled if one of the following is true:
    # - Student is already registered for course
    # - Course is already full
    # - Student cannot enroll in course
    active_reg_button = not(registered or is_course_full or not can_enroll)

    is_shib_course = uses_shib(course)

    return render_to_response('courseware/course_about.html', {
        'course': course,
        'staff_access': staff_access,
        'studio_url': studio_url,
        'registered': registered,
        'course_target': course_target,
        'registration_price': registration_price,
        'in_cart': in_cart,
        'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
        'show_courseware_link': show_courseware_link,
        'is_course_full': is_course_full,
        'can_enroll': can_enroll,
        'invitation_only': invitation_only,
        'active_reg_button': active_reg_button,
        'is_shib_course': is_shib_course,
    })
@ensure_csrf_cookie
@cache_if_anonymous
def course_kinds_about(request, course_kind):
    """this is to show the kinds of course page"""
    all_courses = get_courses(request.user, request.META.get('HTTP_HOST'))

    # Bucket this kind's courses by level.
    primary = get_primary_course(all_courses, course_kind)
    intermediate = get_Intermediate_course(all_courses, course_kind)
    senior = get_senior_course(all_courses, course_kind)

    return render_to_response('courseware/course_kinds.html', {
        'course_kind': course_kind,
        'slist': senior,
        'plist': primary,
        'mlist': intermediate,
        'plen': len(primary),
        'mlen': len(intermediate),
        'slen': len(senior),
    })
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
    """
    This is the button that gets put into an iframe on the Drupal site
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        course = get_course_with_access(request.user, 'see_exists', course_key)
    except (ValueError, Http404):
        # if a course does not exist yet, display a coming
        # soon button
        return render_to_response(
            'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
        )

    can_load = has_access(request.user, 'load', course)
    if can_load:
        course_target = reverse('info', args=[course.id.to_deprecated_string()])
    else:
        course_target = reverse('about_course', args=[course.id.to_deprecated_string()])

    return render_to_response('courseware/mktg_course_about.html', {
        'course': course,
        'registered': registered_for_course(course, request.user),
        'allow_registration': has_access(request.user, 'enroll', course),
        'course_target': course_target,
        'show_courseware_link': (can_load or
                                 settings.FEATURES.get('ENABLE_LMS_MIGRATION')),
        'course_modes': CourseMode.modes_for_course_dict(course.id),
    })
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
    """
    Wraps "_progress" with the manual_transaction context manager just in case
    there are unanticipated errors.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with grades.manual_transaction():
        return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
    """
    Unwrapped version of "progress".

    User progress. We show the grade bar and every problem score.

    Course staff are allowed to see the progress of students in their class.
    """
    course = get_course_with_access(request.user, 'load', course_key, depth=None)
    staff_access = has_access(request.user, 'staff', course)

    if student_id is None or student_id == request.user.id:
        # always allowed to see your own profile
        student = request.user
    else:
        # Requesting access to a different student's profile
        if not staff_access:
            raise Http404
        student = User.objects.get(id=int(student_id))

    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.

    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)

    courseware_summary = grades.progress_summary(student, request, course)
    studio_url = get_studio_url(course_key, 'settings/grading')
    grade_summary = grades.grade(student, request, course)

    if courseware_summary is None:
        #This means the student didn't have access to the course (which the instructor requested)
        raise Http404

    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'studio_url': studio_url,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'reverifications': fetch_reverify_banner_info(request, course_key)
    }

    # NOTE(review): rendering is wrapped in manual_transaction, presumably so
    # DB work done while rendering is committed/rolled back as a unit -- confirm.
    with grades.manual_transaction():
        response = render_to_response('courseware/progress.html', context)

    return response
def fetch_reverify_banner_info(request, course_key):
    """
    Fetches needed context variable to display reverification banner in courseware
    """
    banner_info = defaultdict(list)
    user = request.user

    # Users without an id (anonymous) never see the banner.
    if not user.id:
        return banner_info

    enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_key)
    course = modulestore().get_course(course_key)
    reverification = single_course_reverification_info(user, course, enrollment)
    if reverification:
        banner_info[reverification.status].append(reverification)

    return banner_info
@login_required
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.

    Right now this only works for problems because that's all
    StudentModuleHistory records.
    """
    try:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid course id.')))

    try:
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid location.')))

    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = has_access(request.user, 'staff', course)

    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied

    try:
        student = User.objects.get(username=student_username)
    except User.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))

    try:
        student_module = StudentModule.objects.get(
            course_id=course_key,
            module_state_key=usage_key,
            student_id=student.id
        )
    except StudentModule.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
            username=student_username,
            location=location
        )))

    def _history():
        """Most-recent-first history rows for this student module."""
        return StudentModuleHistory.objects.filter(
            student_module=student_module
        ).order_by('-id')

    history_entries = _history()
    # If no history records exist, let's force a save to get history started.
    if not history_entries:
        student_module.save()
        history_entries = _history()

    return render_to_response('courseware/submission_history.html', {
        'history_entries': history_entries,
        'username': student.username,
        'location': location,
        'course_id': course_key.to_deprecated_string()
    })
def notification_image_for_tab(course_tab, user, course):
    """
    Returns the notification image path for the given course_tab if applicable, otherwise None.
    """
    handlers = {
        StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
        PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
        OpenEndedGradingTab.type: open_ended_notifications.combined_notifications,
    }

    handler = handlers.get(course_tab.type)
    if handler is None:
        return None

    notifications = handler(course, user)
    if notifications and notifications['pending_grading']:
        return notifications['img_path']
    return None
def get_static_tab_contents(request, course, tab):
    """
    Returns the contents for the given static tab
    """
    loc = course.id.make_usage_key(
        tab.type,
        tab.url_slug,
    )
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, modulestore().get_item(loc), depth=0
    )
    tab_module = get_module(
        request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path
    )

    logging.debug('course_module = {0}'.format(tab_module))

    if tab_module is None:
        return ''

    try:
        return tab_module.render(STUDENT_VIEW).content
    except Exception:  # pylint: disable=broad-except
        # Fall back to a generic error fragment rather than breaking the page.
        html = render_to_string('courseware/error-message.html', None)
        log.exception(
            u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
        )
        return html
@require_GET
def get_course_lti_endpoints(request, course_id):
    """
    View that, given a course_id, returns the a JSON object that enumerates all of the LTI endpoints for that course.

    The LTI 2.0 result service spec at
    http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
    says "This specification document does not prescribe a method for discovering the endpoint URLs."  This view
    function implements one way of discovering these endpoints, returning a JSON array when accessed.

    Arguments:
        request (django request object):  the HTTP request object that triggered this view function
        course_id (unicode):  id associated with the course

    Returns:
        (django response object):  HTTP response.  404 if course is not found, otherwise 200 with JSON body.
    """
    try:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    except InvalidKeyError:
        return HttpResponse(status=404)

    try:
        course = get_course(course_key, depth=2)
    except ValueError:
        return HttpResponse(status=404)

    anonymous_user = AnonymousUser()
    anonymous_user.known = False  # make these "noauth" requests like module_render.handle_xblock_callback_noauth

    def _load_module(descriptor):
        """Instantiate one LTI module as the anonymous user."""
        cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key,
            anonymous_user,
            descriptor
        )
        return get_module_for_descriptor(
            anonymous_user, request, descriptor, cache, course_key
        )

    endpoints = []
    for descriptor in modulestore().get_items(course.id, qualifiers={'category': 'lti'}):
        module = _load_module(descriptor)
        endpoints.append({
            'display_name': module.display_name,
            'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
                service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
            'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
                service_name='grade_handler'),
        })

    return HttpResponse(json.dumps(endpoints), content_type='application/json')
|
xiandiancloud/ji
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 38,121
|
[
"VisIt"
] |
907af29ec3f8844e010cc00da10e48fe56a0c03bbf41f898042f6d72940ee6c9
|
import numpy as np
from mpl_toolkits.basemap import pyproj
from datetime import datetime
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
def make_remap_grid_file(Cgrd, Cpos='t'):
    """
    Write a SCRIP-style remap grid file for a GLORYS CGrid.

    Parameters
    ----------
    Cgrd : CGrid_GLORYS object
        Provides cell-center lon/lat (``lon_t``/``lat_t``), cell-corner
        vertices (``lon_t_vert``/``lat_t_vert``) and the position masks
        (``mask_t``/``mask_u``/``mask_v``).
    Cpos : str, optional
        Grid position, one of 't', 'u' or 'v'; selects which mask is
        written as ``grid_imask``.  Defaults to 't'.

    Creates ``remap_grid_<name>_<Cpos>.nc`` (NetCDF3 classic) in the
    current working directory.

    Raises
    ------
    ValueError
        If ``Cpos`` is not one of 't', 'u', 'v'.
    """
    # Validate Cpos up front; previously an unknown value fell through and
    # raised a confusing NameError on grid_imask far below.
    if Cpos not in ('t', 'u', 'v'):
        raise ValueError("Cpos must be 't', 'u' or 'v', got %r" % (Cpos,))

    # create remap file
    remap_filename = 'remap_grid_' + Cgrd.name + '_' + Cpos + '.nc'
    nc = netCDF.Dataset(remap_filename, 'w', format='NETCDF3_CLASSIC')
    nc.Description = 'remap grid file for GLORYS'
    nc.Author = 'pyroms_toolbox.CGrid_GLORYS.make_remap_grid_file'
    nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    nc.title = Cgrd.name

    lon_corner = Cgrd.lon_t_vert
    lat_corner = Cgrd.lat_t_vert
    grid_center_lon = Cgrd.lon_t.flatten()
    grid_center_lat = Cgrd.lat_t.flatten()
    Mp, Lp = Cgrd.lon_t.shape

    if Cpos == 't':
        grid_imask = Cgrd.mask_t[0, :].flatten()
    elif Cpos == 'u':
        grid_imask = Cgrd.mask_u[0, :].flatten()
    else:  # 'v' -- guaranteed by the check above
        grid_imask = Cgrd.mask_v[0, :].flatten()

    grid_size = Lp * Mp

    # Cell corners, counter-clockwise starting at the lower-left vertex.
    # Vectorized replacement for the former per-cell double loop: row-major
    # ravel of the (Mp, Lp) corner slices matches k = j * Lp + i.
    grid_corner_lon = np.column_stack((
        lon_corner[:-1, :-1].ravel(),
        lon_corner[:-1, 1:].ravel(),
        lon_corner[1:, 1:].ravel(),
        lon_corner[1:, :-1].ravel(),
    ))
    grid_corner_lat = np.column_stack((
        lat_corner[:-1, :-1].ravel(),
        lat_corner[:-1, 1:].ravel(),
        lat_corner[1:, 1:].ravel(),
        lat_corner[1:, :-1].ravel(),
    ))

    # Write netcdf file
    nc.createDimension('grid_size', grid_size)
    nc.createDimension('grid_corners', 4)
    nc.createDimension('grid_rank', 2)

    nc.createVariable('grid_dims', 'i4', ('grid_rank'))
    nc.variables['grid_dims'].long_name = 'grid size along x and y axis'
    nc.variables['grid_dims'].units = 'None'
    nc.variables['grid_dims'][:] = [(Lp, Mp)]

    nc.createVariable('grid_center_lon', 'f8', ('grid_size'))
    nc.variables['grid_center_lon'].long_name = 'longitude of cell center'
    nc.variables['grid_center_lon'].units = 'degrees'
    nc.variables['grid_center_lon'][:] = grid_center_lon

    nc.createVariable('grid_center_lat', 'f8', ('grid_size'))
    nc.variables['grid_center_lat'].long_name = 'latitude of cell center'
    nc.variables['grid_center_lat'].units = 'degrees'
    nc.variables['grid_center_lat'][:] = grid_center_lat

    nc.createVariable('grid_imask', 'i4', ('grid_size'))
    nc.variables['grid_imask'].long_name = 'mask'
    nc.variables['grid_imask'].units = 'None'
    nc.variables['grid_imask'][:] = grid_imask

    nc.createVariable('grid_corner_lon', 'f8', ('grid_size', 'grid_corners'))
    nc.variables['grid_corner_lon'].long_name = 'longitude of cell corner'
    nc.variables['grid_corner_lon'].units = 'degrees'
    nc.variables['grid_corner_lon'][:] = grid_corner_lon

    nc.createVariable('grid_corner_lat', 'f8', ('grid_size', 'grid_corners'))
    nc.variables['grid_corner_lat'].long_name = 'latitude of cell corner'
    nc.variables['grid_corner_lat'].units = 'degrees'
    nc.variables['grid_corner_lat'][:] = grid_corner_lat

    nc.close()
|
kshedstrom/pyroms
|
pyroms_toolbox/pyroms_toolbox/CGrid_GLORYS/make_remap_grid_file.py
|
Python
|
bsd-3-clause
| 3,218
|
[
"NetCDF"
] |
65e227dba5bd7413dfab7e499bc0830964ff2d0c3a828d48622ab76ef0496cb2
|
"""
NH3 fitter wrapper
==================
Wrapper to fit ammonia spectra. Generates a reasonable guess at the position
and velocity using a gaussian fit
Example use:
.. code:: python
import pyspeckit
sp11 = pyspeckit.Spectrum('spec.nh3_11.dat', errorcol=999)
sp22 = pyspeckit.Spectrum('spec.nh3_22.dat', errorcol=999)
sp33 = pyspeckit.Spectrum('spec.nh3_33.dat', errorcol=999)
sp11.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']
sp22.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['twotwo']
sp33.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['threethree']
input_dict={'oneone':sp11, 'twotwo':sp22, 'threethree':sp33}
spf = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict)
Note that if you want to use the plotter wrapper with cubes, you need to do
something like the following, where the ``plot_special`` method of the stacked
``cubes`` object is set to the ``plotter_override`` function defined in the
fitnh3_wrapper code:
.. code:: python
cubes.plot_special = pyspeckit.wrappers.fitnh3.plotter_override
cubes.plot_special_kwargs = {'fignum':3, 'vrange':[55,135]}
cubes.plot_spectrum(160,99)
"""
from __future__ import print_function
import warnings
from six.moves import xrange
from six import iteritems
import pyspeckit
from .. import spectrum
from ..spectrum.classes import Spectrum, Spectra
from ..spectrum import units
from ..spectrum.models import ammonia_constants
import numpy as np
import copy
import random
from astropy import log
from astropy import units as u
# Register the optically-thin tau-based ammonia model (5 free parameters)
# with the default fitter registry so fittype='ammonia_tau_thin' works.
pyspeckit.spectrum.fitters.default_Registry.add_fitter('ammonia_tau_thin',
                                                       pyspeckit.spectrum.models.ammonia.ammonia_model_vtau_thin(),
                                                       5)
# LaTeX-formatted subplot titles for each ammonia transition name.
title_dict = {'oneone':'NH$_3(1, 1)$', 'twotwo':'NH$_3(2, 2)$',
              'threethree':'NH$_3(3, 3)$', 'fourfour':'NH$_3(4, 4)$',
              'fivefive':'NH$_3(5, 5)$', 'sixsix':'NH$_3(6, 6)$',
              'sevenseven':'NH$_3(7, 7)$', 'eighteight':'NH$_3(8, 8)$',
              }
def fitnh3tkin(input_dict, dobaseline=True, baselinekwargs={}, crop=False,
               cropunit=None, guessline='twotwo', tex=15, trot=20, column=15.0,
               fortho=0.66, tau=None, thin=False, quiet=False, doplot=True,
               fignum=1, guessfignum=2, smooth=False, scale_keyword=None,
               rebase=False, tkin=None, npeaks=1, guesses=None,
               fittype='ammonia',
               guess_error=True, plotter_wrapper_kwargs={}, **kwargs):
    """
    Given a dictionary of filenames and lines, fit them together
    e.g. {'oneone':'G000.000+00.000_nh3_11.fits'}

    Parameters
    ----------
    input_dict : dict
        A dictionary in which the keys are the ammonia line names (e.g.,
        'oneone', 'twotwo', etc) and the values are either Spectrum objects
        or filenames of spectra
    dobaseline : bool
        Fit and subtract a baseline prior to fitting the model?
        Keyword arguments to `pyspeckit.spectrum.Spectrum.baseline` are
        specified in ``baselinekwargs``.
    baselinekwargs : dict
        The keyword arguments for the baseline
    crop : bool or tuple
        A range of values to crop the spectrum to. The units are specified by
        ``cropunit``; the default ``None`` will use pixels. If False, no
        cropping will be performed.
    cropunit : None or astropy unit
        The unit for the crop parameter
    guess_error : bool
        Use the guess line to estimate the error in all spectra?
    plotter_wrapper_kwargs : dict
        Keyword arguments to pass to the plotter
    fittype: 'ammonia' or 'cold_ammonia'
        The fitter model to use. This is overridden if `tau` is specified,
        in which case one of the `ammonia_tau` models is used (see source code)

    Returns
    -------
    spdict : dict
        Line-name -> Spectrum mapping (x-axes converted to GHz).
    spectra : Spectra
        The stacked spectra with the joint ammonia fit attached.
    """
    # 'tkin' is the deprecated spelling of 'trot'; accept it only when trot
    # was left at its default value.
    if tkin is not None:
        if trot == 20 or trot is None:
            trot = tkin
        else:
            raise ValueError("Please specify trot, not tkin")
        warnings.warn("Keyword 'tkin' is deprecated; use trot instead", DeprecationWarning)
    # Values may be Spectrum objects or filenames; load filenames on the fly.
    spdict = dict([(linename, Spectrum(value, scale_keyword=scale_keyword))
                   if type(value) is str else (linename, value)
                   for linename, value in iteritems(input_dict)
                   ])
    splist = spdict.values()
    for transition, sp in spdict.items(): # required for plotting, cropping
        # put each spectrum on a velocity axis referenced to its own line
        sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)
    if crop and len(crop) == 2:
        for sp in splist:
            sp.crop(*crop, unit=cropunit)
    if dobaseline:
        for sp in splist:
            sp.baseline(**baselinekwargs)
    if smooth and type(smooth) is int:
        for sp in splist:
            sp.smooth(smooth)
    # Single-gaussian fit to the guess line yields starting values for the
    # amplitude, centroid velocity and line width.
    spdict[guessline].specfit(fittype='gaussian', negamp=False, vheight=False,
                              guesses='moments')
    ampguess, vguess, widthguess = spdict[guessline].specfit.modelpars
    if widthguess < 0:
        raise ValueError("Width guess was < 0. This is impossible.")
    print("RMS guess (errspec): ", spdict[guessline].specfit.errspec.mean())
    print("RMS guess (residuals): ", spdict[guessline].specfit.residuals.std())
    errguess = spdict[guessline].specfit.residuals.std()
    if rebase:
        # redo baseline subtraction excluding the centroid +/- about 20 km/s
        vlow = spdict[guessline].specfit.modelpars[1]-(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        vhigh = spdict[guessline].specfit.modelpars[1]+(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        for sp in splist:
            sp.baseline(exclude=[vlow, vhigh], **baselinekwargs)
    for sp in splist:
        if guess_error:
            # adopt the guess line's residual RMS as the uncertainty everywhere
            sp.error[:] = errguess
        sp.xarr.convert_to_unit(u.GHz)
    if doplot:
        spdict[guessline].plotter(figure=guessfignum)
        spdict[guessline].specfit.plot_fit()
    spectra = Spectra(splist)
    spectra.specfit.npeaks = npeaks
    if tau is not None:
        # tau-based model; parameter order (trot, tex, tau, width, xoff_v,
        # fortho), jittered slightly for each additional velocity component
        if guesses is None:
            guesses = [a for i in xrange(npeaks) for a in
                       (trot+random.random()*i, tex, tau+random.random()*i,
                        widthguess+random.random()*i, vguess+random.random()*i,
                        fortho)]
        fittype = 'ammonia_tau_thin' if thin else 'ammonia_tau'
        spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
                        **kwargs)
    else:
        # column-based model: (trot, tex, column, width, xoff_v, fortho)
        if guesses is None:
            guesses = [a for i in xrange(npeaks) for a in
                       (trot+random.random()*i, tex, column+random.random()*i,
                        widthguess+random.random()*i, vguess+random.random()*i,
                        fortho)]
        if thin:
            raise ValueError("'thin' keyword not supported for the generic ammonia model")
        spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
                        **kwargs)
    if doplot:
        plot_nh3(spdict, spectra, fignum=fignum, **plotter_wrapper_kwargs)
    return spdict, spectra
def plot_nh3(spdict, spectra, fignum=1, show_components=False,
             residfignum=None, show_hyperfine_components=True, annotate=True,
             axdict=None, figure=None,
             **plotkwargs):
    """
    Plot the results from a multi-nh3 fit

    spdict needs to be dictionary with form:
        'oneone': spectrum,
        'twotwo': spectrum,
        etc.

    The joint fit attached to ``spectra`` is copied onto each individual
    spectrum so that each subplot can render its own model overlay.
    """
    from matplotlib import pyplot
    if figure is None:
        spectra.plotter.figure = pyplot.figure(fignum)
        spectra.plotter.axis = spectra.plotter.figure.gca()
    splist = spdict.values()
    for transition, sp in spdict.items():
        # velocity axis referenced to each spectrum's own transition
        sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)
        # copy the joint-fit state onto the individual spectrum; a fresh
        # spectrum may not have a fitter yet, hence the AttributeError guard
        try:
            sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
            sp.specfit.fitter.npeaks = spectra.specfit.npeaks
        except AttributeError:
            pass
        sp.specfit.modelpars = spectra.specfit.modelpars
        sp.specfit.parinfo = spectra.specfit.parinfo
        sp.specfit.npeaks = spectra.specfit.npeaks
        if spectra.specfit.modelpars is not None:
            # evaluate the joint ammonia model on this spectrum's axis
            sp.specfit.model = sp.specfit.fitter.n_ammonia(pars=spectra.specfit.modelpars, parnames=spectra.specfit.fitter.parnames)(sp.xarr)
    if axdict is None:
        axdict = make_axdict(splist, spdict)
    for linename, sp in iteritems(spdict):
        if linename not in axdict:
            raise NotImplementedError("Plot windows for {0} cannot "
                                      "be automatically arranged (yet)."
                                      .format(linename))
        sp.plotter.axis=axdict[linename] # permanent
        sp.plotter(axis=axdict[linename], title=title_dict[linename], **plotkwargs)
        sp.specfit.Spectrum.plotter = sp.plotter
        sp.specfit.selectregion(reset=True)
        if sp.specfit.modelpars is not None:
            sp.specfit.plot_fit(annotate=False, show_components=show_components,
                                show_hyperfine_components=show_hyperfine_components)
    # annotate only once, on the (1,1) panel
    if spdict['oneone'].specfit.modelpars is not None and annotate:
        spdict['oneone'].specfit.annotate(labelspacing=0.05,
                                          prop={'size':'small',
                                                'stretch':'extra-condensed'},
                                          frameon=False)
    if residfignum is not None:
        # optional second figure with per-line fit residuals
        pyplot.figure(residfignum)
        pyplot.clf()
        axdict = make_axdict(splist, spdict)
        for linename, sp in iteritems(spdict):
            sp.specfit.plotresiduals(axis=axdict[linename])
def make_axdict(splist, spdict):
    """
    Create a line-name -> subplot-axis mapping for 2, 3 or 4 spectra.

    Two spectra are stacked vertically; four form a 2x2 grid; three get a
    full-width top panel plus two bottom panels.  Lines are assigned in the
    canonical ``ammonia_constants.line_names`` order.
    """
    from matplotlib import pyplot
    nlines = len(splist)
    if nlines not in (2, 3, 4):
        raise NotImplementedError("Plots with {0} subplots are not yet "
                                  "implemented.  Pull requests are "
                                  "welcome!".format(nlines))
    # lines present in spdict, in canonical transition order
    present = [ln for ln in ammonia_constants.line_names if ln in spdict]
    axdict = {}
    if nlines == 2:
        for position, linename in enumerate(present, start=1):
            axdict[linename] = pyplot.subplot(2, 1, position)
    elif nlines == 3:
        # first line spans the top row; remaining two share the bottom row
        axdict[present[0]] = pyplot.subplot(2, 1, 1)
        for position, linename in zip((3, 4), present[1:]):
            axdict[linename] = pyplot.subplot(2, 2, position)
    else:  # nlines == 4
        for position, linename in enumerate(present, start=1):
            axdict[linename] = pyplot.subplot(2, 2, position)
    return axdict
def fitnh3(spectrum, vrange=[-100, 100], vrangeunit='km/s', quiet=False, Tex=20,
           trot=15, column=1e15, fortho=1.0, tau=None, Tkin=None,
           fittype='ammonia',
           spec_convert_kwargs={}):
    """
    Fit an ammonia model to a single spectrum.

    A gaussian pre-fit provides the velocity and width guesses, which then
    seed either the column-based model selected by ``fittype`` or, when
    ``tau`` is given, the 'ammonia_tau' model.

    Parameters
    ----------
    spectrum : pyspeckit.Spectrum
        The spectrum to fit (modified in place; also returned).
    vrange : list or None/False
        Velocity range to crop to before fitting; no cropping if falsy.
    vrangeunit : str
        Unit of ``vrange``.
    Tkin : float
        Deprecated alias for ``trot``.

    Returns
    -------
    spectrum : pyspeckit.Spectrum
        The input spectrum with the ammonia fit attached.
    """
    # 'Tkin' is the deprecated spelling of 'trot'; accept it only when trot
    # was left at its default.  BUG FIX: the default of trot in this
    # function is 15 (not 20 as in fitnh3tkin), so the old ``trot == 20``
    # check incorrectly rejected Tkin whenever trot was left at its default.
    if Tkin is not None:
        if trot == 15 or trot is None:
            trot = Tkin
        else:
            raise ValueError("Please specify trot, not Tkin")
        warnings.warn("Keyword 'Tkin' is deprecated; use trot instead", DeprecationWarning)
    if vrange:
        spectrum.xarr.convert_to_unit(vrangeunit, **spec_convert_kwargs)
        spectrum.crop(*vrange, unit=vrangeunit)
    # gaussian pre-fit for centroid/width starting values
    spectrum.specfit(fittype='gaussian', negamp=False, guesses='moments')
    ampguess, vguess, widthguess = spectrum.specfit.modelpars
    if tau is None:
        spectrum.specfit(fittype=fittype, quiet=quiet,
                         guesses=[Tex, trot, column, widthguess, vguess,
                                  fortho])
    else:
        spectrum.specfit(fittype='ammonia_tau', quiet=quiet,
                         guesses=[Tex, trot, tau, widthguess, vguess, fortho])
    return spectrum
def BigSpectrum_to_NH3dict(sp, vrange=None):
    """
    A rather complicated way to make the spdicts above given a spectrum...

    For each known ammonia transition, check whether its (velocity-shifted)
    frequency falls inside ``sp``'s frequency axis; if so, copy the
    spectrum, re-reference it to that transition, convert to km/s, and
    optionally slice to ``vrange``.  Returns a line-name -> Spectrum dict.
    """
    sp.xarr.convert_to_unit('GHz')
    spdict = {}
    for linename, freq in iteritems(spectrum.models.ammonia.freq_dict):
        if not hasattr(freq, 'unit'):
            freq = freq*u.Hz
        if vrange is not None:
            # Doppler-shift the rest frequency to both ends of the velocity
            # range; the line counts as present if either end is on-axis
            freq_test_low = freq - freq * vrange[0]/units.speedoflight_kms
            freq_test_high = freq - freq * vrange[1]/units.speedoflight_kms
        else:
            freq_test_low = freq_test_high = freq
        log.debug("line {2}: freq test low, high: {0}, {1}"
                  .format(freq_test_low, freq_test_high, linename))
        if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
            sp.xarr.as_unit('Hz').in_range(freq_test_high)):
            spdict[linename] = sp.copy(deep=True)
            spdict[linename].xarr.convert_to_unit('GHz')
            # sanity check: the deep copy must share the original axis values
            assert np.all(np.array(spdict[linename].xarr == sp.xarr,
                                   dtype='bool'))
            spdict[linename].xarr.refX = freq
            spdict[linename].xarr.convert_to_unit('km/s',
                                                  velocity_convention='radio',
                                                  refX=pyspeckit.spectrum.models.ammonia.freq_dict[linename]*u.Hz,
                                                  quiet=True)
            # round-trip check: converting back to GHz must reproduce sp.xarr
            np.testing.assert_array_almost_equal(spdict[linename].xarr.as_unit('GHz').value,
                                                 sp.xarr.value)
            log.debug("Line {0}={2}: {1}".format(linename, spdict[linename],
                                                 freq))
            if vrange is not None:
                try:
                    spdict[linename] = spdict[linename].slice(start=vrange[0],
                                                              stop=vrange[1],
                                                              unit='km/s')
                    log.debug("Successfully cropped {0} to {1}, freq = {2}, {3}"
                              .format(linename, vrange, freq,
                                      spdict[linename].xarr))
                    if len(spdict[linename]) == 0:
                        spdict.pop(linename)
                        log.debug("Removed {0} from spdict".format(linename))
                except IndexError:
                    # if the freq in range, but there's no data in range, remove
                    spdict.pop(linename)
        else:
            log.debug("Line {0} not in spectrum".format(linename))
    # this shouldn't be reachable, but there are reported cases where spdict
    # gets populated w/empty spectra, which leads to a failure in producing
    # their repr. Since that on its own isn't a very helpful error message,
    # we'd rather return the bad spdict and see if the next function down the
    # line can survive with a questionable spdict...
    try:
        log.debug(str(spdict))
    except Exception as ex:
        log.debug(str(ex))
    return spdict
def plotter_override(sp, vrange=None, **kwargs):
    """
    Plot an NH3 spectrum set with a ``plotter()``-like calling convention.

    The stacked spectrum ``sp`` is split into per-line spectra (optionally
    restricted to ``vrange``), handed to `plot_nh3`, and the resulting
    line-name -> spectrum dictionary is returned.
    """
    spdict = BigSpectrum_to_NH3dict(sp, vrange=vrange)
    log.debug("spdict: {0}".format(spdict))
    nlines = len(spdict)
    if nlines > 4:
        raise ValueError("Too many lines ({0}) found.".format(nlines))
    if nlines not in (2, 3, 4):
        raise ValueError("Not enough lines; don't need to use the NH3 plot "
                         "wrapper. If you think you are getting this message "
                         "incorrectly, check the velocity range (vrange "
                         "parameter) and make sure your spectrum overlaps with "
                         " it.")
    plot_nh3(spdict, sp, **kwargs)
    return spdict
|
low-sky/pyspeckit
|
pyspeckit/wrappers/fitnh3.py
|
Python
|
mit
| 16,036
|
[
"Gaussian"
] |
c32e6a45a5a9e22706cab7674f5ed12ba8c56c8b3fff1f8c80b015722333521c
|
# ======================================================================
# Atomistica - Interatomic potential library and molecular dynamics code
# https://github.com/Atomistica/atomistica
#
# Copyright (2005-2020) Lars Pastewka <lars.pastewka@imtek.uni-freiburg.de>
# and others. See the AUTHORS file in the top-level Atomistica directory.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""
Potential test suite.
"""
from __future__ import print_function

from math import sqrt

import numpy as np

import ase
import ase.constraints
import ase.io
import ase.lattice.cubic as cubic
from ase.optimize import FIRE, QuasiNewton
from ase.units import GPa
###
Jm2 = 1e23/ase.units.kJ
###
def test_forces(atoms, dx=1e-6):
"""Compute forces and compare to forces computed numerically from a
finite differences approach.
"""
f0 = atoms.get_forces().copy()
ffd = f0.copy()
for a in atoms:
r0 = a.position.copy()
a.x = r0[0]-dx
ex1 = atoms.get_potential_energy()
a.x = r0[0]+dx
ex2 = atoms.get_potential_energy()
a.x = r0[0]
a.y = r0[1]-dx
ey1 = atoms.get_potential_energy()
a.y = r0[1]+dx
ey2 = atoms.get_potential_energy()
a.y = r0[1]
a.z = r0[2]-dx
ez1 = atoms.get_potential_energy()
a.z = r0[2]+dx
ez2 = atoms.get_potential_energy()
a.z = r0[2]
ffd[a.index, 0] = -(ex2-ex1)/(2*dx)
ffd[a.index, 1] = -(ey2-ey1)/(2*dx)
ffd[a.index, 2] = -(ez2-ez1)/(2*dx)
df = ffd-f0
absdf = np.sum(df*df, axis=1)
return ffd, f0, np.max(absdf)
def test_virial(atoms, de=1e-6):
    """Compute virial and compare to virial computed numerically from a
    finite differences approach.

    Each cell-strain component (i, j) is perturbed by -de/+de and the
    central difference of the total energy provides dE/deps_ij; the result
    is symmetrized, converted to Voigt order and divided by the volume for
    comparison with the calculator's stress.

    Returns (finite-difference stress, analytic stress, max deviation).
    """
    s0 = atoms.get_stress().copy()
    V0 = atoms.get_volume()
    sfd = np.zeros([ 3, 3 ])
    c0 = atoms.get_cell().copy()
    # identity strain matrix, perturbed one component at a time
    un = np.zeros([3,3])
    un[0,0] = 1.0
    un[1,1] = 1.0
    un[2,2] = 1.0
    for i in range(3):
        for j in range(3):
            c = c0.copy()
            eps = un.copy()
            eps[i, j] = un[i, j]-de
            c = np.dot(c0, eps)
            atoms.set_cell(c, scale_atoms=True)
            e1 = atoms.get_potential_energy()
            eps[i, j] = un[i, j]+de
            c = np.dot(c0, eps)
            atoms.set_cell(c, scale_atoms=True)
            e2 = atoms.get_potential_energy()
            sfd[i, j] = (e2-e1)/(2*de)
    # NOTE(review): the cell is left in the last strained state here;
    # callers may want atoms.set_cell(c0, scale_atoms=True) afterwards.
    # Voigt order (xx, yy, zz, yz, zx, xy) with symmetrized shear terms
    sfd = np.array( [ sfd[0,0], sfd[1,1], sfd[2,2], (sfd[1,2]+sfd[2,1])/2,
                      (sfd[0,2]+sfd[2,0])/2, (sfd[0,1]+sfd[1,0])/2 ] )/V0
    # NOTE(review): np.max(sfd-s0) is a *signed* maximum; a large negative
    # deviation would go unreported.  np.max(np.abs(sfd-s0)) was probably
    # intended -- confirm before relying on the returned error measure.
    return sfd, s0, np.max(sfd-s0)
def test_potential(atoms, dq=1e-6):
"""
Compute electrostatic potential and compare to potential computed
numerically from a finite differences approach.
"""
p0 = atoms.calc.get_electrostatic_potential().copy()
pfd = p0.copy()
for a in atoms:
q0 = a.charge
a.charge = q0-dq
eq1 = atoms.get_potential_energy()
a.charge = q0+dq
eq2 = atoms.get_potential_energy()
a.charge = q0
pfd[a.index] = (eq2-eq1)/(2*dq)
dp = pfd-p0
absdp = np.sum(dp*dp)
return pfd, p0, np.max(absdp)
def cubic_elastic_constants(a, Minimizer=None, fmax=0.025, eps=0.001):
    """Extract cubic elastic constants from finite-strain stress differences.

    Parameters
    ----------
    a : ase.Atoms
        Configuration with attached calculator; cell and positions are
        restored before returning.
    Minimizer : class
        Optional optimizer class (e.g. QuasiNewton) used to relax internal
        coordinates after each applied strain.
    fmax : float
        Force convergence criterion for the optional relaxation.
    eps : float
        Strain magnitude.

    Returns
    -------
    ( C11, C12, C44, B, Cp )
        With B = (C11+2*C12)/3 and Cp = (C11-C12)/2; each strain is applied
        relative to the stored reference cell.
    """
    r0 = a.get_positions().copy()
    cell = a.get_cell()
    # reference stress in Voigt order (xx, yy, zz, yz, zx, xy)
    sxx0, syy0, szz0, syz0, szx0, sxy0 = a.get_stress()
    ## C11
    # uniaxial strain along x
    T = np.diag( [ eps, 0.0, 0.0 ] )
    a.set_cell( np.dot(np.eye(3)+T, cell.T).T, scale_atoms=True )
    if Minimizer is not None:
        Minimizer(a, logfile=None).run(fmax=fmax)
    sxx11, syy11, szz11, syz11, szx11, sxy11 = a.get_stress()
    C11 = (sxx11-sxx0)/eps
    ## C12 (C')
    # volume-conserving tetragonal strain yields the shear modulus C'
    T = np.diag( [ eps, -eps/2, -eps/2 ] )
    a.set_cell( np.dot(np.eye(3)+T, cell.T).T, scale_atoms=True )
    if Minimizer is not None:
        Minimizer(a, logfile=None).run(fmax=fmax)
    sxx12, syy12, szz12, syz12, szx12, sxy12 = a.get_stress()
    Cp = ((sxx12-sxx0)-(syy12-syy0))/(3*eps)
    C12 = C11-2*Cp
    ## C44
    # symmetric shear applied simultaneously in all three planes
    T = np.array( [ [ 0.0, 0.5*eps, 0.5*eps ], [ 0.5*eps, 0.0, 0.5*eps ], [ 0.5*eps, 0.5*eps, 0.0 ] ] )
    a.set_cell( np.dot(np.eye(3)+T, cell.T).T, scale_atoms=True )
    if Minimizer is not None:
        Minimizer(a, logfile=None).run(fmax=fmax)
    sxx44, syy44, szz44, syz44, szx44, sxy44 = a.get_stress()
    C44 = (syz44+szx44+sxy44-syz0-szx0-sxy0)/(3*eps)
    # restore the undistorted reference configuration
    a.set_cell( cell, scale_atoms=True )
    a.set_positions(r0)
    B = (C11+2*C12)/3
    return ( C11, C12, C44, B, Cp )
def orthorhombic_elastic_constants(a, Minimizer=None, fmax=0.025, eps=0.001):
    """Directional elastic constants of an orthorhombic cell.

    For each of the three axes the uniaxial (C11-like), volume-conserving
    orthorhombic (C'-like) and shear (C44-like) responses are computed
    from finite-strain stress differences.

    Returns
    -------
    ( C11, C12, C44, Cp )
        Arrays of length 3, one entry per axis; C12 = C11 - 2*Cp.
    """
    # optionally relax internal coordinates of the reference state first
    if Minimizer is not None:
        Minimizer(a, logfile='min.log').run(fmax=fmax)
    r0 = a.get_positions().copy()
    cell = a.get_cell()
    s0 = a.get_stress()
    ## C11
    C11 = [ ]
    for i in range(3):
        # reset to the reference state before each strained evaluation
        a.set_cell(cell, scale_atoms=True)
        a.set_positions(r0)
        T = np.zeros( (3,3) )
        T[i, i] = eps
        a.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
        if Minimizer is not None:
            Minimizer(a, logfile='min.log').run(fmax=fmax)
        s = a.get_stress()
        C11 += [ (s[i]-s0[i])/eps ]
    ## C12 (C)
    Cp = [ ]
    C12 = [ ]
    for i in range(3):
        a.set_cell(cell, scale_atoms=True)
        a.set_positions(r0)
        T = np.zeros( (3, 3) )
        j = (i+1)%3
        k = (i+2)%3
        # volume-conserving strain in the plane perpendicular to axis i
        T[j,j] = eps
        T[k,k] = -eps
        a.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
        if Minimizer is not None:
            Minimizer(a, logfile='min.log').run(fmax=fmax)
        s = a.get_stress()
        Cp += [ ((s[j]-s0[j])-(s[k]-s0[k]))/(4*eps) ]
    ## C44
    C44 = [ ]
    for i in range(3):
        a.set_cell(cell, scale_atoms=True)
        a.set_positions(r0)
        T = np.zeros( (3, 3) )
        j = (i+1)%3
        k = (i+2)%3
        # symmetric shear in the jk-plane (engineering shear 2*eps)
        T[j, k] = eps
        T[k, j] = eps
        a.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
        if Minimizer is not None:
            Minimizer(a, logfile='min.log').run(fmax=fmax)
        s = a.get_stress()
        #sxx44, syy44, szz44, syz44, szx44, sxy44 = a.get_stress()
        #C44 = (syz44+szx44+sxy44-syz0-szx0-sxy0)/(3*eps)
        C44 += [ (s[3+i]-s0[3+i])/(2*eps) ]
    # restore the undistorted reference configuration
    a.set_cell( cell, scale_atoms=True )
    a.set_positions(r0)
    C11 = np.array(C11)
    Cp = np.array(Cp)
    C44 = np.array(C44)
    C12 = C11-2*Cp
    return ( C11, C12, C44, Cp )
def test_cubic_elastic_constants(mats, pot, par=None, sx=1, dev_thres=5,
                                 test=None):
    """Check cohesive energy, lattice constant and cubic elastic constants
    of a list of materials against reference values.

    Parameters
    ----------
    mats : list
        Each entry is either a dict with keys 'name', 'struct' and optional
        reference values 'Ec', 'a0', 'C11', 'C12', 'C44', 'C440', 'B',
        'Cp', or a tuple (name, struct, Ec, a0, C11, C12, C44, B, Cp).
    pot : class or object
        Calculator class (instantiated with ``par``) or an instance.
    par : dict
        Constructor parameters for ``pot``; the optional '__ref__' entry is
        printed as a citation.
    sx : int
        Supercell multiplier used to reduce the measured cell vector to the
        unit-cell lattice constant.
    dev_thres : float
        Allowed deviation in percent.
    test : unittest.TestCase or None
        If given, deviations become assertions; otherwise warnings are
        printed and counted.

    Returns
    -------
    (nok, nfail) : tuple of int
        Counts of properties inside/outside the threshold.
        NOTE(review): the counters are only updated in print mode, and for
        some properties only when a reference value is present.
    """
    nok = 0
    nfail = 0
    try:
        potname = pot.__name__
    except:
        potname = pot.__class__.__name__
    if test is None:
        print('--- %s ---' % potname)
    if par is not None:
        if test is None and '__ref__' in par:
            print(' %s' % par['__ref__'])
        c = pot(**par)
    else:
        c = pot
    for imat in mats:
        t_Ec = t_a0 = t_C11 = t_C12 = t_C44 = t_C440 = t_B = t_Cp = None
        if isinstance(imat, tuple):
            name, a, t_Ec, t_a0, t_C11, t_C12, t_C44, t_B, t_Cp = imat
        else:
            # dict form: any missing/non-numeric reference becomes None
            name = imat['name']
            a = imat['struct']
            try:
                t_Ec = float(imat['Ec'])
            except:
                t_Ec = None
            try:
                t_a0 = float(imat['a0'])
            except:
                t_a0 = None
            try:
                t_C11 = float(imat['C11'])
            except:
                t_C11 = None
            try:
                t_C12 = float(imat['C12'])
            except:
                t_C12 = None
            try:
                t_C44 = float(imat['C44'])
            except:
                t_C44 = None
            try:
                t_C440 = float(imat['C440'])
            except:
                t_C440 = None
            try:
                t_B = float(imat['B'])
            except:
                t_B = None
            try:
                t_Cp = float(imat['Cp'])
            except:
                t_Cp = None
        errmsg = 'potential: %s; material: %s' % (potname, name)
        # break symmetry slightly, wrap atoms back into the cell, then relax
        # the cell shape (hydrostatic components only) to equilibrium
        a.translate([0.1, 0.1, 0.1])
        a.set_scaled_positions(a.get_scaled_positions())
        a.calc = c
        FIRE(
            ase.constraints.StrainFilter(a, mask=[1,1,1,0,0,0]),
            logfile=None).run(fmax=0.0001)
        #ase.io.write('%s.cfg' % name, a)
        #
        # Ec
        #
        # Ec from the calculator is negative (binding energy) while t_Ec is
        # given as a positive cohesive energy -- hence the '+' below,
        # presumably; confirm against the reference tables.
        Ec = a.get_potential_energy()/len(a)
        if t_Ec is None:
            if test is None:
                print('%10s: Ec = %10.3f eV' % ( name, Ec ))
        else:
            t_Ec = float(t_Ec)
            dev = (Ec + t_Ec)*100/t_Ec
            if test is None:
                print('%10s: Ec = %10.3f eV (%10.3f eV - %7.2f %%)' % \
                    ( name, Ec, t_Ec, dev ))
            if test is None:
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%i %%.' % dev_thres)
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # a0
        #
        c1, c2, c3 = a.get_cell()
        a0 = sqrt(np.dot(c1, c1))/sx
        if t_a0 is None:
            if test is None:
                print(' a0 = %10.3f A ' % a0)
        else:
            t_a0 = float(t_a0)
            dev = (a0 - t_a0)*100/t_a0
            if test is None:
                print(' a0 = %10.3f A (%10.3f A - %7.2f %%)' % \
                    ( a0, t_a0, dev ))
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%i %%.' % dev_thres)
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        # unrelaxed (tiny strain) and internally relaxed elastic constants
        C11, C12, C44, B, Cp = cubic_elastic_constants(a, eps=1e-6)
        C11r, C12r, C44r, Br, Cpr = cubic_elastic_constants(
            a, Minimizer=QuasiNewton, fmax=1e-8, eps=0.001)
        #
        # C11
        #
        if t_C11 is None:
            if test is None:
                print(' C11 = %10.4f GPa' % (C11/GPa))
        else:
            t_C11 = float(t_C11)
            dev = (C11/GPa - t_C11)*100/t_C11
            if test is None:
                print(' C11 = %10.4f GPa (%10.4f GPa - ' \
                      '%7.2f%%)' % (C11/GPa, t_C11, dev))
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % dev_thres)
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # C12
        #
        if t_C12 is None:
            if test is None:
                # NOTE(review): the duplicated 'GPa GPa' looks like a typo
                # in the format string
                print(' C12 = %10.4f GPa GPa' % (C12/GPa))
        else:
            t_C12 = float(t_C12)
            dev = (C12/GPa - t_C12)*100/t_C12
            if test is None:
                print(' C12 = %10.4f GPa (%10.4f GPa ' \
                      '- %7.2f %%)' % (C12/GPa, t_C12, dev))
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % (dev_thres))
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # C44 (internally relaxed value, C44r)
        #
        if t_C44 is None:
            if test is None:
                print(' C44 = %10.4f GPa' % (C44r/GPa))
        else:
            t_C44 = float(t_C44)
            dev = (C44r/GPa - t_C44)*100/t_C44
            if test is None:
                print(' C44 = %10.4f GPa (%10.4f GPa - ' \
                      '%7.2f %%)' % ( C44r/GPa, t_C44, dev ))
            if test is None:
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % (dev_thres))
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # C440 (unrelaxed value, C44)
        #
        if t_C440 is None:
            if test is None:
                print(' C440 = %10.4f GPa' % (C44/GPa))
        else:
            t_C440 = float(t_C440)
            dev = (C44/GPa - t_C440)*100/t_C440
            if test is None:
                print(' C440 = %10.4f GPa (%10.4f GPa - ' \
                      '%7.2f %%)' % (C44/GPa, t_C440, dev ))
            if test is None:
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % (dev_thres))
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # B
        #
        if t_B is None:
            if test is None:
                print(' B = %10.4f GPa' % (B/GPa))
        else:
            t_B = float(t_B)
            dev = (B/GPa - t_B)*100/t_B
            if test is None:
                print(' B = %10.4f GPa (%10.4f GPa ' \
                      '- %7.2f %%)' % (B/GPa, t_B, dev))
            if test is None:
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % (dev_thres))
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
        #
        # Cp
        #
        if t_Cp is None:
            if test is None:
                print(' Cp = %10.4f GPa' % (Cp/GPa))
        else:
            t_Cp = float(t_Cp)
            dev = (Cp/GPa - t_Cp)*100/t_Cp
            if test is None:
                print(' Cp = %10.4f GPa (%10.4f GPa ' \
                      '- %7.2f %%)'% (Cp/GPa, t_Cp, dev))
            if test is None:
                if abs(dev) > dev_thres:
                    print(' --- Warning: Property off by more than '\
                          '%f %%.' % (dev_thres))
                    nfail += 1
                else:
                    nok += 1
            else:
                test.assertTrue(abs(dev) < dev_thres, msg=errmsg)
    return nok, nfail
def test_hexagonal_elastic_constants(mats, pot, par=None, sx=1, dev_thres=5,
                                     test=None):
    """Check cohesive energy and lattice parameters (a0, c0) of hexagonal
    materials against reference values.

    Parameters
    ----------
    mats : list
        Each entry is either a dict with keys 'name', 'struct' and optional
        references 'Ec', 'a0', 'c0', or a tuple (name, struct, Ec, a0, c0).
    pot : class or object
        Calculator class (instantiated with ``par``) or a calculator
        instance.
    par : dict
        Constructor parameters for ``pot``; the optional '__ref__' entry is
        printed as a citation.
    sx : int
        Supercell multiplier used to reduce measured cell vectors to the
        unit-cell lattice constants.
    dev_thres : float
        Deviation threshold in percent above which a warning is printed.
    test : unittest.TestCase or None
        Only used to suppress the banner; results are always printed.
    """
    try:
        potname = pot.__name__
    except:
        potname = pot.__class__.__name__
    if test is None:
        print('--- %s ---' % potname)
    if par is not None:
        if test is None and '__ref__' in par:
            print(' %s' % par['__ref__'])
        c = pot(**par)
    else:
        c = pot
    for imat in mats:
        if isinstance(imat, tuple):
            # BUG FIX: this line was previously a bare expression
            # ('name, a, t_Ec, t_a0, t_c0' without '= imat'), so the tuple
            # was never unpacked and the branch raised NameError.
            name, a, t_Ec, t_a0, t_c0 = imat
        else:
            name = imat['name']
            a = imat['struct']
            try:
                t_Ec = float(imat['Ec'])
            except:
                t_Ec = None
            try:
                t_a0 = float(imat['a0'])
            except:
                t_a0 = None
            try:
                t_c0 = float(imat['c0'])
            except:
                t_c0 = None
        # break symmetry slightly, wrap atoms into the cell, then relax the
        # two in-plane cell vectors only (mask selects xx and yy strains)
        a.translate([0.1, 0.1, 0.1])
        a.set_scaled_positions(a.get_scaled_positions()%1.0)
        a.calc = c
        FIRE(
            ase.constraints.StrainFilter(a, mask=[1,1,0,0,0,0]),
            logfile=None).run(fmax=0.0001)
        ase.io.write('%s.cfg' % name, a)
        # Ec is negative (binding energy); references are positive cohesive
        # energies, hence the '+' in the deviation below.
        Ec = a.get_potential_energy()/len(a)
        if t_Ec is None:
            print('%10s: Ec = %10.3f eV' % ( name, Ec ))
        else:
            dev = (Ec + t_Ec)*100/t_Ec
            print('%10s: Ec = %10.3f eV (%10.3f eV - %7.2f %%)' % ( name, Ec, t_Ec, dev ))
            if abs(dev) > dev_thres:
                print(' --- Warning: Property off by more than %i %%.' % dev_thres)
        c1, c2, c3 = a.get_cell()
        a0 = sqrt(np.dot(c1, c1))/sx
        b0 = sqrt(np.dot(c2, c2))/sx
        c0 = sqrt(np.dot(c3, c3))/sx
        # average the two equivalent in-plane lattice constants; the first
        # vector of the orthorhombic representation has length a*sqrt(3)
        a0 = (a0/sqrt(3.0)+b0)/2
        #a0 /= sqrt(3.0)
        if t_a0 is None:
            print(' a0 = %10.3f A ' % a0)
        else:
            dev = (a0 - t_a0)*100/t_a0
            print(' a0 = %10.3f A (%10.3f A - %7.2f %%)' % ( a0, t_a0, dev ))
            if abs(dev) > dev_thres:
                print(' --- Warning: Property off by more than %i %%.' % dev_thres)
        if t_c0 is None:
            print(' c0 = %10.3f A ' % c0)
        else:
            dev = (c0 - t_c0)*100/t_c0
            print(' c0 = %10.3f A (%10.3f A - %7.2f %%)' % ( c0, t_c0, dev ))
            if abs(dev) > dev_thres:
                print(' --- Warning: Property off by more than %i %%.' % dev_thres)
def test_surface_energies(mats, pot, par=None, sx=1, vacuum=10.0, find_a0=True,
                          dev_thres=5, test=None, dump=False):
    """Compute unrelaxed and relaxed surface energies and compare against
    reference values.

    Parameters
    ----------
    mats : list
        Each entry is a dict with 'name', 'struct' (either a slab, or a
        (bulk, slab) tuple) and optional references 'u', 'r' (eV/cell) and
        'u_Jm2', 'r_Jm2' (J/m^2), or a tuple (name, struct).
    pot : class or object
        Calculator class (instantiated with ``par``) or an instance.
    vacuum : float
        Vacuum thickness added along z to create the two free surfaces.
    find_a0 : bool
        Relax the bulk cell (hydrostatic strains) before measuring.
    dump : bool
        Write the relaxed slab to a .cfg file.
    """
    try:
        potname = pot.__name__
    except:
        potname = pot.__class__.__name__
    if test is None:
        print('--- %s ---' % potname)
    if par is not None:
        if test is None and '__ref__' in par:
            print(' %s' % par['__ref__'])
        c = pot(**par)
    else:
        c = pot
    for imat in mats:
        t_Es_u = t_Es_r = t_Es_u_Jm2 = t_Es_r_Jm2 = None
        if isinstance(imat, tuple):
            name, a = imat
        else:
            name = imat['name']
            a = imat['struct']
            try:
                t_Es_u = float(imat['u'])
            except:
                t_Es_u = None
            try:
                t_Es_r = float(imat['r'])
            except:
                t_Es_r = None
            try:
                t_Es_u_Jm2 = float(imat['u_Jm2'])
            except:
                t_Es_u_Jm2 = None
            try:
                t_Es_r_Jm2 = float(imat['r_Jm2'])
            except:
                t_Es_r_Jm2 = None
        errmsg = 'potential: %s; material: %s' % (potname, name)
        # 'struct' may be a (bulk, slab) pair; otherwise the slab doubles
        # as its own bulk reference
        bulk = None
        if type(a) == tuple:
            bulk, a = a
            bulk.translate([0.1, 0.1, 0.1])
            bulk.set_scaled_positions(bulk.get_scaled_positions())
            bulk.calc = c
        a.translate([0.1, 0.1, 0.1])
        a.set_scaled_positions(a.get_scaled_positions())
        a.calc = c
        if bulk is None:
            bulk = a
        if find_a0:
            # relax the bulk cell (hydrostatic strain components only)
            FIRE(
                ase.constraints.StrainFilter(bulk, mask=[1,1,1,0,0,0]),
                logfile=None).run(fmax=0.0001)
        Ebulk = bulk.get_potential_energy()
        if test is None:
            print('%-20s: Ec = %10.3f eV' % (name, Ebulk/len(a)))
        # scale the slab to the relaxed bulk lattice, then open a vacuum
        # gap along z to expose two surfaces
        cx, cy, cz = bulk.get_cell().diagonal()
        a.set_cell([cx,cy,cz], scale_atoms=True)
        a.set_cell([cx,cy,cz+vacuum])
        Eunrelaxed = a.get_potential_energy()
        # Factor of two because there are two surfaces!
        Es = ( Eunrelaxed - Ebulk ) / 2
        Es_Jm2 = Es*Jm2/(cx*cy)
        Es /= sx*sx
        if test is None:
            print(' Es,unrelaxed = %10.3f eV/cell ' \
                  '(%10.3f J/m^2)' % (Es, Es_Jm2))
        else:
            if t_Es_u is not None:
                dev = (Es - t_Es_u)*100/t_Es_u
                test.assertTrue(abs(dev) < dev_thres,
                                msg='Es,unrelaxed; '+errmsg)
            if t_Es_u_Jm2 is not None:
                # NOTE(review): this message says 'Es,relaxed' but the
                # comparison is against the *unrelaxed* J/m^2 reference --
                # the labels here and below look swapped; confirm.
                dev = (Es_Jm2 - t_Es_u_Jm2)*100/t_Es_u_Jm2
                test.assertTrue(abs(dev) < dev_thres,
                                msg='Es,relaxed; '+errmsg)
        # relax the slab's internal coordinates and re-measure
        FIRE(a, logfile=None).run(fmax=0.005)
        Erelaxed = a.get_potential_energy()
        # Factor of two because there are two surfaces!
        Es = ( Erelaxed - Ebulk ) / 2
        Es_Jm2 = Es*Jm2/(cx*cy)
        Es /= sx*sx
        if test is None:
            print(' Es,relaxed = %10.3f eV/cell ' \
                  '(%10.3f J/m^2)' % (Es, Es_Jm2))
        else:
            if t_Es_r is not None:
                dev = (Es - t_Es_r)*100/t_Es_r
                test.assertTrue(abs(dev) < dev_thres,
                                msg='Es,unrelaxed (J/m^2); '+errmsg)
            if t_Es_r_Jm2 is not None:
                dev = (Es_Jm2 - t_Es_r_Jm2)*100/t_Es_r_Jm2
                test.assertTrue(abs(dev) < dev_thres,
                                msg='Es,relaxed (J/m^2); '+errmsg)
        if dump:
            ase.io.write('%s-%s.cfg' % ( potname, name ), a)
|
Atomistica/atomistica
|
src/python/atomistica/tests.py
|
Python
|
gpl-2.0
| 21,869
|
[
"ASE"
] |
93d6c1c93870f01bb1a3464f1d388d18c5de7a45a610d7662d178ef9081f0cb1
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    """Look up *colorName* in VTK's named-color table and return its
    red, green and blue components as a list of doubles."""
    components = [0.0] * 3  # stays black if the name is unknown
    vtk.vtkNamedColors().GetColorRGB(colorName, components)
    return components
# vtkTubeFilter radius-variation mode (value of VTK_VARY_RADIUS_BY_VECTOR)
VTK_VARY_RADIUS_BY_VECTOR = 2
# create pipeline
#
# Read a rectilinear-grid dataset from the VTK test data directory.
reader = vtk.vtkDataSetReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/RectGrid2.vtk")
reader.Update()
# Downcast the generic vtkDataSet output to a concrete rectilinear grid.
toRectilinearGrid = vtk.vtkCastToConcrete()
toRectilinearGrid.SetInputConnection(reader.GetOutputPort())
toRectilinearGrid.Update()
# Extract the k=15 plane of the grid and warp it by the vector field.
plane = vtk.vtkRectilinearGridGeometryFilter()
plane.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
plane.SetExtent(0, 100, 0, 100, 15, 15)
warper = vtk.vtkWarpVector()
warper.SetInputConnection(plane.GetOutputPort())
warper.SetScaleFactor(0.05)
planeMapper = vtk.vtkDataSetMapper()
planeMapper.SetInputConnection(warper.GetOutputPort())
planeMapper.SetScalarRange(0.197813, 0.710419)
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
# Cut the grid with a plane through its center, normal to the x axis.
cutPlane = vtk.vtkPlane()
cutPlane.SetOrigin(reader.GetOutput().GetCenter())
cutPlane.SetNormal(1, 0, 0)
planeCut = vtk.vtkCutter()
planeCut.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
planeCut.SetCutFunction(cutPlane)
cutMapper = vtk.vtkDataSetMapper()
cutMapper.SetInputConnection(planeCut.GetOutputPort())
cutMapper.SetScalarRange(
    reader.GetOutput().GetPointData().GetScalars().GetRange())
cutActor = vtk.vtkActor()
cutActor.SetMapper(cutMapper)
# Isosurface at scalar value 0.7, rendered as a bisque-colored wireframe.
iso = vtk.vtkContourFilter()
iso.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
iso.SetValue(0, 0.7)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(iso.GetOutputPort())
normals.SetFeatureAngle(45)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(normals.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(GetRGBColor('bisque'))
isoActor.GetProperty().SetRepresentationToWireframe()
# Trace a streamline in both directions from a seed point, then sweep a
# tube along it with the radius modulated by the vector magnitude.
streamer = vtk.vtkStreamTracer()
streamer.SetInputConnection(reader.GetOutputPort())
streamer.SetStartPosition(-1.2, -0.1, 1.3)
streamer.SetMaximumPropagation(500)
streamer.SetInitialIntegrationStep(0.05)
streamer.SetIntegrationDirectionToBoth()
streamTube = vtk.vtkTubeFilter()
streamTube.SetInputConnection(streamer.GetOutputPort())
streamTube.SetRadius(0.025)
streamTube.SetNumberOfSides(6)
streamTube.SetVaryRadius(VTK_VARY_RADIUS_BY_VECTOR)
mapStreamTube = vtk.vtkPolyDataMapper()
mapStreamTube.SetInputConnection(streamTube.GetOutputPort())
mapStreamTube.SetScalarRange(
    reader.GetOutput().GetPointData().GetScalars().GetRange())
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(mapStreamTube)
streamTubeActor.GetProperty().BackfaceCullingOn()
# Outline of the dataset bounds for spatial context.
outline = vtk.vtkOutlineFilter()
outline.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(GetRGBColor('black'))
# Graphics stuff
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(planeActor)
ren1.AddActor(cutActor)
ren1.AddActor(isoActor)
ren1.AddActor(streamTubeActor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
# Fixed camera so the regression image is reproducible.
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.76213, 10.712)
cam1.SetFocalPoint(-0.0842503, -0.136905, 0.610234)
cam1.SetPosition(2.53813, 2.2678, -5.22172)
cam1.SetViewUp(-0.241047, 0.930635, 0.275343)
iren.Initialize()
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Geometry/Testing/Python/rectGrid.py
|
Python
|
gpl-3.0
| 3,929
|
[
"VTK"
] |
1c744847b66a88547395848a2c5087b37d908ff11c7cee77a463dd0dd433e17a
|
# -*- coding: utf-8 -*-
#
# pynag - Python Nagios plug-in and configuration environment
# Copyright (C) 2010 Drew Stinnet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module contains low-level Parsers for nagios configuration and status objects.
Hint: If you are looking to parse some nagios configuration data, you probably
want pynag.Model module instead.
The highlights of this module are:
class Config: For Parsing nagios local nagios configuration files
class Livestatus: To connect to MK-Livestatus
class StatusDat: To read info from status.dat (not used a lot, migrate to mk-livestatus)
class LogFiles: To read nagios log-files
class MultiSite: To talk with multiple Livestatus instances
"""
import os
import re
import time
import sys
import socket # for mk_livestatus
import stat
import pynag.Plugins
import pynag.Utils
import StringIO
import tarfile
_sentinel = object()
class Config(object):
""" Parse and write nagios config files """
# Regex for beginning of object definition
# We want everything that matches:
# define <object_type> {
__beginning_of_object = re.compile("^\s*define\s+(\w+)\s*\{?(.*)$")
def __init__(self, cfg_file=None, strict=False):
""" Constructor for :py:class:`pynag.Parsers.config` class
Args:
cfg_file (str): Full path to nagios.cfg. If None, try to
auto-discover location
strict (bool): if True, use stricter parsing which is more prone to
raising exceptions
"""
self.cfg_file = cfg_file # Main configuration file
self.strict = strict # Use strict parsing or not
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
self.cfg_file = self.guess_cfg_file()
self.data = {}
self.maincfg_values = []
self._is_dirty = False
self.reset() # Initilize misc member variables
def guess_nagios_directory(self):
""" Returns a path to the nagios configuration directory on your system
Use this function for determining the nagios config directory in your
code
Returns:
str. directory containing the nagios.cfg file
Raises:
:py:class:`pynag.Parsers.ConfigFileNotFound` if cannot guess config
file location.
"""
cfg_file = self.guess_cfg_file()
if not cfg_file:
raise ConfigFileNotFound("Could not find nagios.cfg")
return os.path.dirname(cfg_file)
def guess_nagios_binary(self):
""" Returns a path to any nagios binary found on your system
Use this function if you don't want specify path to the nagios binary
in your code and you are confident that it is located in a common
location
Checked locations are as follows:
* /usr/bin/nagios
* /usr/sbin/nagios
* /usr/local/nagios/bin/nagios
* /nagios/bin/nagios
* /usr/bin/icinga
* /usr/sbin/icinga
* /usr/bin/naemon
* /usr/sbin/naemon
* /usr/local/naemon/bin/naemon.cfg
* /usr/bin/shinken
* /usr/sbin/shinken
Returns:
str. Path to the nagios binary
None if could not find a binary in any of those locations
"""
possible_files = ('/usr/bin/nagios',
'/usr/sbin/nagios',
'/usr/local/nagios/bin/nagios',
'/nagios/bin/nagios',
'/usr/bin/icinga',
'/usr/sbin/icinga',
'/usr/bin/naemon',
'/usr/sbin/naemon',
'/usr/local/naemon/bin/naemon.cfg',
'/usr/bin/shinken',
'/usr/sbin/shinken')
possible_binaries = ('nagios', 'nagios3', 'naemon', 'icinga', 'shinken')
for i in possible_binaries:
command = ['which', i]
code, stdout, stderr = pynag.Utils.runCommand(command=command, shell=False)
if code == 0:
return stdout.splitlines()[0].strip()
return None
def guess_cfg_file(self):
""" Returns a path to any nagios.cfg found on your system
Use this function if you don't want specify path to nagios.cfg in your
code and you are confident that it is located in a common location
Checked locations are as follows:
* /etc/nagios/nagios.cfg
* /etc/nagios3/nagios.cfg
* /usr/local/nagios/etc/nagios.cfg
* /nagios/etc/nagios/nagios.cfg
* ./nagios.cfg
* ./nagios/nagios.cfg
* /etc/icinga/icinga.cfg
* /usr/local/icinga/etc/icinga.cfg
* ./icinga.cfg
* ./icinga/icinga.cfg
* /etc/naemon/naemon.cfg
* /usr/local/naemon/etc/naemon.cfg
* ./naemon.cfg
* ./naemon/naemon.cfg
* /etc/shinken/shinken.cfg
Returns:
str. Path to the nagios.cfg or equivalent file
None if couldn't find a file in any of these locations.
"""
possible_files = ('/etc/nagios/nagios.cfg',
'/etc/nagios3/nagios.cfg',
'/usr/local/nagios/etc/nagios.cfg',
'/nagios/etc/nagios/nagios.cfg',
'./nagios.cfg',
'./nagios/nagios.cfg',
'/etc/icinga/icinga.cfg',
'/usr/local/icinga/etc/icinga.cfg',
'./icinga.cfg',
'./icinga/icinga.cfg',
'/etc/naemon/naemon.cfg',
'/usr/local/naemon/etc/naemon.cfg',
'./naemon.cfg',
'./naemon/naemon.cfg',
'/etc/shinken/shinken.cfg',
)
for file_path in possible_files:
if self.isfile(file_path):
return file_path
return None
    def reset(self):
        """ Reinitializes the data of a parser instance to its default values.
        """
        self.cfg_files = []  # List of other configuration files
        self.data = {}  # dict of every known object definition
        self.errors = []  # List of ParserErrors
        self.item_list = None  # Lazy cache source used by _get_item()
        self.item_cache = None  # object_type -> name -> item, built on demand
        self.maincfg_values = []  # The contents of main nagios.cfg
        self._resource_values = []  # The contents of any resource_files
        self.item_apply_cache = {}  # This is performance tweak used by _apply_template
        # This is a pure list of all the key/values in the config files. It
        # shouldn't be useful until the items in it are parsed through with the proper
        # 'use' relationships
        self.pre_object_list = []
        self.post_object_list = []
        # Maps each object type to the attribute that uniquely names it.
        self.object_type_keys = {
            'hostgroup': 'hostgroup_name',
            'hostextinfo': 'host_name',
            'host': 'host_name',
            'service': 'name',  # NOTE(review): 'name', not service_description -- confirm intentional
            'servicegroup': 'servicegroup_name',
            'contact': 'contact_name',
            'contactgroup': 'contactgroup_name',
            'timeperiod': 'timeperiod_name',
            'command': 'command_name',
            #'service':['host_name','description'],
        }
def _has_template(self, target):
""" Determine if an item has a template associated with it
Args:
target (dict): Parsed item as parsed by :py:class:`pynag.Parsers.config`
"""
return 'use' in target
def _get_pid(self):
""" Checks the lock_file var in nagios.cfg and returns the pid from the file
If the pid file does not exist, returns None.
"""
try:
return self.open(self.get_cfg_value('lock_file'), "r").readline().strip()
except Exception:
return None
def _get_hostgroup(self, hostgroup_name):
""" Returns the hostgroup that matches the queried name.
Args:
hostgroup_name: Name of the hostgroup to be returned (string)
Returns:
Hostgroup item with hostgroup_name that matches the queried name.
"""
return self.data['all_hostgroup'].get(hostgroup_name, None)
def _get_key(self, object_type, user_key=None):
""" Return the correct 'key' for an item.
This is mainly a helper method for other methods in this class. It is
used to shorten code repetition.
Args:
object_type: Object type from which to obtain the 'key' (string)
user_key: User defined key. Default None. (string)
Returns:
Correct 'key' for the object type. (string)
"""
if not user_key and not object_type in self.object_type_keys:
raise ParserError("Unknown key for object type: %s\n" % object_type)
# Use a default key
if not user_key:
user_key = self.object_type_keys[object_type]
return user_key
    def _get_item(self, item_name, item_type):
        """ Return an item from a list

        Creates a cache of items in self.pre_object_list and returns an element
        from this cache. Looks for an item with corresponding name and type.

        Args:
            item_name: Name of the item to be returned (string)

            item_type: Type of the item to be returned (string)

        Returns:
            Item with matching name and type from
            :py:attr:`pynag.Parsers.config.item_cache`, or None.
        """
        # create local cache for performance optimizations. TODO: Rewrite functions that call this function
        if not self.item_list:
            self.item_list = self.pre_object_list
            self.item_cache = {}
            for item in self.item_list:
                # Only items with a 'name' attribute (i.e. templates) are cached.
                if not "name" in item:
                    continue
                name = item['name']
                tmp_item_type = (item['meta']['object_type'])
                if not tmp_item_type in self.item_cache:
                    self.item_cache[tmp_item_type] = {}
                self.item_cache[tmp_item_type][name] = item
        # Two-level lookup: first by type, then by name; None when either misses.
        my_cache = self.item_cache.get(item_type, None)
        if not my_cache:
            return None
        return my_cache.get(item_name, None)
def _apply_template(self, original_item):
""" Apply all attributes of item named parent_name to "original_item".
Applies all of the attributes of parents (from the 'use' field) to item.
Args:
original_item: Item 'use'-ing a parent item. The parent's attributes
will be concretely added to this item.
Returns:
original_item to which have been added all the attributes defined
in parent items.
"""
# TODO: There is space for more performance tweaks here
# If item does not inherit from anyone else, lets just return item as is.
if 'use' not in original_item:
return original_item
object_type = original_item['meta']['object_type']
raw_definition = original_item['meta']['raw_definition']
my_cache = self.item_apply_cache.get(object_type, {})
# Performance tweak, if item has been parsed. Lets not do it again
if raw_definition in my_cache:
return my_cache[raw_definition]
parent_names = original_item['use'].split(',')
parent_items = []
for parent_name in parent_names:
parent_item = self._get_item(parent_name, object_type)
if parent_item is None:
error_string = "Can not find any %s named %s\n" % (object_type, parent_name)
self.errors.append(ParserError(error_string, item=original_item))
continue
try:
# Parent item probably has use flags on its own. So lets apply to parent first
parent_item = self._apply_template(parent_item)
except RuntimeError:
t, e = sys.exc_info()[:2]
self.errors.append(ParserError("Error while parsing item: %s (it might have circular use=)" % str(e),
item=original_item))
parent_items.append(parent_item)
inherited_attributes = original_item['meta']['inherited_attributes']
template_fields = original_item['meta']['template_fields']
for parent_item in parent_items:
for k, v in parent_item.iteritems():
if k in ('use', 'register', 'meta', 'name'):
continue
if k not in inherited_attributes:
inherited_attributes[k] = v
if k not in original_item:
original_item[k] = v
template_fields.append(k)
if 'name' in original_item:
my_cache[raw_definition] = original_item
return original_item
def _get_items_in_file(self, filename):
""" Return all items in the given file
Iterates through all elements in self.data and gatehrs all the items
defined in the queried filename.
Args:
filename: file from which are defined the items that will be
returned.
Returns:
A list containing all the items in self.data that were defined in
filename
"""
return_list = []
for k in self.data.keys():
for item in self[k]:
if item['meta']['filename'] == filename:
return_list.append(item)
return return_list
def get_new_item(self, object_type, filename):
""" Returns an empty item with all necessary metadata
Creates a new item dict and fills it with usual metadata:
* object_type : object_type (arg)
* filename : filename (arg)
* template_fields = []
* needs_commit = None
* delete_me = None
* defined_attributes = {}
* inherited_attributes = {}
* raw_definition = "define %s {\\n\\n} % object_type"
Args:
object_type: type of the object to be created (string)
filename: Path to which the item will be saved (string)
Returns:
A new item with default metadata
"""
meta = {
'object_type': object_type,
'filename': filename,
'template_fields': [],
'needs_commit': None,
'delete_me': None,
'defined_attributes': {},
'inherited_attributes': {},
'raw_definition': "define %s {\n\n}" % object_type,
}
return {'meta': meta}
def _load_file(self, filename):
""" Parses filename with self.parse_filename and append results in self._pre_object_list
This function is mostly here for backwards compatibility
Args:
filename: the file to be parsed. This is supposed to a nagios object definition file
"""
for i in self.parse_file(filename):
self.pre_object_list.append(i)
def parse_file(self, filename):
""" Parses a nagios object configuration file and returns lists of dictionaries.
This is more or less a wrapper around :py:meth:`config.parse_string`,
so reading documentation there is useful.
Args:
filename: Path to the file to parse (string)
Returns:
A list containing elements parsed by :py:meth:`parse_string`
"""
try:
raw_string = self.open(filename, 'rb').read()
return self.parse_string(raw_string, filename=filename)
except IOError:
t, e = sys.exc_info()[:2]
parser_error = ParserError(e.strerror)
parser_error.filename = e.filename
self.errors.append(parser_error)
return []
    def parse_string(self, string, filename='None'):
        """ Parses a string, and returns all object definitions in that string

        Args:
            string: A string containing one or more object definitions

            filename (optional): If filename is provided, it will be referenced
                when raising exceptions

        Examples:
            >>> test_string = "define host {\\nhost_name examplehost\\n}\\n"
            >>> test_string += "define service {\\nhost_name examplehost\\nservice_description example service\\n}\\n"
            >>> c = config()
            >>> result = c.parse_string(test_string)
            >>> for i in result: print i.get('host_name'), i.get('service_description', None)
            examplehost None
            examplehost example service

        Returns:
            A list of dictionaries, that look like self.data

        Raises:
            :py:class:`ParserError`
        """
        # append: partial line carried over when a line ends with a backslash.
        append = ""
        # current: the item dict being filled while inside a definition.
        current = None
        in_definition = {}
        # tmp_buffer: raw lines of the definition, joined into raw_definition.
        tmp_buffer = []
        result = []
        for sequence_no, line in enumerate(string.splitlines(False)):
            line_num = sequence_no + 1
            # If previous line ended with backslash, treat this line as a
            # continuation of previous line
            if append:
                line = append + line
                append = None
            # Cleanup and line skips
            line = line.strip()
            if line == "":
                continue
            if line[0] == "#" or line[0] == ';':
                continue
            # If this line ends with a backslash, continue directly to next line
            if line.endswith('\\'):
                append = line.strip('\\')
                continue
            if line.startswith('}'):  # end of object definition
                if not in_definition:
                    p = ParserError("Unexpected '}' found outside object definition in line %s" % line_num)
                    p.filename = filename
                    p.line_start = line_num
                    raise p
                in_definition = None
                current['meta']['line_end'] = line_num
                # Looks to me like nagios ignores everything after the } so why shouldn't we ?
                rest = line.split("}", 1)[1]
                tmp_buffer.append(line)
                try:
                    current['meta']['raw_definition'] = '\n'.join(tmp_buffer)
                except Exception:
                    raise ParserError("Encountered Unexpected end of object definition in file '%s'." % filename)
                result.append(current)
                # Destroy the Nagios Object
                current = None
                continue
            elif line.startswith('define'):  # beginning of object definition
                if in_definition:
                    msg = "Unexpected 'define' in (unknown) on line {line_num}. was expecting '}}'."
                    msg = msg.format(**locals())
                    self.errors.append(ParserError(msg, item=current))
                m = self.__beginning_of_object.search(line)
                tmp_buffer = [line]
                object_type = m.groups()[0]
                if self.strict and object_type not in self.object_type_keys.keys():
                    raise ParserError(
                        "Don't know any object definition of type '%s'. it is not in a list of known object definitions." % object_type)
                current = self.get_new_item(object_type, filename)
                current['meta']['line_start'] = line_num
                # Start off an object
                in_definition = True
                # Looks to me like nagios ignores everything after the {, so why shouldn't we ?
                rest = m.groups()[1]
                continue
            else:  # In the middle of an object definition
                tmp_buffer.append(' ' + line)
            # save whatever's left in the buffer for the next iteration
            if not in_definition:
                append = line
                continue
            # this is an attribute inside an object definition
            if in_definition:
                #(key, value) = line.split(None, 1)
                tmp = line.split(None, 1)
                if len(tmp) > 1:
                    (key, value) = tmp
                else:
                    key = tmp[0]
                    value = ""
                # Strip out in-line comments
                if value.find(";") != -1:
                    value = value.split(";", 1)[0]
                # Clean info
                key = key.strip()
                value = value.strip()
                # Rename some old values that may be in the configuration
                # This can probably be removed in the future to increase performance
                if (current['meta']['object_type'] == 'service') and key == 'description':
                    key = 'service_description'
                # Special hack for timeperiods as they are not consistent with other objects
                # We will treat whole line as a key with an empty value
                if (current['meta']['object_type'] == 'timeperiod') and key not in ('timeperiod_name', 'alias'):
                    key = line
                    value = ''
                current[key] = value
                current['meta']['defined_attributes'][key] = value
            # Something is wrong in the config
            else:
                raise ParserError("Error: Unexpected token in file '%s'" % filename)
        # Something is wrong in the config
        if in_definition:
            raise ParserError("Error: Unexpected EOF in file '%s'" % filename)
        return result
    def _locate_item(self, item):
        """ This is a helper function for anyone who wishes to modify objects.

        It takes "item", locates the file which is configured in, and locates
        exactly the lines which contain that definition.

        Returns: (tuple)

            (everything_before, object_definition, everything_after, filename):

                * everything_before (list of lines): Every line in filename before object was defined
                * everything_after (list of lines): Every line in "filename" after object was defined
                * object_definition (list of lines): Every line used to define our item in "filename"
                * filename (string): file in which the object was written to

        Raises:
            :py:class:`ValueError` if object was not found in "filename"
        """
        if "filename" in item['meta']:
            filename = item['meta']['filename']
        else:
            raise ValueError("item does not have a filename")
        # Look for our item, store it as my_item
        # (for/else: the else fires only when no break happened, i.e. not found)
        for i in self.parse_file(filename):
            if self.compareObjects(item, i):
                my_item = i
                break
        else:
            raise ValueError("We could not find object in %s\n%s" % (filename, item))
        # Caller of this method expects to be returned
        # several lists that describe the lines in our file.
        # The splitting logic starts here.
        my_file = self.open(filename)
        all_lines = my_file.readlines()
        my_file.close()
        # line_start is 1-based; list slicing below is 0-based.
        start = my_item['meta']['line_start'] - 1
        end = my_item['meta']['line_end']
        everything_before = all_lines[:start]
        object_definition = all_lines[start:end]
        everything_after = all_lines[end:]
        # If there happen to be line continuations in the object we will edit
        # We will remove them from object_definition
        object_definition = self._clean_backslashes(object_definition)
        return everything_before, object_definition, everything_after, filename
def _clean_backslashes(self, list_of_strings):
""" Returns list_of_strings with all all strings joined that ended with backslashes
Args:
list_of_strings: List of strings to join
Returns:
Another list of strings, which lines ending with \ joined together.
"""
tmp_buffer = ''
result = []
for i in list_of_strings:
if i.endswith('\\\n'):
tmp_buffer += i.strip('\\\n')
else:
result.append(tmp_buffer + i)
tmp_buffer = ''
return result
    def _modify_object(self, item, field_name=None, new_value=None, new_field_name=None, new_item=None,
                       make_comments=False):
        """ Locates "item" and changes the line which contains field_name.

        Helper function for object_* functions. Locates "item" and changes the
        line which contains field_name. If new_value and new_field_name are both
        None, the attribute is removed.

        Args:
            item(dict): The item to be modified

            field_name(str): The field_name to modify (if any)

            new_field_name(str): If set, field_name will be renamed

            new_value(str): If set the value of field_name will be changed

            new_item(str): If set, whole object will be replaced with this
                string

            make_comments: If set, put pynag-branded comments where changes
                have been made

        Returns:
            True on success

        Raises:
            :py:class:`ValueError` if object or field_name is not found

            :py:class:`IOError` is save is unsuccessful.
        """
        if item is None:
            return
        if field_name is None and new_item is None:
            raise ValueError("either field_name or new_item must be set")
        if '\n' in str(new_value):
            raise ValueError("Invalid character \\n used as an attribute value.")
        everything_before, object_definition, everything_after, filename = self._locate_item(item)
        if new_item is not None:
            # We have instruction on how to write new object, so we dont need to parse it
            object_definition = [new_item]
        else:
            # change: the line we removed/rewrote; stays None if nothing matched.
            change = None
            value = None
            i = 0
            for i in range(len(object_definition)):
                tmp = object_definition[i].split(None, 1)
                if len(tmp) == 0:
                    continue
                # Hack for timeperiods, they dont work like other objects
                elif item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
                    tmp = [object_definition[i]]
                    # we can't change timeperiod, so we fake a field rename
                    if new_value is not None:
                        new_field_name = new_value
                        new_value = None
                    value = ''
                elif len(tmp) == 1:
                    value = ''
                else:
                    value = tmp[1]
                k = tmp[0].strip()
                if k == field_name:
                    # Attribute was found, lets change this line
                    if new_field_name is None and new_value is None:
                        # We take it that we are supposed to remove this attribute
                        change = object_definition.pop(i)
                        break
                    elif new_field_name:
                        # Field name has changed
                        k = new_field_name
                    if new_value is not None:
                        # value has changed
                        value = new_value
                    # Here we do the actual change
                    change = "\t%-30s%s\n" % (k, value)
                    if item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
                        change = "\t%s\n" % new_field_name
                    object_definition[i] = change
                    break
            if not change and new_value is not None:
                # Attribute was not found. Lets add it
                # (i still holds the last index visited by the loop above)
                change = "\t%-30s%s\n" % (field_name, new_value)
                object_definition.insert(i, change)
        # Lets put a banner in front of our item
        if make_comments:
            comment = '# Edited by PyNag on %s\n' % time.ctime()
            if len(everything_before) > 0:
                last_line_before = everything_before[-1]
                if last_line_before.startswith('# Edited by PyNag on'):
                    everything_before.pop()  # remove this line
            object_definition.insert(0, comment)
        # Here we overwrite the config-file, hoping not to ruin anything
        str_buffer = "%s%s%s" % (''.join(everything_before), ''.join(object_definition), ''.join(everything_after))
        self.write(filename, str_buffer)
        return True
def open(self, filename, *args, **kwargs):
""" Wrapper around global open()
Simply calls global open(filename, *args, **kwargs) and passes all arguments
as they are received. See global open() function for more details.
"""
return open(filename, *args, **kwargs)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def write(self, filename, string):
""" Wrapper around open(filename).write()
Writes string to filename and closes the file handler. File handler is
openned in `'w'` mode.
Args:
filename: File where *string* will be written. This is the path to
the file. (string)
string: String to be written to file. (string)
Returns:
Return code as returned by :py:meth:`os.write`
"""
fh = self.open(filename, 'w')
return_code = fh.write(string)
fh.flush()
# os.fsync(fh)
fh.close()
self._is_dirty = True
return return_code
def item_rewrite(self, item, str_new_item):
""" Completely rewrites item with string provided.
Args:
item: Item that is to be rewritten
str_new_item: str representation of the new item
..
In the following line, every "\\n" is actually a simple line break
This is only a little patch for the generated documentation.
Examples::
item_rewrite( item, "define service {\\n name example-service \\n register 0 \\n }\\n" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item=str_new_item)
def item_remove(self, item):
""" Delete one specific item from its configuration files
Args:
item: Item that is to be rewritten
str_new_item: string representation of the new item
..
In the following line, every "\\n" is actually a simple line break
This is only a little patch for the generated documentation.
Examples::
item_remove( item, "define service {\\n name example-service \\n register 0 \\n }\\n" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, new_item="")
def item_edit_field(self, item, field_name, new_value):
""" Modifies one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to be modified. Its field `field_name` will be set to
`new_value`.
field_name: Name of the field that will be modified. (str)
new_value: Value to which will be set the field `field_name`. (str)
Example usage::
edit_object( item, field_name="host_name", new_value="examplehost.example.com") # doctest: +SKIP
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item, field_name=field_name, new_value=new_value)
def item_remove_field(self, item, field_name):
""" Removes one field of a (currently existing) object.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to remove field from.
field_name: Field to remove. (string)
Example usage::
item_remove_field( item, field_name="contactgroups" )
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=field_name, new_value=None, new_field_name=None)
def item_rename_field(self, item, old_field_name, new_field_name):
""" Renames a field of a (currently existing) item.
Changes are immediate (i.e. there is no commit).
Args:
item: Item to modify.
old_field_name: Name of the field that will have its name changed. (string)
new_field_name: New name given to `old_field_name` (string)
Example usage::
item_rename_field(item, old_field_name="normal_check_interval", new_field_name="check_interval")
Returns:
True on success
Raises:
:py:class:`ValueError` if object is not found
:py:class:`IOError` if save fails
"""
return self._modify_object(item=item, field_name=old_field_name, new_field_name=new_field_name)
def item_add(self, item, filename):
""" Adds a new object to a specified config file.
Args:
item: Item to be created
filename: Filename that we are supposed to write the new item to.
This is the path to the file. (string)
Returns:
True on success
Raises:
:py:class:`IOError` on failed save
"""
if not 'meta' in item:
item['meta'] = {}
item['meta']['filename'] = filename
# Create directory if it does not already exist
dirname = os.path.dirname(filename)
if not self.isdir(dirname):
os.makedirs(dirname)
str_buffer = self.print_conf(item)
fh = self.open(filename, 'a')
fh.write(str_buffer)
fh.close()
return True
def edit_object(self, item, field_name, new_value):
""" Modifies a (currently existing) item.
Changes are immediate (i.e. there is no commit)
Args:
item: Item to modify.
field_name: Field that will be updated.
new_value: Updated value of field `field_name`
Example Usage:
edit_object( item, field_name="host_name", new_value="examplehost.example.com")
Returns:
True on success
.. WARNING::
THIS FUNCTION IS DEPRECATED. USE item_edit_field() instead
"""
return self.item_edit_field(item=item, field_name=field_name, new_value=new_value)
def compareObjects(self, item1, item2):
""" Compares two items. Returns true if they are equal
Compares every key: value pair for both items. If anything is different,
the items will not be considered equal.
Args:
item1, item2: Items to be compared.
Returns:
True -- Items are equal
False -- Items are not equal
"""
keys1 = item1['meta']['defined_attributes'].keys()
keys2 = item2['meta']['defined_attributes'].keys()
keys1.sort()
keys2.sort()
result = True
if keys1 != keys2:
return False
for key in keys1:
if key == 'meta':
continue
key1 = item1[key]
key2 = item2[key]
# For our purpose, 30 is equal to 30.000
if key == 'check_interval':
key1 = int(float(key1))
key2 = int(float(key2))
if str(key1) != str(key2):
result = False
if result is False:
return False
return True
def edit_service(self, target_host, service_description, field_name, new_value):
""" Edit a service's attributes
Takes a host, service_description pair to identify the service to modify
and sets its field `field_name` to `new_value`.
Args:
target_host: name of the host to which the service is attached to. (string)
service_description: Service description of the service to modify. (string)
field_name: Field to modify. (string)
new_value: Value to which the `field_name` field will be updated (string)
Returns:
True on success
Raises:
:py:class:`ParserError` if the service is not found
"""
original_object = self.get_service(target_host, service_description)
if original_object is None:
raise ParserError("Service not found")
return self.edit_object(original_object, field_name, new_value)
def _get_list(self, item, key):
""" Return a comma list from an item
Args:
item: Item from which to select value. (string)
key: Field name of the value to select and return as a list. (string)
Example::
_get_list(Foo_object, host_name)
define service {
service_description Foo
host_name larry,curly,moe
}
returns
['larry','curly','moe']
Returns:
A list of the item's values of `key`
Raises:
:py:class:`ParserError` if item is not a dict
"""
if not isinstance(item, dict):
raise ParserError("%s is not a dictionary\n" % item)
# return []
if not key in item:
return []
return_list = []
if item[key].find(",") != -1:
for name in item[key].split(","):
return_list.append(name)
else:
return_list.append(item[key])
# Alphabetize
return_list.sort()
return return_list
def delete_object(self, object_type, object_name, user_key=None):
""" Delete object from configuration files
Args:
object_type: Type of the object to delete from configuration files.
object_name: Name of the object to delete from configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
item = self.get_object(object_type=object_type, object_name=object_name, user_key=user_key)
return self.item_remove(item)
def delete_service(self, service_description, host_name):
""" Delete service from configuration files
Args:
service_description: service_description field value of the object
to delete from configuration files.
host_name: host_name field value of the object to delete from
configuration files.
Returns:
True on success.
"""
item = self.get_service(host_name, service_description)
return self.item_remove(item)
def delete_host(self, object_name, user_key=None):
""" Delete a host from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('host', object_name, user_key=user_key)
def delete_hostgroup(self, object_name, user_key=None):
""" Delete a hostgroup from its configuration files
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
True on success.
"""
return self.delete_object('hostgroup', object_name, user_key=user_key)
def get_object(self, object_type, object_name, user_key=None):
""" Return a complete object dictionary
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: User defined key. Default None. (string)
Returns:
The item found to match all the criterias.
None if object is not found
"""
object_key = self._get_key(object_type, user_key)
for item in self.data['all_%s' % object_type]:
if item.get(object_key, None) == object_name:
return item
return None
def get_host(self, object_name, user_key=None):
""" Return a host object
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criterias.
"""
return self.get_object('host', object_name, user_key=user_key)
def get_servicegroup(self, object_name, user_key=None):
""" Return a Servicegroup object
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criterias.
"""
return self.get_object('servicegroup', object_name, user_key=user_key)
def get_contact(self, object_name, user_key=None):
""" Return a Contact object
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criterias.
"""
return self.get_object('contact', object_name, user_key=user_key)
def get_contactgroup(self, object_name, user_key=None):
""" Return a Contactgroup object
Args:
object_name: object_name field value of the object to delete from
configuration files.
user_key: user_key to pass to :py:meth:`get_object`
Returns:
The item found to match all the criterias.
"""
return self.get_object('contactgroup', object_name, user_key=user_key)
def get_timeperiod(self, object_name, user_key=None):
    """ Return a timeperiod object looked up by its object name.

    (Docstring fixed: this is a lookup, not a delete.)

    Args:
        object_name: key field value of the timeperiod to look up.
        user_key: user_key to pass to :py:meth:`get_object`
    Returns:
        The matching timeperiod item, or None if not found.
    """
    return self.get_object('timeperiod', object_name, user_key=user_key)
def get_command(self, object_name, user_key=None):
    """ Return a command object looked up by its object name.

    (Docstring fixed: this is a lookup, not a delete.)

    Args:
        object_name: key field value of the command to look up.
        user_key: user_key to pass to :py:meth:`get_object`
    Returns:
        The matching command item, or None if not found.
    """
    return self.get_object('command', object_name, user_key=user_key)
def get_hostgroup(self, object_name, user_key=None):
    """ Return a hostgroup object looked up by its object name.

    (Docstring fixed: this is a lookup, not a delete.)

    Args:
        object_name: key field value of the hostgroup to look up.
        user_key: user_key to pass to :py:meth:`get_object`
    Returns:
        The matching hostgroup item, or None if not found.
    """
    return self.get_object('hostgroup', object_name, user_key=user_key)
def get_servicedependency(self, object_name, user_key=None):
    """ Return a servicedependency object looked up by its object name.

    (Docstring fixed: this is a lookup, not a delete.)

    Args:
        object_name: key field value of the servicedependency to look up.
        user_key: user_key to pass to :py:meth:`get_object`
    Returns:
        The matching servicedependency item, or None if not found.
    """
    return self.get_object('servicedependency', object_name, user_key=user_key)
def get_hostdependency(self, object_name, user_key=None):
    """ Return a hostdependency object looked up by its object name.

    (Docstring fixed: this is a lookup, not a delete.)

    Args:
        object_name: key field value of the hostdependency to look up.
        user_key: user_key to pass to :py:meth:`get_object`
    Returns:
        The matching hostdependency item, or None if not found.
    """
    return self.get_object('hostdependency', object_name, user_key=user_key)
def get_service(self, target_host, service_description):
    """ Return one service object attached to a given host.

    Args:
        target_host: host_name of the host the service is attached to.
        service_description: service_description of the wanted service.
    Returns:
        The matching service item, or None if not found.
    """
    matches = (svc for svc in self.data['all_service']
               if svc.get('service_description') == service_description
               and svc.get('host_name') == target_host)
    return next(matches, None)
def _append_use(self, source_item, name):
""" Append attributes to source_item that are inherited via 'use' attribute'
Args:
source_item: item (dict) to apply the inheritance upon
name: obsolete (discovered automatically via source_item['use'].
Here for compatibility.
Returns:
Source Item with appended attributes.
Raises:
:py:class:`ParserError` on recursion errors
"""
# Remove the 'use' key
if "use" in source_item:
del source_item['use']
for possible_item in self.pre_object_list:
if "name" in possible_item:
# Start appending to the item
for k, v in possible_item.iteritems():
try:
if k == 'use':
source_item = self._append_use(source_item, v)
except Exception:
raise ParserError("Recursion error on %s %s" % (source_item, v))
# Only add the item if it doesn't already exist
if not k in source_item:
source_item[k] = v
return source_item
def _post_parse(self):
    """ Creates a few optimization tweaks and easy access lists in self.data

    Creates :py:attr:`config.item_apply_cache` and fills the all_object
    item lists in self.data.  Must run after the raw objects have been
    collected into self.pre_object_list.
    """
    self.item_list = None
    self.item_apply_cache = {}  # This is performance tweak used by _apply_template
    for raw_item in self.pre_object_list:
        # Performance tweak, make sure hashmap exists for this object_type
        object_type = raw_item['meta']['object_type']
        if not object_type in self.item_apply_cache:
            self.item_apply_cache[object_type] = {}
        # Tweak ends
        if "use" in raw_item:
            # Resolve template ('use') inheritance before publishing the item
            raw_item = self._apply_template(raw_item)
        self.post_object_list.append(raw_item)
    # Add the items to the class lists, keyed as 'all_<object_type>'
    for list_item in self.post_object_list:
        type_list_name = "all_%s" % list_item['meta']['object_type']
        if not type_list_name in self.data:
            self.data[type_list_name] = []
        self.data[type_list_name].append(list_item)
def commit(self):
    """ Write any changes that have been made to their appropriate files.

    Every item flagged with meta['needs_commit'] is rewritten to its
    config file together with the other non-deleted items that share the
    same file.  The flag is cleared after a successful write.
    """
    # Loops through ALL items
    for k in self.data.keys():
        # Bug fix: iterate over a copy — the loop body removes and
        # re-appends items, and mutating a list while iterating it makes
        # the iterator skip elements.
        for item in list(self[k]):
            # If the object needs committing, commit it!
            if item['meta']['needs_commit']:
                # Create file contents as an empty string
                file_contents = ""
                # find any other items that may share this config file
                extra_items = self._get_items_in_file(item['meta']['filename'])
                if len(extra_items) > 0:
                    for commit_item in extra_items:
                        # Ignore items that are already set to be deleted
                        if commit_item['meta']['delete_me']:
                            continue
                        # Make sure we aren't adding this thing twice
                        if item != commit_item:
                            file_contents += self.print_conf(commit_item)
                # This is the actual item that needs committing
                if not item['meta']['delete_me']:
                    file_contents += self.print_conf(item)
                # Write the file
                filename = item['meta']['filename']
                self.write(filename, file_contents)
                # Recreate the item entry without the commit flag
                self.data[k].remove(item)
                item['meta']['needs_commit'] = None
                self.data[k].append(item)
def flag_all_commit(self):
    """ Mark every parsed item as needing a commit.

    This should probably only be used for debugging purposes.
    """
    for items in self.data.values():
        for item in items:
            item['meta']['needs_commit'] = True
def print_conf(self, item):
    """ Return a string that can be used in a configuration file.

    Args:
        item: parsed object (dict with a 'meta' sub-dict) to be dumped.
    Returns:
        String representation of item.
    """
    output = ""
    # Header, to go on all files
    output += "# Configuration file %s\n" % item['meta']['filename']
    output += "# Edited by PyNag on %s\n" % time.ctime()
    # Cross-reference comments, emitted only when present in meta
    if "hostgroup_list" in item['meta']:
        output += "# Hostgroups: %s\n" % ",".join(item['meta']['hostgroup_list'])
    if "service_list" in item['meta']:
        output += "# Services: %s\n" % ",".join(item['meta']['service_list'])
    if "service_members" in item['meta']:
        output += "# Service Members: %s\n" % ",".join(item['meta']['service_members'])
    if len(item['meta']['template_fields']) != 0:
        output += "# Values from templates:\n"
    for k in item['meta']['template_fields']:
        output += "#\t %-30s %-30s\n" % (k, item[k])
    output += "\n"
    output += "define %s {\n" % item['meta']['object_type']
    # .items() instead of py2-only .iteritems() so this also runs on Python 3
    for k, v in item.items():
        if v is None:
            # Skip entries with no value
            continue
        if k != 'meta':
            # Template-inherited values are shown only in the comment header
            if k not in item['meta']['template_fields']:
                output += "\t %-30s %-30s\n" % (k, v)
    output += "}\n\n"
    return output
def _load_static_file(self, filename=None):
""" Load a general config file (like nagios.cfg) that has key=value config file format. Ignore comments
Arguments:
filename: name of file to parse, if none nagios.cfg will be used
Returns:
a [ (key,value), (key,value) ] list
"""
result = []
if not filename:
filename = self.cfg_file
for line in self.open(filename).readlines():
# Strip out new line characters
line = line.strip()
# Skip blank lines
if line == "":
continue
# Skip comments
if line[0] == "#" or line[0] == ';':
continue
tmp = line.split("=", 1)
if len(tmp) < 2:
continue
key, value = tmp
key = key.strip()
value = value.strip()
result.append((key, value))
return result
def _edit_static_file(self, attribute, new_value, filename=None, old_value=None, append=False):
    """ Modify a general config file (like nagios.cfg) that has a key=value config file format.

    Arguments:
        filename: Name of config file that will be edited (i.e. nagios.cfg)
        attribute: name of attribute to edit (i.e. check_external_commands)
        new_value: new value for the said attribute (i.e. "1"). None deletes
            the line.
        old_value: Useful if multiple attributes exist (i.e. cfg_dir) and
            you want to replace a specific one.
        append: If true, do not overwrite current setting. Instead append
            this at the end. Use this with settings that are repeated like
            cfg_file.
    Returns:
        True if the file was changed, False otherwise.
    Examples::
        _edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='check_external_commands', new_value='1')
        _edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='cfg_dir', new_value='/etc/nagios/okconfig', append=True)
    """
    if filename is None:
        filename = self.cfg_file
    # For some specific attributes, append should be implied
    if attribute in ('cfg_file', 'cfg_dir', 'broker_module'):
        append = True
    # If/when we make a change, new_line is what will be written
    new_line = '%s=%s\n' % (attribute, new_value)
    # new_value=None means line should be removed
    if new_value is None:
        new_line = ''
    write_buffer = self.open(filename).readlines()
    is_dirty = False  # dirty if we make any changes
    for i, line in enumerate(write_buffer):
        # Strip out new line characters
        line = line.strip()
        # Skip blank lines
        if line == "":
            continue
        # Skip comments
        if line[0] == "#" or line[0] == ';':
            continue
        key, value = line.split("=", 1)
        key = key.strip()
        value = value.strip()
        # If key does not match, we are not interested in this line
        if key != attribute:
            continue
        # If old_value was specified, and it matches, dont have to look any further
        elif value == old_value:
            write_buffer[i] = new_line
            is_dirty = True
            break
        # if current value is the same as new_value, no need to make changes
        elif value == new_value:
            return False
        # Special so cfg_dir matches despite double-slashes, etc
        elif attribute == 'cfg_dir' and new_value and os.path.normpath(value) == os.path.normpath(new_value):
            return False
        # We are not appending, and no old value was specified:
        elif append is False and not old_value:
            write_buffer[i] = new_line
            is_dirty = True
            break
    if is_dirty is False and new_value is not None:
        # If we get here, it means we read the whole file,
        # and we have not yet made any changes, So we assume
        # We should append to the file
        write_buffer.append(new_line)
        is_dirty = True
    # When we get down here, it is time to write changes to file
    if is_dirty is True:
        str_buffer = ''.join(write_buffer)
        self.write(filename, str_buffer)
        return True
    else:
        return False
def needs_reload(self):
    """ Checks if the Nagios service needs a reload.

    Returns:
        True if Nagios service needs reload of cfg files
        False if reload not needed or Nagios is not running
    """
    if not self.maincfg_values:
        # Main config has never been parsed; do a fresh parse first
        self.reset()
        self.parse_maincfg()
    new_timestamps = self.get_timestamps()
    object_cache_file = self.get_cfg_value('object_cache_file')
    # No running nagios process -> nothing to reload
    if self._get_pid() is None:
        return False
    if not object_cache_file:
        return True
    if not self.isfile(object_cache_file):
        return True
    object_cache_timestamp = new_timestamps.get(object_cache_file, 0)
    # Reload not needed if no object_cache file
    # NOTE(review): this None-check is unreachable — the falsy case was
    # already handled by `if not object_cache_file` above; confirm intent.
    if object_cache_file is None:
        return False
    # Reload needed if any config file is newer than the object cache
    for k, v in new_timestamps.items():
        if not v or int(v) > object_cache_timestamp:
            return True
    return False
def needs_reparse(self):
    """ Checks if the Nagios configuration needs to be reparsed.

    Returns:
        True if any Nagios configuration file has changed since last parse()
    """
    # Parse has never been run at all
    if self.data == {}:
        return True
    # A previous save operation forced a reparse
    if self._is_dirty is True:
        return True
    # Otherwise compare file timestamps against the ones recorded at parse time
    current = self.get_timestamps()
    if len(current) != len(self.timestamps):
        return True
    return any(self.timestamps.get(path, None) != mtime
               for path, mtime in current.items())
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse_maincfg(self):
    """ Parses your main configuration (nagios.cfg) and stores it as key/value pairs in self.maincfg_values

    This function is mainly used by config.parse() which also parses your
    whole configuration set.  Guarded by the module-wide re-entrant lock so
    concurrent parses do not interleave.

    Raises:
        py:class:`ConfigFileNotFound`
    """
    # If nagios.cfg is not set, lets do some minor autodiscover.
    if self.cfg_file is None:
        raise ConfigFileNotFound('Could not find nagios.cfg')
    self.maincfg_values = self._load_static_file(self.cfg_file)
@pynag.Utils.synchronized(pynag.Utils.rlock)
def parse(self):
    """ Parse all objects in your nagios configuration

    This functions starts by loading up your nagios.cfg ( parse_maincfg() )
    then moving on to your object configuration files (as defined via
    cfg_file and cfg_dir) and and your resource_file as well.

    Returns:
        None
    Raises:
        :py:class:`IOError` if unable to read any file due to permission
        problems
    """
    # reset
    self.reset()
    self.parse_maincfg()
    self.cfg_files = self.get_cfg_files()
    # When parsing config, we will softly fail if permission denied
    # comes on resource files. If later someone tries to get them via
    # get_resource, we will fail hard
    try:
        self._resource_values = self.get_resources()
    except IOError:
        # py2/py3-portable way to grab the active exception
        t, e = sys.exc_info()[:2]
        self.errors.append(str(e))
    # Record file mtimes so needs_reparse() can detect changes later
    self.timestamps = self.get_timestamps()
    # This loads everything into self.data via _post_parse()
    for cfg_file in self.cfg_files:
        self._load_file(cfg_file)
    self._post_parse()
    self._is_dirty = False
def get_resource(self, resource_name):
    """ Get a single resource value which can be located in any resource.cfg file

    (Docstring fixed: the previous docstring claimed a KeyError was raised
    when the resource was missing, but the code returns None.)

    Arguments:
        resource_name: Name as it appears in resource file (i.e. $USER1$)
    Returns:
        String value of the resource, or None if the resource is not found.
    """
    resources = self.get_resources()
    for k, v in resources:
        if k == resource_name:
            return v
def get_timestamps(self):
    """ Return a hash map of all nagios related files and their timestamps.

    Files that do not exist keep a value of None.
    """
    tracked = {self.cfg_file: None}
    # resource/lock/object-cache files from nagios.cfg are tracked too
    for directive, value in self.maincfg_values:
        if directive in ('resource_file', 'lock_file', 'object_cache_file'):
            tracked[value] = None
    for cfg in self.get_cfg_files():
        tracked[cfg] = None
    # Now record the mtime of every file that actually exists
    for path in list(tracked.keys()):
        if self.isfile(path):
            tracked[path] = self.stat(path).st_mtime
    return tracked
def isfile(self, *args, **kwargs):
    """ Wrapper around os.path.isfile (overridable filesystem access point) """
    return os.path.isfile(*args, **kwargs)
def isdir(self, *args, **kwargs):
    """ Wrapper around os.path.isdir (overridable filesystem access point) """
    return os.path.isdir(*args, **kwargs)
def islink(self, *args, **kwargs):
    """ Wrapper around os.path.islink (overridable filesystem access point) """
    return os.path.islink(*args, **kwargs)
def readlink(self, *args, **kwargs):
    """ Wrapper around os.readlink (overridable filesystem access point).

    Bug fix: the first parameter was previously misspelled 'selfself'.
    """
    return os.readlink(*args, **kwargs)
def stat(self, *args, **kwargs):
    """ Wrapper around os.stat (overridable filesystem access point) """
    return os.stat(*args, **kwargs)
def remove(self, *args, **kwargs):
    """ Wrapper around os.remove (overridable filesystem access point) """
    return os.remove(*args, **kwargs)
def access(self, *args, **kwargs):
    """ Wrapper around os.access (overridable filesystem access point) """
    return os.access(*args, **kwargs)
def listdir(self, *args, **kwargs):
    """ Wrapper around os.listdir (overridable filesystem access point) """
    return os.listdir(*args, **kwargs)
def exists(self, *args, **kwargs):
    """ Wrapper around os.path.exists (overridable filesystem access point) """
    return os.path.exists(*args, **kwargs)
def get_resources(self):
    """ Return a list of every private resource pair from nagios.cfg.

    Every readable file referenced by a 'resource_file' directive is parsed
    and its (key, value) pairs are concatenated in directive order.
    """
    resource_files = [value for directive, value in self.maincfg_values
                      if directive == 'resource_file' and self.isfile(value)]
    found = []
    for path in resource_files:
        found.extend(self._load_static_file(path))
    return found
def extended_parse(self):
    """ This parse is used after the initial parse() command is run.

    It is only needed if you want extended meta information about hosts or
    other objects: it fills meta['hostgroup_list'], meta['service_list']
    and meta['service_members'] for hosts/services.
    """
    # Do the initial parsing
    self.parse()
    # First, cycle through the hosts, and append hostgroup information
    # NOTE(review): `index` is only incremented at the bottom of the loop,
    # so a `continue` above desynchronizes index from the current host —
    # looks like a latent bug; confirm before relying on skipped hosts.
    index = 0
    for host in self.data['all_host']:
        if host.get("register", None) == "0":
            continue
        if not "host_name" in host:
            continue
        if not "hostgroup_list" in self.data['all_host'][index]['meta']:
            self.data['all_host'][index]['meta']['hostgroup_list'] = []
        # Append any hostgroups that are directly listed in the host definition
        if "hostgroups" in host:
            for hostgroup_name in self._get_list(host, 'hostgroups'):
                if not "hostgroup_list" in self.data['all_host'][index]['meta']:
                    self.data['all_host'][index]['meta']['hostgroup_list'] = []
                if hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:
                    self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)
        # Append any services which reference this host
        service_list = []
        for service in self.data['all_service']:
            if service.get("register", None) == "0":
                continue
            if not "service_description" in service:
                continue
            if host['host_name'] in self._get_active_hosts(service):
                service_list.append(service['service_description'])
        self.data['all_host'][index]['meta']['service_list'] = service_list
        # Increment count
        index += 1
    # Loop through all hostgroups, appending them to their respective hosts
    for hostgroup in self.data['all_hostgroup']:
        for member in self._get_list(hostgroup, 'members'):
            index = 0
            for host in self.data['all_host']:
                if not "host_name" in host:
                    continue
                # Skip members that do not match
                if host['host_name'] == member:
                    # Create the meta var if it doesn't exist
                    if not "hostgroup_list" in self.data['all_host'][index]['meta']:
                        self.data['all_host'][index]['meta']['hostgroup_list'] = []
                    if hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:
                        self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])
                # Increment count
                index += 1
    # Expand service membership
    index = 0
    for service in self.data['all_service']:
        # Find a list of hosts to negate from the final list
        self.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)
        # Increment count
        index += 1
def _get_active_hosts(self, item):
""" Given an object, return a list of active hosts.
This will exclude hosts that are negated with a "!"
Args:
item: Item to obtain active hosts from.
Returns:
List of all the active hosts for `item`
"""
# First, generate the negation list
negate_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] == "!":
hostgroup_obj = self.get_hostgroup(hostgroup_name[1:])
negate_hosts.extend(self._get_list(hostgroup_obj, 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] == "!":
negate_hosts.append(host_name[1:])
# Now get hosts that are actually listed
active_hosts = []
# Hostgroups
if "hostgroup_name" in item:
for hostgroup_name in self._get_list(item, 'hostgroup_name'):
if hostgroup_name[0] != "!":
active_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name), 'members'))
# Host Names
if "host_name" in item:
for host_name in self._get_list(item, 'host_name'):
if host_name[0] != "!":
active_hosts.append(host_name)
# Combine the lists
return_hosts = []
for active_host in active_hosts:
if active_host not in negate_hosts:
return_hosts.append(active_host)
return return_hosts
def get_cfg_dirs(self):
    """ Parses the main config file for configuration directories.

    Returns:
        List of all cfg directories used in this configuration.
    Example::
        print(get_cfg_dirs())
        ['/etc/nagios/hosts','/etc/nagios/objects',...]
    """
    return [value for directive, value in self.maincfg_values
            if directive == "cfg_dir"]
def get_cfg_files(self):
    """ Return a list of all cfg files used in this configuration

    Filenames are normalised so that if nagios.cfg specifies relative
    filenames we will convert it to fully qualified filename before returning.

    Returns:
        List of all configurations files used in the configuration.
    Example:
        print(get_cfg_files())
        ['/etc/nagios/hosts/host1.cfg','/etc/nagios/hosts/host2.cfg',...]
    """
    cfg_files = []
    for config_object, config_value in self.maincfg_values:
        # Add cfg_file objects to cfg file list
        if config_object == "cfg_file":
            config_value = self.abspath(config_value)
            if self.isfile(config_value):
                cfg_files.append(config_value)
        # Parse all files in a cfg directory
        if config_object == "cfg_dir":
            config_value = self.abspath(config_value)
            directories = []
            raw_file_list = []
            directories.append(config_value)
            # Walk through every subdirectory and add to our list
            # (breadth-first via a FIFO of directories still to visit)
            while directories:
                current_directory = directories.pop(0)
                # Nagios doesnt care if cfg_dir exists or not, so why should we ?
                if not self.isdir(current_directory):
                    continue
                for item in self.listdir(current_directory):
                    # Append full path to file
                    item = "%s" % (os.path.join(current_directory, item.strip()))
                    # Symlinks are resolved to their target before classification
                    if self.islink(item):
                        item = os.readlink(item)
                    if self.isdir(item):
                        directories.append(item)
                    # De-duplicate while preserving discovery order
                    if raw_file_list.count(item) < 1:
                        raw_file_list.append(item)
            # Only *.cfg regular files that actually exist are kept
            for raw_file in raw_file_list:
                if raw_file.endswith('.cfg'):
                    if self.exists(raw_file) and not self.isdir(raw_file):
                        # Nagios doesnt care if cfg_file exists or not, so we will not throws errors
                        cfg_files.append(raw_file)
    return cfg_files
def abspath(self, path):
    """ Return the absolute path of a given relative path.

    The current working directory is assumed to be the dirname of nagios.cfg

    Args:
        path: relative path to be transformed into absolute path. (string)
    Returns:
        Absolute path of given relative path.
    Raises:
        ValueError: if path is not a string.
    Example:
        >>> c = config(cfg_file="/etc/nagios/nagios.cfg")
        >>> c.abspath('nagios.cfg')
        '/etc/nagios/nagios.cfg'
        >>> c.abspath('/etc/nagios/nagios.cfg')
        '/etc/nagios/nagios.cfg'
    """
    if not isinstance(path, str):
        # Bug fix: the ValueError was previously *returned* instead of raised,
        # silently handing callers an exception object as the "path".
        raise ValueError("Path must be a string got %s instead" % type(path))
    if path.startswith('/'):
        return path
    nagiosdir = os.path.dirname(self.cfg_file)
    normpath = os.path.abspath(os.path.join(nagiosdir, path))
    return normpath
def get_cfg_value(self, key):
    """ Returns one specific value from your nagios.cfg file, None if not found.

    Arguments:
        key: what attribute to fetch from nagios.cfg (example: "command_file")
    Returns:
        String of the first value found for the attribute, or None.
    Example:
        >>> c = Config() # doctest: +SKIP
        >>> log_file = c.get_cfg_value('log_file') # doctest: +SKIP
        # Should return something like "/var/log/nagios/nagios.log"
    """
    # Lazily parse nagios.cfg the first time a value is requested
    if not self.maincfg_values:
        self.parse_maincfg()
    return next((value for name, value in self.maincfg_values if name == key),
                None)
def get_object_types(self):
    """ Returns a list of all discovered object types (e.g. 'host', 'service').

    Strips the 'all_' prefix from the keys of self.data.  Uses a list
    comprehension instead of map() so the result is a real list on both
    Python 2 and Python 3 (py3 map() is lazy).
    """
    return [re.sub("all_", "", x) for x in self.data.keys()]
def cleanup(self):
    """ Remove configuration files that have no configuration items.

    Also removes the corresponding cfg_file directive from nagios.cfg so
    the deleted file is no longer referenced.
    """
    for filename in self.cfg_files:
        if not self.parse_file(filename):  # parse_file returns empty list on empty files
            self.remove(filename)
            # If nagios.cfg specifies this file directly via cfg_file directive then
            # drop that directive as well (new_value=None deletes the line)
            for k, v in self.maincfg_values:
                if k == 'cfg_file' and v == filename:
                    self._edit_static_file(k, old_value=v, new_value=None)
def __setitem__(self, key, item):
    """ Mapping-style write access to self.data (e.g. config['all_host'] = [...]). """
    self.data[key] = item
def __getitem__(self, key):
    """ Mapping-style read access to self.data (e.g. config['all_host']). """
    return self.data[key]
class Livestatus(object):
""" Wrapper around MK-Livestatus
Example usage::
s = Livestatus()
for hostgroup in s.get_hostgroups():
print(hostgroup['name'], hostgroup['num_hosts'])
"""
def __init__(self, livestatus_socket_path=None, nagios_cfg_file=None, authuser=None):
    """ Initilize a new instance of Livestatus

    Args:
        livestatus_socket_path: Path to livestatus socket (if none specified,
            use one specified in nagios.cfg)
        nagios_cfg_file: Path to your nagios.cfg. If None then try to
            auto-detect
        authuser: If specified. Every data pulled is with the access rights
            of that contact.
    Raises:
        :py:class:`ParserError` if a livestatus broker_module line is found
        but contains no filename-looking argument.
    """
    self.nagios_cfg_file = nagios_cfg_file
    self.error = None
    if not livestatus_socket_path:
        # Autodiscover the socket path from nagios.cfg
        c = config(cfg_file=nagios_cfg_file)
        c.parse_maincfg()
        self.nagios_cfg_file = c.cfg_file
        # Look for a broker_module line in the main config and parse its arguments
        # One of the arguments is path to the file socket created
        for k, v in c.maincfg_values:
            if k == 'broker_module' and "livestatus.o" in v:
                for arg in v.split()[1:]:
                    if arg.startswith('/') or '=' not in arg:
                        livestatus_socket_path = arg
                        break
                else:
                    # for/else: runs only when the inner loop exhausted its
                    # arguments without a break, i.e. no filename-looking
                    # argument was found on the broker_module line
                    msg = "No Livestatus socket defined. Make sure livestatus broker module is loaded."
                    raise ParserError(msg)
    self.livestatus_socket_path = livestatus_socket_path
    self.authuser = authuser
def test(self, raise_error=True):
    """ Test if connection to livestatus socket is working

    Args:
        raise_error: If set to True, raise exception if test fails, otherwise return False
    Raises:
        ParserError if raise_error == True and connection fails
    Returns:
        True -- Connection is OK
        False -- there are problems and raise_error==False
    """
    try:
        # Any successful round-trip proves the socket works
        self.query("GET hosts")
    except Exception:
        # py2/py3-portable way to grab the active exception
        t, e = sys.exc_info()[:2]
        self.error = e
        if raise_error:
            raise ParserError("got '%s' when testing livestatus socket. error was: '%s'" % (type(e), e))
        else:
            return False
    return True
def _get_socket(self):
    """ Returns a socket.socket() instance to communicate with livestatus

    Socket might be either unix filesocket or a tcp socket depenging in
    the content of :py:attr:`livestatus_socket_path`

    Returns:
        Socket to livestatus instance (socket.socket)
    Raises:
        :py:class:`LivestatusNotConfiguredException` on failed connection.
        :py:class:`ParserError` If could not parse configured TCP address
        correctly.
    """
    if not self.livestatus_socket_path:
        msg = "We could not find path to MK livestatus socket file. Make sure MK livestatus is installed and configured"
        raise LivestatusNotConfiguredException(msg)
    try:
        # If livestatus_socket_path contains a colon, then we assume that it is tcp socket instead of a local filesocket
        if self.livestatus_socket_path.find(':') > 0:
            address, tcp_port = self.livestatus_socket_path.split(':', 1)
            if not tcp_port.isdigit():
                msg = 'Could not parse host:port "%s". %s does not look like a valid port is not a valid tcp port.'
                raise ParserError(msg % (self.livestatus_socket_path, tcp_port))
            tcp_port = int(tcp_port)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((address, tcp_port))
        else:
            # No colon: treat the path as a unix domain socket file
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self.livestatus_socket_path)
        return s
    except IOError:
        # py2/py3-portable way to grab the active exception
        t, e = sys.exc_info()[:2]
        msg = "%s while connecting to '%s'. Make sure nagios is running and mk_livestatus loaded."
        raise ParserError(msg % (e, self.livestatus_socket_path))
def query(self, query, *args, **kwargs):
    """ Performs LQL queries against the livestatus socket.

    Queries are corrected and convenient default headers are added to the
    query before sending it to the socket.

    Args:
        query: Query to be passed to the livestatus socket (string)
        args, kwargs: Additional parameters that will be sent to
            :py:meth:`pynag.Utils.grep_to_livestatus`. The result will be
            appended to the query.
    Returns:
        Answer from livestatus. It will be in python format unless specified
        otherwise.
    Raises:
        :py:class:`ParserError` if problems connecting to livestatus.
    """
    # columns parameter is here for backwards compatibility only
    kwargs.pop('columns', None)
    # We break the query up into a list of lines; they are written back
    # one line per item before being sent to the socket
    query = query.split('\n')
    query += pynag.Utils.grep_to_livestatus(*args, **kwargs)
    # Bug fix for py3 compatibility: `not filter(...)` is always False on
    # python 3 because filter() returns a (truthy) iterator — use any().
    # If no response header was specified, we add fixed16
    response_header = None
    if not any(x.startswith('ResponseHeader:') for x in query):
        query.append("ResponseHeader: fixed16")
        response_header = "fixed16"
    # If no specific outputformat is requested, we will return in python format
    python_format = False
    if not any(x.startswith('OutputFormat:') for x in query):
        query.append("OutputFormat: python")
        python_format = True
    # There is a bug in livestatus where if requesting Stats, then no column headers are sent from livestatus
    # In later version, the headers are sent, but the output is corrupted.
    #
    # We maintain consistency by clinging on to the old bug, and if there are Stats in the output
    # we will not ask for column headers
    doing_stats = any(x.startswith('Stats:') for x in query)
    if not doing_stats and not any(x.startswith('ColumnHeaders: on') for x in query):
        query.append("ColumnHeaders: on")
    # Check if we need to add authuser to the query
    if not any(x.startswith('AuthUser:') for x in query) and self.authuser not in (None, ''):
        query.append("AuthUser: %s" % self.authuser)
    # When we reach here, we are done adding options to the query, so we convert to the string that will
    # be sent to the livestatus socket
    query = '\n'.join(query) + '\n'
    self.last_query = query
    #
    # Lets create a socket and see if we can write to it
    #
    s = self._get_socket()
    try:
        s.send(query)
    except IOError:
        msg = "Could not write to socket '%s'. Make sure you have the right permissions"
        raise ParserError(msg % self.livestatus_socket_path)
    s.shutdown(socket.SHUT_WR)
    tmp = s.makefile()
    # Read the response header from livestatus
    if response_header == "fixed16":
        response_data = tmp.readline()
        if len(response_data) == 0:
            return []
        return_code = response_data.split()[0]
        if not return_code.startswith('2'):
            error_message = tmp.readline().strip()
            raise ParserError("Error '%s' from livestatus: %s" % (return_code, error_message))
    answer = tmp.read()
    # We are done with the livestatus socket. lets close it
    s.close()
    if answer == '':
        return []
    # If something other than python format was requested, we return the answer as is
    if python_format is False:
        return answer
    # If we reach down here, it means we are supposed to parse the output before returning it
    try:
        # SECURITY NOTE: eval() of data read from the socket — livestatus'
        # 'python' OutputFormat is a python literal, but this trusts the
        # socket peer completely; consider ast.literal_eval instead.
        answer = eval(answer)
    except Exception:
        raise ParserError("Error, could not parse response from livestatus.\n%s" % answer)
    # Workaround for livestatus bug, where column headers are not provided even if we asked for them
    if doing_stats is True and len(answer) == 1:
        return answer[0]
    columns = answer.pop(0)
    # Lets throw everything into a hashmap before we return
    result = []
    for line in answer:
        tmp = {}
        for i, column in enumerate(line):
            column_name = columns[i]
            tmp[column_name] = column
        result.append(tmp)
    return result
def get(self, table, *args, **kwargs):
    """ Same as self.query('GET %s' % (table,)).

    Extra arguments will be appended to the query.

    Args:
        table: Table from which the data will be retrieved.
        args, kwargs: appended to the end of the query as additional
            instructions.
    Example::
        get('contacts', 'Columns: name alias')
    Returns:
        Answer from livestatus in python format.
    """
    lql = 'GET %s' % (table,)
    return self.query(lql, *args, **kwargs)
def get_host(self, host_name):
    """ Performs a GET query for a particular host.

    Sends::
        GET hosts
        Filter: host_name = <host_name>

    Args:
        host_name: name of the host to obtain livestatus data from.
    Returns:
        First matching row from livestatus, in python format.
    """
    host_filter = 'Filter: host_name = %s' % host_name
    return self.query('GET hosts', host_filter)[0]
def get_service(self, host_name, service_description):
    """ Performs a GET query for a particular service.

    Sends::
        GET services
        Filter: host_name = <host_name>
        Filter: description = <service_description>

    Args:
        host_name: name of the host the target service is attached to.
        service_description: Description of the service to obtain livestatus
            data from.
    Returns:
        First matching row from livestatus, in python format.
    """
    host_filter = 'Filter: host_name = %s' % host_name
    service_filter = 'Filter: description = %s' % service_description
    return self.query('GET services', host_filter, service_filter)[0]
def get_hosts(self, *args, **kwargs):
    """ Performs a GET query for all hosts.

    Args:
        args, kwargs: appended to the query as additional instructions.
    Returns:
        Answer from livestatus in python format.
    """
    lql = 'GET hosts'
    return self.query(lql, *args, **kwargs)
def get_services(self, *args, **kwargs):
    """ Performs a GET query for all services.

    Args:
        args, kwargs: appended to the query as additional instructions.
    Returns:
        Answer from livestatus in python format.
    """
    lql = 'GET services'
    return self.query(lql, *args, **kwargs)
def get_hostgroups(self, *args, **kwargs):
    """ Performs a GET query for all hostgroups.

    Args:
        args, kwargs: appended to the query as additional instructions.
    Returns:
        Answer from livestatus in python format.
    """
    lql = 'GET hostgroups'
    return self.query(lql, *args, **kwargs)
def get_servicegroups(self, *args, **kwargs):
    """ Performs a GET query for all servicegroups.

    Args:
        args, kwargs: appended to the query as additional instructions.
    Returns:
        Answer from livestatus in python format.
    """
    lql = 'GET servicegroups'
    return self.query(lql, *args, **kwargs)
def get_contactgroups(self, *args, **kwargs):
    """Fetch livestatus data for every contactgroup.

    Extra positional/keyword arguments are appended to the
    ``GET contactgroups`` query as additional livestatus instructions.

    Returns:
        Answer from livestatus in python format.
    """
    query_header = 'GET contactgroups'
    return self.query(query_header, *args, **kwargs)
def get_contacts(self, *args, **kwargs):
    """Fetch livestatus data for every contact.

    Extra positional/keyword arguments are appended to the
    ``GET contacts`` query as additional livestatus instructions.

    Returns:
        Answer from livestatus in python format.
    """
    query_header = 'GET contacts'
    return self.query(query_header, *args, **kwargs)
def get_contact(self, contact_name):
    """Fetch livestatus data for one particular contact.

    Runs ``GET contacts`` filtered on ``contact_name`` and returns the
    first match.

    Args:
        contact_name: name of the contact to obtain livestatus data from.

    Returns:
        The first matching contact, in python format.
    """
    contact_filter = 'Filter: contact_name = %s' % contact_name
    return self.query('GET contacts', contact_filter)[0]
def get_servicegroup(self, name):
    """Fetch livestatus data for one particular servicegroup.

    Runs ``GET servicegroups`` filtered on the ``name`` column and
    returns the first match.

    Args:
        name: name of the servicegroup to obtain livestatus data from.

    Returns:
        The first matching servicegroup, in python format.
    """
    group_filter = 'Filter: name = %s' % name
    return self.query('GET servicegroups', group_filter)[0]
def get_hostgroup(self, name):
    """Fetch livestatus data for one particular hostgroup.

    Runs ``GET hostgroups`` filtered on the ``name`` column and returns
    the first match.

    Args:
        name: name of the hostgroup to obtain livestatus data from.

    Returns:
        The first matching hostgroup, in python format.
    """
    group_filter = 'Filter: name = %s' % name
    return self.query('GET hostgroups', group_filter)[0]
def get_contactgroup(self, name):
    """Fetch livestatus data for one particular contactgroup.

    Runs ``GET contactgroups`` filtered on the ``name`` column and
    returns the first match.

    Args:
        name: name of the contactgroup to obtain livestatus data from.

    Returns:
        The first matching contactgroup, in python format.
    """
    group_filter = 'Filter: name = %s' % name
    return self.query('GET contactgroups', group_filter)[0]
class RetentionDat(object):
    """ Easy way to parse the content of retention.dat

    After calling parse() contents of retention.dat are kept in self.data
    as a dict mapping object type (e.g. "hoststatus") to a list of dicts.

    Example Usage::

        r = retention()
        r.parse()
        print r
        print r.data['info']
    """
    def __init__(self, filename=None, cfg_file=None):
        """ Initilize a new instance of retention.dat

        Args (you only need to provide one of these):
            filename: path to your retention.dat file
            cfg_file: path to your nagios.cfg file, path to retention.dat will
            be looked up in this file
        """
        # If filename is not provided, lets try to discover it from
        # nagios.cfg
        if filename is None:
            c = config(cfg_file=cfg_file)
            for key, value in c._load_static_file():
                if key == "state_retention_file":
                    filename = value
        self.filename = filename
        # Populated by parse(); None until then.
        self.data = None
    def parse(self):
        """ Parses your status.dat file and stores in a dictionary under self.data

        Returns:
            None

        Raises:
            :py:class:`ParserError`: if problem arises while reading status.dat
            :py:class:`ParserError`: if status.dat is not found
            :py:class:`IOError`: if status.dat cannot be read
        """
        self.data = {}
        status = {}  # Holds all attributes of a single item
        key = None  # if within definition, store everything before =
        value = None  # if within definition, store everything after =
        if not self.filename:
            raise ParserError("status.dat file not found")
        # NOTE(review): file handle is never explicitly closed; relies on GC.
        # Opened in binary mode and compared against str literals below —
        # python 2 semantics assumed.
        lines = open(self.filename, 'rb').readlines()
        for sequence_no, line in enumerate(lines):
            line_num = sequence_no + 1
            # Cleanup and line skips
            line = line.strip()
            if line == "":
                pass
            elif line[0] == "#" or line[0] == ';':
                # Comment line — ignore.
                pass
            elif line.find("{") != -1:
                # Start of a new definition, e.g. "hoststatus {".
                status = {}
                status['meta'] = {}
                status['meta']['type'] = line.split("{")[0].strip()
            elif line.find("}") != -1:
                # Status definition has finished, lets add it to
                # self.data
                if status['meta']['type'] not in self.data:
                    self.data[status['meta']['type']] = []
                self.data[status['meta']['type']].append(status)
            else:
                # Inside a definition: expect "key=value" lines.
                tmp = line.split("=", 1)
                if len(tmp) == 2:
                    (key, value) = line.split("=", 1)
                    status[key] = value
                elif key == "long_plugin_output":
                    # special hack for long_output support. We get here if:
                    # * line does not contain {
                    # * line does not contain }
                    # * line does not contain =
                    # * last line parsed started with long_plugin_output=
                    status[key] += "\n" + line
                else:
                    raise ParserError("Error on %s:%s: Could not parse line: %s" % (self.filename, line_num, line))
    def __setitem__(self, key, item):
        # Dict-like write access to the parsed data.
        self.data[key] = item
    def __getitem__(self, key):
        # Dict-like read access to the parsed data.
        return self.data[key]
    def __str__(self):
        # Render self.data back into retention.dat-style text,
        # parsing on demand if it has not been loaded yet.
        if not self.data:
            self.parse()
        # NOTE(review): no newline after this header, so it runs directly
        # into the first object type name.
        str_buffer = "# Generated by pynag"
        for datatype, datalist in self.data.items():
            for item in datalist:
                str_buffer += "%s {\n" % datatype
                for attr, value in item.items():
                    str_buffer += "%s=%s\n" % (attr, value)
                str_buffer += "}\n"
        return str_buffer
class StatusDat(RetentionDat):
    """ Easy way to parse status.dat file from nagios

    After calling parse() contents of status.dat are kept in status.data

    Example usage::

        >>> s = status()
        >>> s.parse()
        >>> keys = s.data.keys()
        >>> 'info' in keys
        True
        >>> 'programstatus' in keys
        True
        >>> for service in s.data.get('servicestatus',[]):
        ...     host_name=service.get('host_name', None)
        ...     description=service.get('service_description',None)
    """
    def __init__(self, filename=None, cfg_file=None):
        """ Initilize a new instance of status

        Args (you only need to provide one of these):
            filename: path to your status.dat file
            cfg_file: path to your nagios.cfg file, path to status.dat will be
            looked up in this file
        """
        # If filename is not provided, lets try to discover it from
        # nagios.cfg
        if filename is None:
            c = config(cfg_file=cfg_file)
            for key, value in c._load_static_file():
                if key == "status_file":
                    filename = value
        self.filename = filename
        self.data = None
    def get_contactstatus(self, contact_name):
        """ Returns a dictionary derived from status.dat for one particular contact

        Args:
            contact_name: `contact_name` field of the contact's status.dat data
            to parse and return as a dict.

        Returns:
            dict derived from status.dat for the contact.

        Raises:
            ValueError if object is not found
        """
        if self.data is None:
            self.parse()
        for i in self.data['contactstatus']:
            if i.get('contact_name') == contact_name:
                return i
        # BUG FIX: this previously *returned* the ValueError instance instead
        # of raising it, contradicting the documented "Raises" contract and
        # the behaviour of get_hoststatus()/get_servicestatus().
        raise ValueError(contact_name)
    def get_hoststatus(self, host_name):
        """ Returns a dictionary derived from status.dat for one particular host

        Args:
            host_name: `host_name` field of the host's status.dat data
            to parse and return as a dict.

        Returns:
            dict derived from status.dat for the host.

        Raises:
            ValueError if object is not found
        """
        if self.data is None:
            self.parse()
        for i in self.data['hoststatus']:
            if i.get('host_name') == host_name:
                return i
        raise ValueError(host_name)
    def get_servicestatus(self, host_name, service_description):
        """ Returns a dictionary derived from status.dat for one particular service

        Args:
            host_name: `host_name` field of the service's status.dat data.
            service_description: `service_description` field of the service's
            status.dat data.

        Returns:
            dict derived from status.dat for the service.

        Raises:
            ValueError if object is not found
        """
        if self.data is None:
            self.parse()
        for i in self.data['servicestatus']:
            if i.get('host_name') == host_name:
                if i.get('service_description') == service_description:
                    return i
        raise ValueError(host_name, service_description)
class ObjectCache(Config):
    """ Loads the configuration as it appears in objects.cache file """
    def get_cfg_files(self):
        """Return the objects.cache path from nagios.cfg as a one-item list.

        Scans the parsed main-config directives for ``object_cache_file``;
        implicitly returns None when no such directive exists.
        """
        for directive, path in self.maincfg_values:
            if directive == 'object_cache_file':
                return [path]
class ParserError(Exception):
    """Raised for critical errors while parsing nagios-style configuration.

    Attributes:
        filename: file the offending item came from, when known.
        line_start: line where the offending item starts, when known.
        message: human-readable description of the problem.
    """
    filename = None
    line_start = None
    message = None

    def __init__(self, message, item=None):
        """Create a ParserError.

        Args:
            message: Message to be printed by the error.
            item: Pynag item who caused the error (optional); its meta
                information is used to point at the offending file/line.
        """
        self.message = message
        if item is not None:
            self.item = item
            meta = item['meta']
            self.filename = meta['filename']
            self.line_start = meta.get('line_start')

    def __str__(self):
        text = self.message
        # Append file/line context when both are known.
        if self.filename and self.line_start:
            text = '%s in %s, line %s' % (text, self.filename, self.line_start)
        return repr(text)
class ConfigFileNotFound(ParserError):
    """ This exception is thrown if we cannot locate any nagios.cfg-style config file. """
    pass
class LivestatusNotConfiguredException(ParserError):
    """ This exception is raised if we tried to autodiscover path to livestatus and failed """
class LogFiles(object):
    """ Parses Logfiles defined in nagios.cfg and allows easy access to its content

    Content is stored in python-friendly arrays of dicts. Output should be more
    or less compatible with mk_livestatus log output
    """
    def __init__(self, maincfg=None):
        # Resolve log file locations from the main nagios.cfg.
        self.config = config(maincfg)
        self.log_file = self.config.get_cfg_value('log_file')
        self.log_archive_path = self.config.get_cfg_value('log_archive_path')
    def get_log_entries(self, start_time=None, end_time=None, strict=True, search=None, **kwargs):
        """ Get Parsed log entries for given timeperiod.

        Args:
            start_time: unix timestamp. if None, return all entries from today
            end_time: If specified, only fetch log entries older than this (unix
            timestamp)
            strict: If True, only return entries between start_time and
            end_time, if False, then return entries that belong to same log
            files as given timeset
            search: If provided, only return log entries that contain this
            string (case insensitive)
            kwargs: All extra arguments are provided as filter on the log
            entries. f.e. host_name="localhost"

        Returns:
            List of dicts
        """
        now = time.time()
        if end_time is None:
            end_time = now
        if start_time is None:
            if 'filename' in kwargs:
                # A specific file was requested; include its entire history.
                start_time = 1
            else:
                # Default start_time to midnight of today.
                seconds_in_a_day = 60 * 60 * 24
                seconds_today = end_time % seconds_in_a_day  # midnight of today
                start_time = end_time - seconds_today
        start_time = int(start_time)
        end_time = int(end_time)
        logfiles = self.get_logfiles()
        if 'filename' in kwargs:
            logfiles = filter(lambda x: x == kwargs.get('filename'), logfiles)
        # If start time was provided, skip all files that we last modified
        # before start_time
        if start_time:
            logfiles = filter(lambda x: start_time <= os.stat(x).st_mtime, logfiles)
        # Log entries are returned in ascending order, which is the opposite of
        # what get_logfiles returns.
        # NOTE(review): filter() returns an iterator on python 3, which has
        # no .reverse(); this code assumes python 2 list semantics.
        logfiles.reverse()
        result = []
        for log_file in logfiles:
            entries = self._parse_log_file(filename=log_file)
            if len(entries) == 0:
                continue
            first_entry = entries[0]
            # NOTE(review): last_entry is assigned but never used.
            last_entry = entries[-1]
            # Skip files that start after the requested window ends.
            if first_entry['time'] > end_time:
                continue
            # If strict, filter entries to only include the ones in the timespan
            if strict is True:
                entries = [x for x in entries if x['time'] >= start_time and x['time'] <= end_time]
            # If search string provided, filter the string
            if search is not None:
                entries = [x for x in entries if x['message'].lower().find(search.lower()) > -1]
            # Apply any extra attribute filters (e.g. host_name="localhost").
            for k, v in kwargs.items():
                entries = [x for x in entries if x.get(k) == v]
            result += entries
            # NOTE(review): this 'continue' is the last statement of the loop
            # body, so the condition has no effect (dead logic).
            if start_time is None or int(start_time) >= int(first_entry.get('time')):
                continue
        # Now, logfiles should in MOST cases come sorted for us.
        # However we rely on modification time of files and if it is off,
        # We want to make sure log entries are coming in the correct order.
        # The following sort should not impact performance in the typical use case.
        result.sort(key=lambda x: x.get('time'))
        return result
    def get_logfiles(self):
        """ Returns a list with the fullpath to every log file used by nagios.

        Lists are sorted by modification times. Newest logfile is at the front
        of the list so usually nagios.log comes first, followed by archivelogs

        Returns:
            List of strings
        """
        logfiles = []
        for filename in os.listdir(self.log_archive_path):
            full_path = "%s/%s" % (self.log_archive_path, filename)
            logfiles.append(full_path)
        logfiles.append(self.log_file)
        # Sort the logfiles by modification time, newest file at the front
        # NOTE(review): compare_mtime is an unused leftover; sorting is done
        # with the key= function on the next line.
        compare_mtime = lambda a, b: os.stat(a).st_mtime < os.stat(b).st_mtime
        logfiles.sort(key=lambda x: int(os.stat(x).st_mtime))
        # Newest logfiles go to the front of the list
        logfiles.reverse()
        return logfiles
    def get_flap_alerts(self, **kwargs):
        """ Same as :py:meth:`get_log_entries`, except return timeperiod transitions.

        Takes same parameters.
        """
        # NOTE(review): despite the method name, this filters on
        # class_name="timeperiod transition", while _parse_log_line emits
        # 'flapping' and 'timeperiod_transition' (underscore) — so this
        # filter matches neither; looks like a long-standing bug. Confirm
        # before changing behaviour.
        return self.get_log_entries(class_name="timeperiod transition", **kwargs)
    def get_notifications(self, **kwargs):
        """ Same as :py:meth:`get_log_entries`, except return only notifications.
        Takes same parameters.
        """
        return self.get_log_entries(class_name="notification", **kwargs)
    def get_state_history(self, start_time=None, end_time=None, host_name=None, strict=True, service_description=None):
        """ Returns a list of dicts, with the state history of hosts and services.

        Args:
            start_time: unix timestamp. if None, return all entries from today
            end_time: If specified, only fetch log entries older than this (unix
            timestamp)
            host_name: If provided, only return log entries that contain this
            string (case insensitive)
            service_description: If provided, only return log entries that contain this
            string (case insensitive)

        Returns:
            List of dicts with state history of hosts and services
        """
        log_entries = self.get_log_entries(start_time=start_time, end_time=end_time, strict=strict, class_name='alerts')
        result = []
        last_state = {}
        now = time.time()
        for line in log_entries:
            if 'state' not in line:
                continue
            # Duration defaults to "until now"; corrected below when a later
            # entry for the same host/service is seen.
            line['duration'] = now - int(line.get('time'))
            if host_name is not None and host_name != line.get('host_name'):
                continue
            if service_description is not None and service_description != line.get('service_description'):
                continue
            if start_time is None:
                start_time = int(line.get('time'))
            # Track transitions per host/service pair.
            short_name = "%s/%s" % (line['host_name'], line['service_description'])
            if short_name in last_state:
                last = last_state[short_name]
                last['end_time'] = line['time']
                last['duration'] = last['end_time'] - last['time']
                line['previous_state'] = last['state']
            last_state[short_name] = line
            if strict is True:
                if start_time is not None and int(start_time) > int(line.get('time')):
                    continue
                if end_time is not None and int(end_time) < int(line.get('time')):
                    continue
            result.append(line)
        return result
    def _parse_log_file(self, filename=None):
        """ Parses one particular nagios logfile into arrays of dicts.

        Args:
            filename: Log file to be parsed. If is None, then log_file from
            nagios.cfg is used.

        Returns:
            A list of dicts containing all data from the log file
        """
        if filename is None:
            filename = self.log_file
        result = []
        # NOTE(review): file handle is never explicitly closed; relies on GC.
        for line in open(filename).readlines():
            parsed_entry = self._parse_log_line(line)
            if parsed_entry != {}:
                parsed_entry['filename'] = filename
                result.append(parsed_entry)
        return result
    def _parse_log_line(self, line):
        """ Parse one particular line in nagios logfile and return a dict.

        Args:
            line: Line of the log file to be parsed.

        Returns:
            dict containing the information from the log file line, or an
            empty dict if the line does not look like a nagios log entry.
        """
        host = None
        service_description = None
        state = None
        check_attempt = None
        plugin_output = None
        contact = None
        # Nagios log lines look like: "[timestamp] TYPE: options".
        m = re.search('^\[(.*?)\] (.*?): (.*)', line)
        if m is None:
            return {}
        line = line.strip()
        timestamp, logtype, options = m.groups()
        result = {}
        try:
            timestamp = int(timestamp)
        except ValueError:
            timestamp = 0
        result['time'] = int(timestamp)
        result['type'] = logtype
        result['options'] = options
        result['message'] = line
        result['class'] = 0  # unknown
        result['class_name'] = 'unclassified'
        # Classify the entry by its log type and unpack its ';'-separated
        # option fields accordingly.
        if logtype in ('CURRENT HOST STATE', 'CURRENT SERVICE STATE', 'SERVICE ALERT', 'HOST ALERT'):
            result['class'] = 1
            result['class_name'] = 'alerts'
            if logtype.find('HOST') > -1:
                # This matches host current state:
                m = re.search('(.*?);(.*?);(.*);(.*?);(.*)', options)
                if m is None:
                    return result
                host, state, hard, check_attempt, plugin_output = m.groups()
                service_description = None
            if logtype.find('SERVICE') > -1:
                m = re.search('(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, service_description, state, hard, check_attempt, plugin_output = m.groups()
            result['host_name'] = host
            result['service_description'] = service_description
            result['state'] = int(pynag.Plugins.state[state])
            result['check_attempt'] = check_attempt
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif "NOTIFICATION" in logtype:
            result['class'] = 3
            result['class_name'] = 'notification'
            if logtype == 'SERVICE NOTIFICATION':
                m = re.search('(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                contact, host, service_description, state, command, plugin_output = m.groups()
            elif logtype == 'HOST NOTIFICATION':
                m = re.search('(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                contact, host, state, command, plugin_output = m.groups()
                service_description = None
            result['contact_name'] = contact
            result['host_name'] = host
            result['service_description'] = service_description
            try:
                result['state'] = int(pynag.Plugins.state[state])
            except Exception:
                # Unknown state name — flag with -1 rather than failing.
                result['state'] = -1
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif logtype == "EXTERNAL COMMAND":
            result['class'] = 5
            result['class_name'] = 'command'
            m = re.search('(.*?);(.*)', options)
            if m is None:
                return result
            command_name, text = m.groups()
            result['command_name'] = command_name
            result['text'] = text
        elif logtype in ('PASSIVE SERVICE CHECK', 'PASSIVE HOST CHECK'):
            result['class'] = 4
            result['class_name'] = 'passive'
            if logtype.find('HOST') > -1:
                # This matches host current state:
                m = re.search('(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, state, plugin_output = m.groups()
                service_description = None
            if logtype.find('SERVICE') > -1:
                m = re.search('(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, service_description, state, plugin_output = m.groups()
            result['host_name'] = host
            result['service_description'] = service_description
            result['state'] = state
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif logtype in ('SERVICE FLAPPING ALERT', 'HOST FLAPPING ALERT'):
            result['class_name'] = 'flapping'
        elif logtype == 'TIMEPERIOD TRANSITION':
            result['class_name'] = 'timeperiod_transition'
        elif logtype == 'Warning':
            result['class_name'] = 'warning'
            result['state'] = "1"
            result['text'] = options
        if 'text' not in result:
            result['text'] = result['options']
        result['log_class'] = result['class']  # since class is a python keyword
        return result
class ExtraOptsParser(object):
    """ Get Nagios Extra-Opts from a config file as specified by http://nagiosplugins.org/extra-opts

    We could ALMOST use pythons ConfParser but nagios plugin team thought it would be a
    good idea to support multiple values per key, so a dict datatype no longer works.

    Its a shame because we have to make our own "ini" parser as a result

    Usage::

        # cat /etc/nagios/plugins.ini
        [main]
        host_name = localhost
        [other section]
        host_name = example.com
        # EOF

        e = ExtraOptsParser(section_name='main', config_file='/etc/nagios/plugins.ini')
        e.get('host_name')  # returns "localhost"
        e.get_values()  # Returns a dict of all the extra opts
        e.getlist('host_name')  # returns all values of host_name (if more than one were specified) in a list
    """
    # Well-known locations searched when no config file is given and
    # NAGIOS_CONFIG_PATH yields nothing.
    standard_locations = [
        "/etc/nagios/plugins.ini",
        "/usr/local/nagios/etc/plugins.ini",
        "/usr/local/etc/nagios/plugins.ini",
        "/etc/opt/nagios/plugins.ini",
        "/etc/nagios-plugins.ini",
        "/usr/local/etc/nagios-plugins.ini",
        "/etc/opt/nagios-plugins.ini",
    ]
    def __init__(self, section_name=None, config_file=None):
        # Fall back to spec-defined defaults when either argument is missing.
        if not section_name:
            section_name = self.get_default_section_name()
        if not config_file:
            config_file = self.get_default_config_file()
        self.section_name = section_name
        self.config_file = config_file
        self._all_options = self.parse_file(filename=config_file) or {}
    def get_values(self):
        """ Returns a dict with all extra-options with the granted section_name and config_file

        Results are in the form of::

            {
              'key': ["possible","values"]
            }
        """
        return self._all_options.get(self.section_name, {})
    def get_default_section_name(self):
        """ According to extra-opts standard, the default should be filename of check script being run """
        return os.path.basename(sys.argv[0])
    def get_default_config_file(self):
        """ Return path to first readable extra-opt config-file found

        According to the nagiosplugins extra-opts spec the search method is as follows:

            1. Search for nagios.ini or nagios-plugins.ini in : splitted variable NAGIOS_CONFIG_PATH
            2. Search in a predefined list of files
            3. Return None if no config file is found

        The method works as follows:

        To quote the spec on NAGIOS_CONFIG_PATH:

            *"To use a custom location, set a NAGIOS_CONFIG_PATH environment
            variable to the set of directories that should be checked (this is a
            colon-separated list just like PATH). The first plugins.ini or
            nagios-plugins.ini file found in these directories will be used."*
        """
        search_path = []
        nagios_config_path = os.environ.get('NAGIOS_CONFIG_PATH', '')
        for path in nagios_config_path.split(':'):
            search_path.append(os.path.join(path, 'plugins.ini'))
            search_path.append(os.path.join(path, 'nagios-plugins.ini'))
        search_path += self.standard_locations
        # Kept for introspection/debugging.
        self.search_path = search_path
        for path in search_path:
            if os.path.isfile(path):
                return path
        return None
    def get(self, option_name, default=_sentinel):
        """ Return the value of one specific option

        Args:
            option_name: The value set to this option will be returned

        Returns:
            The value of `option_name`

        Raises:
            :py:class:`ValueError` when `option_name` cannot be found in options
        """
        result = self.getlist(option_name, default)
        # If option was not found, raise error
        # NOTE(review): sentinel checks use == rather than is; relies on the
        # sentinel object not defining a custom __eq__.
        if result == _sentinel:
            raise ValueError("Option named %s was not found" % (option_name))
        elif result == default:
            # Option was missing but a caller-supplied default exists.
            return result
        elif not result:
            # empty list
            return result
        else:
            # Multiple values may exist; return the first one.
            return result[0]
    def getlist(self, option_name, default=_sentinel):
        """ Return a list of all values for option_name

        Args:
            option_name: All the values set to this option will be returned

        Returns:
            List containing all the options set to `option_name`

        Raises:
            :py:class:`ValueError` when `option_name` cannot be found in options
        """
        result = self.get_values().get(option_name, default)
        if result == _sentinel:
            raise ValueError("Option named %s was not found" % (option_name))
        return result
    def parse_file(self, filename):
        """ Parses an ini-file and returns a dict of the ini values.

        The datatype returned is a list of sections where each section is a
        dict of values.

        Args:
            filename: Full path to the ini-file to be parsed.

        Example the following the file::

            [main]
            name = this is a name
            key = value
            key = value2

        Would return::

            [
              {'main':
                {
                  'name': ['this is a name'],
                  'key': [value, value2]
                }
              },
            ]
        """
        if filename is None:
            return {}
        f = open(filename)
        try:
            data = f.read()
            return self.parse_string(data)
        finally:
            f.close()
    def parse_string(self, string):
        """ Parses a string that is supposed to be ini-style format.

        See :py:meth:`parse_file` for more info

        Args:
            string: String to be parsed. Should be in ini-file format.

        Returns:
            Dictionnary containing all the sections of the ini-file and their
            respective data.

        Raises:
            :py:class:`ParserError` when line does not follow the ini format.
        """
        sections = {}
        # When parsing inside a section, the name of it stored here.
        section_name = None
        # NOTE(review): this initial value is replaced as soon as a section
        # header is seen and is otherwise unused.
        current_section = pynag.Utils.defaultdict(dict)
        for line_no, line, in enumerate(string.splitlines()):
            line = line.strip()
            # skip empty lines
            if not line or line[0] in ('#', ';'):
                continue
            # Check if this is a new section
            if line.startswith('[') and line.endswith(']'):
                section_name = line.strip('[').strip(']').strip()
                current_section = pynag.Utils.defaultdict(list)
                sections[section_name] = current_section
                continue
            # All entries should have key=value format
            # NOTE(review): line_no is 0-based here, so the reported line
            # number is off by one.
            if not '=' in line:
                error = "Line %s should be in the form of key=value format (got '%s' instead)" % (line_no, line)
                raise ParserError(error)
            # If we reach here, we parse current line into key and a value section
            # NOTE(review): a key=value line before any [section] header hits
            # sections[None] and raises KeyError — presumably unreachable for
            # valid files; confirm before changing.
            key, value = line.split('=', 1)
            key = key.strip()
            value = value.strip()
            sections[section_name][key].append(value)
        return sections
class SshConfig(Config):
    """ Parse object configuration files from remote host via ssh

    Uses python-paramiko for ssh connections.

    NOTE(review): python 2 only — contains print statements; several methods
    look experimental (see dead code in open()).
    """
    def __init__(self, host, username, password=None, cfg_file=None):
        """ Creates a SshConfig instance

        Args:
            host: Host to connect to
            username: User to connect with
            password: Password for `username`
            cfg_file: Nagios main cfg file
        """
        import paramiko
        self.ssh = paramiko.SSHClient()
        # NOTE(review): AutoAddPolicy blindly trusts unknown host keys.
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(host, username=username, password=password)
        self.ftp = self.ssh.open_sftp()
        import cStringIO
        c = cStringIO.StringIO()
        # In-memory tar archive used as a cache of remote files.
        self.tar = tarfile.open(mode='w', fileobj=c)
        self.cached_stats = {}
        super(SshConfig, self).__init__(cfg_file=cfg_file)
    def open(self, filename, *args, **kwargs):
        """ Behaves like file.open only, via ssh connection """
        return self.tar.extractfile(filename)
        # NOTE(review): everything below this return is unreachable dead code
        # (debug leftovers).
        tarinfo = self._get_file(filename)
        string = tarinfo.tobuf()
        print string
        return StringIO.StringIO(string)
        return self.tar.extractfile(tarinfo)
    def add_to_tar(self, path):
        """ Fetch every regular file under `path` from the remote host and
        add it to the local in-memory tar cache.
        """
        print "Taring ", path
        command = "find '{path}' -type f | tar -c -T - --to-stdout --absolute-names"
        command = command.format(path=path)
        print command
        stdin, stdout, stderr = self.ssh.exec_command(command, bufsize=50000)
        tar = tarfile.open(fileobj=stdout, mode='r|')
        if not self.tar:
            self.tar = tar
            # return
        else:
            # Merge the streamed archive into the existing cache.
            for i in tar:
                self.tar.addfile(i)
    def is_cached(self, filename):
        # True if filename is already in the local tar cache.
        if not self.tar:
            return False
        return filename in self.tar.getnames()
    def _get_file(self, filename):
        """ Download filename and return the TarInfo object """
        if filename not in self.tar.getnames():
            self.add_to_tar(filename)
        return self.tar.getmember(filename)
    def get_cfg_files(self):
        # Collect every cfg file referenced by cfg_file/cfg_dir directives,
        # caching each one into the tar archive along the way.
        cfg_files = []
        for config_object, config_value in self.maincfg_values:
            # Add cfg_file objects to cfg file list
            if config_object == "cfg_file":
                config_value = self.abspath(config_value)
                if self.isfile(config_value):
                    cfg_files.append(config_value)
            elif config_object == "cfg_dir":
                # Expand a directory into all *.cfg files below it.
                absolut_path = self.abspath(config_value)
                command = "find '%s' -type f -iname \*cfg" % (absolut_path)
                stdin, stdout, stderr = self.ssh.exec_command(command)
                raw_filelist = stdout.read().splitlines()
                cfg_files += raw_filelist
            else:
                continue
            if not self.is_cached(config_value):
                self.add_to_tar(config_value)
        return cfg_files
    def isfile(self, path):
        """ Behaves like os.path.isfile only, via ssh connection """
        try:
            copy = self._get_file(path)
            return copy.isfile()
        except IOError:
            return False
    def isdir(self, path):
        """ Behaves like os.path.isdir only, via ssh connection """
        try:
            file_stat = self.stat(path)
            return stat.S_ISDIR(file_stat.st_mode)
        except IOError:
            return False
    def islink(self, path):
        """ Behaves like os.path.islink only, via ssh connection """
        try:
            file_stat = self.stat(path)
            return stat.S_ISLNK(file_stat.st_mode)
        except IOError:
            return False
    def readlink(self, path):
        """ Behaves like os.readlink only, via ssh connection """
        return self.ftp.readlink(path)
    def stat(self, *args, **kwargs):
        """ Wrapper around os.stat only, via ssh connection """
        path = args[0]
        if not self.is_cached(path):
            self.add_to_tar(path)
        if path not in self.tar.getnames():
            raise IOError("No such file or directory %s" % path)
        member = self.tar.getmember(path)
        # Expose os.stat-like attribute names on the TarInfo object.
        member.st_mode = member.mode
        member.st_mtime = member.mtime
        return member
    def access(self, *args, **kwargs):
        """ Wrapper around os.access only, via ssh connection

        NOTE(review): this actually checks the *local* filesystem, not the
        remote one — confirm whether that is intended.
        """
        return os.access(*args, **kwargs)
    def exists(self, path):
        """ Wrapper around os.path.exists only, via ssh connection """
        try:
            self.ftp.stat(path)
            return True
        except IOError:
            return False
    def listdir(self, *args, **kwargs):
        """ Wrapper around os.listdir but via ssh connection """
        stats = self.ftp.listdir_attr(*args, **kwargs)
        # Remember the stat results so later stat() calls can be answered
        # from cache.
        for i in stats:
            self.cached_stats[args[0] + "/" + i.filename] = i
        files = map(lambda x: x.filename, stats)
        return files
class MultiSite(Livestatus):
    """ Wraps around multiple Livestatus instances and aggregates the results
    of queries.

    Example:
        >>> m = MultiSite()
        >>> m.add_backend(path='/var/spool/nagios/livestatus.socket', name='local')
        >>> m.add_backend(path='127.0.0.1:5992', name='remote')
    """
    def __init__(self, *args, **kwargs):
        super(MultiSite, self).__init__(*args, **kwargs)
        # Maps friendly backend name -> Livestatus instance.
        self.backends = {}
    def add_backend(self, path, name):
        """ Add a new livestatus backend to this instance.

        Arguments:
            path (str): Path to file socket or remote address
            name (str): Friendly shortname for this backend
        """
        backend = Livestatus(
            livestatus_socket_path=path,
            nagios_cfg_file=self.nagios_cfg_file,
            authuser=self.authuser
        )
        self.backends[name] = backend
    def get_backends(self):
        """ Returns all configured backends.

        Returns:
            dict. Mapping of backend name -> Livestatus instance.
        """
        return self.backends
    def get_backend(self, backend_name):
        """ Return one specific backend that has previously been added

        If backend_name is empty/None, an arbitrary backend is returned.

        Raises:
            ParserError: If no backend with this name has been added.
        """
        if not backend_name:
            # BUG FIX: dict.values() is a view and not indexable on
            # python 3 -- materialize it first so this works on 2 and 3.
            return list(self.backends.values())[0]
        try:
            return self.backends[backend_name]
        except KeyError:
            raise ParserError("No backend found with name='%s'" % backend_name)
    def query(self, query, *args, **kwargs):
        """ Behaves like mk_livestatus.query() except results are aggregated from multiple backends

        Arguments:
            backend (str): If specified, fetch only data from this backend (see add_backend())
            *args: Passed directly to mk_livestatus.query()
            **kwargs: Passed directly to mk_livestatus.query()
        """
        result = []
        backend = kwargs.pop('backend', None)
        # Special hack, if 'Stats' argument was provided to livestatus
        # We have to maintain compatibility with old versions of livestatus
        # and return single list with all results instead of a list of dicts
        doing_stats = any(map(lambda x: x.startswith('Stats:'), args + (query,)))
        # Iterate though all backends and run the query
        # TODO: Make this multithreaded
        for name, backend_instance in self.backends.items():
            # Skip if a specific backend was requested and this is not it
            if backend and backend != name:
                continue
            query_result = backend_instance.query(query, *args, **kwargs)
            if doing_stats:
                result = self._merge_statistics(result, query_result)
            else:
                # Tag every row with the backend it came from.
                for row in query_result:
                    row['backend'] = name
                    result.append(row)
        return result
    def _merge_statistics(self, list1, list2):
        """ Merges multiple livestatus results into one result

        Arguments:
            list1 (list): List of integers
            list2 (list): List of integers

        Returns:
            list. Aggregated results of list1 + list2

        Example:
            >>> result1 = [1,1,1,1]
            >>> result2 = [2,2,2,2]
            >>> MultiSite()._merge_statistics(result1, result2)
            [3, 3, 3, 3]
        """
        # An empty side contributes nothing; return the other side as-is.
        if not list1:
            return list2
        if not list2:
            return list1
        number_of_columns = len(list1)
        result = [0] * number_of_columns
        for row in (list1, list2):
            for i, column in enumerate(row):
                result[i] += column
        return result
    def get_host(self, host_name, backend=None):
        """ Same as Livestatus.get_host() """
        backend = self.get_backend(backend)
        return backend.get_host(host_name)
    def get_service(self, host_name, service_description, backend=None):
        """ Same as Livestatus.get_service() """
        backend = self.get_backend(backend)
        return backend.get_service(host_name, service_description)
    def get_contact(self, contact_name, backend=None):
        """ Same as Livestatus.get_contact() """
        backend = self.get_backend(backend)
        return backend.get_contact(contact_name)
    def get_contactgroup(self, contactgroup_name, backend=None):
        """ Same as Livestatus.get_contactgroup() """
        backend = self.get_backend(backend)
        return backend.get_contactgroup(contactgroup_name)
    def get_servicegroup(self, servicegroup_name, backend=None):
        """ Same as Livestatus.get_servicegroup() """
        backend = self.get_backend(backend)
        return backend.get_servicegroup(servicegroup_name)
    def get_hostgroup(self, hostgroup_name, backend=None):
        """ Same as Livestatus.get_hostgroup() """
        backend = self.get_backend(backend)
        return backend.get_hostgroup(hostgroup_name)
# Deprecated lowercase aliases, kept only so that pre-existing code which
# imported these names keeps working. New code should use the PascalCase
# classes they subclass.
class config(Config):
    """ This class is here only for backwards compatibility. Use Config instead. """


class mk_livestatus(Livestatus):
    """ This class is here only for backwards compatibility. Use Livestatus instead. """


class object_cache(ObjectCache):
    """ This class is here only for backwards compatibility. Use ObjectCache instead. """


class status(StatusDat):
    """ This class is here only for backwards compatibility. Use StatusDat instead. """


class retention(RetentionDat):
    """ This class is here only for backwards compatibility. Use RetentionDat instead. """
if __name__ == '__main__':
    # Ad-hoc manual benchmark/demo of parsing a remote Nagios config over SSH.
    # NOTE(review): this block uses Python 2 print statements, so the module
    # cannot be executed as a script under Python 3.
    import time
    start = time.time()
    ssh = SshConfig(host='status.adagios.org', username='palli')
    # Enlarge the SSH window and enable compression to speed up the transfer.
    ssh.ssh.get_transport().window_size = 3 * 1024 * 1024
    ssh.ssh.get_transport().use_compression()
    # ssh.add_to_tar('/etc/nagios')
    # sys.exit()
    # ssh.ssh.exec_command("/bin/ls")
    print "before reset"
    ssh.parse()
    end = time.time()
    print "duration=", end - start
    bland = ssh.tar.getmember('/etc/nagios/okconfig/hosts/web-servers/bland.is-http.cfg')
    print bland.tobuf()
    sys.exit(0)
    # NOTE(review): everything below is dead code -- the sys.exit(0) above
    # always fires first.
    print "ssh up"
    ssh_conn = FastTransport(('status.adagios.org', 22))
    ssh_conn.connect(username='palli')
    ftp = paramiko.SFTPClient.from_transport(ssh_conn)
    print "connected" \
        ""
    ssh.ssh = ssh_conn
    ssh.ftp = ftp
    print "starting parse"
    print "done parsing"
|
kaji-project/pynag
|
pynag/Parsers/__init__.py
|
Python
|
gpl-2.0
| 129,457
|
[
"MOE"
] |
e0e83b458adaf955e75500676e9fb105153e7ed1841f450a7b132a492ce74548
|
"""
Script to verify all examples in the readme.
Run from the project directory (i.e. parent) with
python -m tests/test_readme_examples
"""
from __future__ import print_function, division
import numpy as np
from scipy import misc
def main():
    """Run every README example in order."""
    examples = (
        example_standard_situation,
        example_heavy_augmentations,
        example_show,
        example_grayscale,
        example_determinism,
        example_keypoints,
        example_single_augmenters,
        example_unusual_distributions,
        example_hooks,
    )
    for example in examples:
        example()
def example_standard_situation():
    """README example: a typical per-batch augmentation pipeline.

    Kept verbatim from the README (this file exists to verify the README
    examples), so only documentation is added here.
    """
    print("Example: Standard Situation")
    # -------
    # dummy functions to make the example runnable here
    def load_batch(batch_idx):
        return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)

    def train_on_images(images):
        pass
    # -------

    #from imgaug import augmenters as iaa
    import augmenters as iaa

    seq = iaa.Sequential([
        iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
        iaa.Fliplr(0.5), # horizontally flip 50% of the images
        iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
    ])

    for batch_idx in range(1000):
        # 'images' should be either a 4D numpy array of shape (N, height, width, channels)
        # or a list of 3D numpy arrays, each having shape (height, width, channels).
        # Grayscale images must have shape (height, width, 1) each.
        # All images must have numpy's dtype uint8. Values are expected to be in
        # range 0-255.
        images = load_batch(batch_idx)
        images_aug = seq.augment_images(images)
        train_on_images(images_aug)

        # -----
        # Make sure that the example really does something
        if batch_idx == 0:
            assert not np.array_equal(images, images_aug)
def example_heavy_augmentations():
    """README example: a large randomized augmentation pipeline (kept verbatim)."""
    print("Example: Heavy Augmentations")
    import imgaug as ia
    #from imgaug import augmenters as iaa
    import augmenters as iaa

    # random example images
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    st = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential([
            iaa.Fliplr(0.5), # horizontally flip 50% of all images
            iaa.Flipud(0.5), # vertically flip 50% of all images
            st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
            st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.2), per_channel=0.5)), # add gaussian noise to images
            st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
            st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
            st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
            st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
            st(iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
                rotate=(-45, 45), # rotate by -45 to +45 degrees
                shear=(-16, 16), # shear by -16 to +16 degrees
                order=ia.ALL, # use any of scikit-image's interpolation methods
                cval=(0, 1.0), # if mode is constant, use a cval between 0 and 1.0
                mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
        ],
        random_order=True # do all of the above in random order
    )

    images_aug = seq.augment_images(images)

    # -----
    # Make sure that the example really does something
    assert not np.array_equal(images, images_aug)
def example_show():
    """README example: display grids of augmented images (kept verbatim)."""
    print("Example: Show")
    #from imgaug import augmenters as iaa
    import augmenters as iaa

    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
    seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])

    # show an image with 8*8 augmented versions of image 0
    seq.show_grid(images[0], cols=8, rows=8)

    # Show an image with 8*8 augmented versions of image 0 and 8*8 augmented
    # versions of image 1. The identical augmentations will be applied to
    # image 0 and 1.
    seq.show_grid([images[0], images[1]], cols=8, rows=8)
def example_grayscale():
    """README example: augmenting single-channel (grayscale) images (kept verbatim)."""
    print("Example: Grayscale")
    #from imgaug import augmenters as iaa
    import augmenters as iaa

    images = np.random.randint(0, 255, (16, 128, 128), dtype=np.uint8)
    seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])

    # The library expects a list of images (3D inputs) or a single array (4D inputs).
    # So we add an axis to our grayscale array to convert it to shape (16, 128, 128, 1).
    images_aug = seq.augment_images(images[:, :, :, np.newaxis])

    # -----
    # Make sure that the example really does something
    assert not np.array_equal(images, images_aug)
def example_determinism():
    """README example: deterministic augmentation of images plus aligned heatmaps
    (kept verbatim)."""
    print("Example: Determinism")
    #from imgaug import augmenters as iaa
    import augmenters as iaa

    # Standard scenario: You have N RGB-images and additionally 21 heatmaps per image.
    # You want to augment each image and its heatmaps identically.
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
    heatmaps = np.random.randint(0, 255, (16, 128, 128, 21), dtype=np.uint8)

    seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)})])

    # Convert the stochastic sequence of augmenters to a deterministic one.
    # The deterministic sequence will always apply the exactly same effects to the images.
    seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
    images_aug = seq_det.augment_images(images)
    heatmaps_aug = seq_det.augment_images(heatmaps)

    # -----
    # Make sure that the example really does something
    import imgaug as ia
    assert not np.array_equal(images, images_aug)
    assert not np.array_equal(heatmaps, heatmaps_aug)
    images_show = []
    for img_idx in range(len(images)):
        images_show.extend([images[img_idx], images_aug[img_idx], heatmaps[img_idx][..., 0:3], heatmaps_aug[img_idx][..., 0:3]])
    ia.show_grid(images_show, cols=4)
def example_keypoints():
    """README example: augment images and their keypoints with the same
    transformations (kept verbatim)."""
    print("Example: Keypoints")
    import imgaug as ia
    #from imgaug import augmenters as iaa
    import augmenters as iaa
    from scipy import misc
    import random
    images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)

    # Generate random keypoints.
    # The augmenters expect a list of imgaug.KeypointsOnImage.
    keypoints_on_images = []
    for image in images:
        height, width = image.shape[0:2]
        keypoints = []
        for _ in range(4):
            x = random.randint(0, width-1)
            y = random.randint(0, height-1)
            keypoints.append(ia.Keypoint(x=x, y=y))
        keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))

    seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
    seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start

    # augment keypoints and images
    images_aug = seq_det.augment_images(images)
    keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)

    # Example code to show each image and print the new keypoints coordinates
    for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
        image_before = keypoints_before.draw_on_image(image_before)
        image_after = keypoints_after.draw_on_image(image_after)
        misc.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
        for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
            keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
            x_old, y_old = keypoint_old.x, keypoint_old.y
            x_new, y_new = keypoint.x, keypoint.y
            print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new))
def example_single_augmenters():
    """README example: apply individual augmenters directly to single images
    (kept verbatim)."""
    print("Example: Single Augmenters")
    #from imgaug import augmenters as iaa
    import augmenters as iaa
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    flipper = iaa.Fliplr(1.0) # always horizontally flip each input image
    images[0] = flipper.augment_image(images[0]) # horizontally flip image 0

    vflipper = iaa.Flipud(0.9) # vertically flip each input image with 90% probability
    images[1] = vflipper.augment_image(images[1]) # probably vertically flip image 1

    blurer = iaa.GaussianBlur(3.0)
    images[2] = blurer.augment_image(images[2]) # blur image 2 by a sigma of 3.0
    images[3] = blurer.augment_image(images[3]) # blur image 3 by a sigma of 3.0 too

    translater = iaa.Affine(translate_px={"x": -16}) # move each input image by 16px to the left
    images[4] = translater.augment_image(images[4]) # move image 4 to the left

    scaler = iaa.Affine(scale={"y": (0.8, 1.2)}) # scale each input image to 80-120% on the y axis
    images[5] = scaler.augment_image(images[5]) # scale image 5 by 80-120% on the y axis
def example_unusual_distributions():
    """README example: drive augmenter parameters with custom stochastic
    distributions (kept verbatim)."""
    print("Example: Unusual Distributions")
    #from imgaug import augmenters as iaa
    import augmenters as iaa
    #from imgaug import parameters as iap
    import parameters as iap
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Blur by a value sigma which is sampled from a uniform distribution
    # of range 0.1 <= x < 3.0.
    # The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
    blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
    # i.e. sample a value that is usually around 1.0.
    # Clip the resulting value so that it never gets below 0.1 or above 3.0.
    blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Same again, but this time the mean of the normal distribution is not constant,
    # but comes itself from a uniform distribution between 0.5 and 1.5.
    blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Use for sigma one of exactly three allowed values: 0.5, 1.0 or 1.5.
    blurer = iaa.GaussianBlur(iap.Choice([0.5, 1.0, 1.5]))
    images_aug = blurer.augment_images(images)

    # Sample sigma from a discrete uniform distribution of range 1 <= sigma <= 5,
    # i.e. sigma will have any of the following values: 1, 2, 3, 4, 5.
    blurer = iaa.GaussianBlur(iap.DiscreteUniform(1, 5))
    images_aug = blurer.augment_images(images)
def example_hooks():
    """README example: use HooksImages to deactivate some augmenters when
    augmenting heatmaps (kept verbatim)."""
    print("Example: Hooks")
    import imgaug as ia
    #from imgaug import augmenters as iaa
    import augmenters as iaa
    import numpy as np

    # images and heatmaps, just arrays filled with value 30
    images = np.ones((16, 128, 128, 3), dtype=np.uint8) * 30
    heatmaps = np.ones((16, 128, 128, 21), dtype=np.uint8) * 30

    # add vertical lines to see the effect of flip
    images[:, 16:128-16, 120:124, :] = 120
    heatmaps[:, 16:128-16, 120:124, :] = 120

    seq = iaa.Sequential([
        iaa.Fliplr(0.5, name="Flipper"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.Dropout(0.02, name="Dropout"),
        iaa.AdditiveGaussianNoise(scale=0.01*255, name="MyLittleNoise"),
        iaa.AdditiveGaussianNoise(loc=32, scale=0.0001*255, name="SomeOtherNoise"),
        iaa.Affine(translate_px={"x": (-40, 40)}, name="Affine")
    ])

    # change the activated augmenters for heatmaps
    def activator_heatmaps(images, augmenter, parents, default):
        if augmenter.name in ["GaussianBlur", "Dropout", "MyLittleNoise"]:
            return False
        else:
            # default value for all other augmenters
            return default
    hooks_heatmaps = ia.HooksImages(activator=activator_heatmaps)

    seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
    images_aug = seq_det.augment_images(images)
    heatmaps_aug = seq_det.augment_images(heatmaps, hooks=hooks_heatmaps)

    # -----------
    ia.show_grid(images_aug)
    ia.show_grid(heatmaps_aug[..., 0:3])
if __name__ == "__main__":
main()
|
infilect/ml-course1
|
week2/deconvolution_segmentation/imgaug/tests/test_readme_examples.py
|
Python
|
mit
| 13,325
|
[
"Gaussian"
] |
3ef2af145d50af69b6b5e63c9dc2dd393b563b940e0fc233d3b905ae527259e7
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
# Optional colorized terminal output: when colorama is importable, these
# helpers wrap text in ANSI color/style codes; otherwise they are plain
# pass-throughs so the rest of the script can call them unconditionally.
try:
    import colorama

    def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
    def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
    def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
    def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
    def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
    def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
    def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
    # colorama missing: emit uncolored text.
    def bright(text): return text
    def dim(text): return text
    def white(text): return text
    def blue(text): return text
    def red(text): return text
    def green(text): return text
    def yellow(text): return text
# For 'nightly' builds, use setuptools and record the conda version string
# into bokeh/__conda_version__.py; otherwise fall back to distutils.
if 'nightly' in sys.argv:
    from setuptools import setup

    sys.argv.remove('nightly')

    with open('__conda_version__.txt', 'r') as f:
        version = f.read().rstrip()

    vers_file = os.path.join('bokeh', '__conda_version__.py')
    with open(vers_file, 'w') as f:
        f.write("conda_version=" + "'" + version + "'")
else:
    from distutils.core import setup
    from distutils import dir_util

# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

# Directory layout of this checkout and of the built BokehJS artifacts.
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')

# Python 2 compatibility: use raw_input for the interactive prompts below.
if sys.version_info[0] < 3:
    input = raw_input

# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------

# versioneer configuration: how the package version is derived from git tags.
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------

# Accumulates package-relative data file paths for setup(package_data=...).
package_data = []

def package_path(path, filters=()):
    """Record *path* (a file, or every file under a directory) in the global
    package_data list, relative to the 'bokeh' package directory.

    When *filters* is a non-empty tuple of suffixes, only matching files from
    a directory tree are recorded. Raises RuntimeError if *path* is missing.
    """
    if not os.path.exists(path):
        raise RuntimeError("packaging non-existent path: %s" % path)
    if os.path.isfile(path):
        package_data.append(relpath(path, 'bokeh'))
        return
    for dirpath, _dirs, filenames in os.walk(path):
        base = relpath(dirpath, 'bokeh')
        for fname in filenames:
            if not filters or fname.endswith(filters):
                package_data.append(join(base, fname))
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python)."""
    # NOTE(review): "python" + sys.version[:3] yields e.g. "python3.1" on
    # Python 3.10+ -- this vendored workaround predates two-digit minor
    # versions; verify before reusing on modern interpreters.
    _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
    _is_pypy = hasattr(sys, 'pypy_version_info')
    _is_jython = sys.platform[:4] == 'java'

    prefixes = [sys.prefix, sys.exec_prefix]

    sitepackages = []
    seen = set()

    for prefix in prefixes:
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)

        if sys.platform in ('os2emx', 'riscos') or _is_jython:
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif _is_pypy:
            sitedirs = [os.path.join(prefix, 'site-packages')]
        elif sys.platform == 'darwin' and prefix == sys.prefix:
            if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                            os.path.join(prefix, "Extras", "lib", "python")]
            else: # any other Python distros on OSX work this way
                sitedirs = [os.path.join(prefix, "lib",
                                         "python" + sys.version[:3], "site-packages")]
        elif os.sep == '/':
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python"),
                        ]
            lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
            if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                if _is_64bit:
                    sitedirs.insert(0, lib64_dir)
                else:
                    sitedirs.append(lib64_dir)
            try:
                # sys.getobjects only available in --with-pydebug build
                sys.getobjects
                sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
            except AttributeError:
                pass
            # Debian-specific dist-packages directories:
            sitedirs.append(os.path.join(prefix, "local/lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            if sys.version_info[0] >= 3:
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[0],
                                             "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
        else:
            sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))
        for sitedir in sitedirs:
            sitepackages.append(os.path.abspath(sitedir))

    # Keep only directories that actually exist on this machine.
    sitepackages = [p for p in sitepackages if os.path.isdir(p)]
    return sitepackages
def check_remove_bokeh_install(site_packages):
    """Interactively remove a previously installed bokeh package from
    *site_packages* (run before a develop install).

    No-op when no install is present. Exits the process when the user
    declines (exit 1) or the removal fails (exit -1).
    """
    bokeh_path = join(site_packages, "bokeh")
    if not (exists(bokeh_path) and isdir(bokeh_path)):
        return
    prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
    val = input(prompt)
    if val == "y":
        print("Removing old bokeh install...", end=" ")
        try:
            shutil.rmtree(bokeh_path)
            print("Done")
        except (IOError, OSError):
            print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
            sys.exit(-1)
    else:
        print("Not removing old bokeh install")
        sys.exit(1)
def remove_bokeh_pth(path_file):
    """Delete an old bokeh .pth file if one exists.

    Returns True when a file was removed, False when none was present.
    Exits the process if the file exists but cannot be removed.
    """
    if not exists(path_file):
        return False
    try:
        os.remove(path_file)
    except (IOError, OSError):
        print("Unable to remove old path file at %s, exiting" % path_file)
        sys.exit(-1)
    return True
# Pre-rendered (and colorized, when colorama is available) status message
# templates used by build_js() below.
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""

BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""

BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""

BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
    """Run the BokehJS 'gulp build' and report output, build time, and the
    sizes of the generated JS/CSS artifacts.

    Exits the process if gulp cannot be launched or returns a non-zero code.
    """
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')

    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']

    t0 = time.time()
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        os.chdir('..')
    result = proc.wait()
    t1 = time.time()

    if result != 0:
        msg = proc.stderr.read().decode('ascii', errors='ignore')
        msg = "\n".join([" " + x for x in msg.split("\n")])
        print(BUILD_FAIL_MSG % red(msg))
        sys.exit(1)

    indented_msg = ""
    msg = proc.stdout.read().decode('ascii', errors='ignore')
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in msg.strip().split("\n"):
        m = pat.match(line)
        if m is None:
            # Lines without a "[timestamp]" prefix: indent as-is instead of
            # crashing on .groups() of a failed match.
            indented_msg += " " + dim(line) + "\n"
            continue
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))

    print()
    print("Build artifact sizes:")
    try:
        blddir = join("bokehjs", "build")
        bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
        bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
        bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
        bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
        print(" - bokeh.js : %6.1f KB" % bkjs_size)
        print(" - bokeh.css : %6.1f KB" % bkcss_size)
        print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
        print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
    """Copy the previously built BokehJS JS/CSS artifacts into the server's
    static directories, replacing whatever was there before.

    Exits the process when any required build artifact is missing.
    """
    STATIC_ASSETS = [
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    ]
    if not all(exists(a) for a in STATIC_ASSETS):
        print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
  Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
        sys.exit(1)

    # Replace each target directory wholesale with the freshly built one.
    copies = (
        (JS, join(SERVER, 'static', 'js')),
        (CSS, join(SERVER, 'static', 'css')),
    )
    for source_dir, target_dir in copies:
        if exists(target_dir):
            shutil.rmtree(target_dir)
        shutil.copytree(source_dir, target_dir)
def clean():
    """Remove the build/lib/bokeh output tree and every .pyc file under the
    current directory."""
    print("Removing prior-built items...", end=" ")

    build_dir = 'build/lib/bokeh'
    if os.path.exists(build_dir):
        dir_util.remove_tree(build_dir)

    for root, _dirs, files in os.walk('.'):
        for fname in files:
            if fname.endswith('.pyc'):
                os.remove(os.path.join(root, fname))

    print("Done")
def get_user_jsargs():
    """Interactively ask whether to build BokehJS fresh or reuse the last build.

    Returns True to build fresh, False to install the previously built copy.
    Keeps prompting until the user enters a valid choice.
    """
    print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
    choices = {"1": True, "2": False}
    while True:
        value = input("Choice? ")
        if value in choices:
            return choices[value]
        print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
def parse_jsargs():
    """Decide whether BokehJS should be built, honoring the --build_js and
    --install_js command line flags (which are consumed from sys.argv).

    Returns True when a fresh build is wanted, False otherwise; falls back
    to an interactive prompt during an install-like command with no flag.
    """
    install_cmds = ('install', 'develop', 'sdist', 'egg_info', 'build')
    installing = any(cmd in sys.argv for cmd in install_cmds)

    if '--build_js' in sys.argv:
        if not installing:
            print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
            sys.exit(1)
        sys.argv.remove('--build_js')
        return True

    if '--install_js' in sys.argv:
        # Note that --install_js can be used by itself (without sdist/install/develop)
        sys.argv.remove('--install_js')
        return False

    return get_user_jsargs() if installing else False
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------

# Aliases for build_js and install_js
for i in range(len(sys.argv)):
    if sys.argv[i] == '--build-js':
        sys.argv[i] = '--build_js'
    if sys.argv[i] == '--install-js':
        sys.argv[i] = '--install_js'

# Set up this checkout or source archive with the right BokehJS files.

if sys.version_info[:2] < (2, 6):
    raise RuntimeError("Bokeh requires python >= 2.6")

# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
    install_js()
    sys.exit(0)

# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
    if "--install_js" in sys.argv:
        print("Removing '--install_js' incompatible with 'sdist'")
        sys.argv.remove('--install_js')
    if "--build_js" not in sys.argv:
        print("Adding '--build_js' required for 'sdist'")
        sys.argv.append('--build_js')

# check for package install, set jsinstall to False to skip prompt
# (MANIFEST.in is absent from sdist packages, which also do not ship the
# BokehJS sources, so building/installing JS is disabled in that case)
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
    if "--build_js" in sys.argv or "--install_js" in sys.argv:
        print("BokehJS source code is not shipped in sdist packages; "
              "building/installing from the bokehjs source directory is disabled. "
              "To build or develop BokehJS yourself, you must clone the full "
              "Bokeh repository from https://github.com/bokeh/bokeh")
        if "--build_js" in sys.argv:
            sys.argv.remove('--build_js')
        if "--install_js" in sys.argv:
            sys.argv.remove('--install_js')
    jsbuild = False
    jsinstall = False
else:
    jsbuild = parse_jsargs()

if jsbuild:
    build_js()

if jsinstall:
    install_js()

# Collect all static/data files that must ship inside the 'bokeh' package.
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')

package_path(join(SERVER, 'static'))
package_path(join(SERVER, 'templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
package_path(join(SERVER, 'tests', 'config'))
package_path(join(SERVER, 'tests', 'data'))
scripts = ['bokeh-server', 'websocket_worker.py']

if '--user' in sys.argv:
    site_packages = site.USER_SITE
else:
    site_packages = getsitepackages()[0]

path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))

print()
if 'develop' in sys.argv:
    # Development install: point site-packages at this checkout via a .pth file.
    check_remove_bokeh_install(site_packages)
    with open(path_file, "w+") as f:
        f.write(path)
    print("Installing Bokeh for development:")
    print(" - writing path '%s' to %s" % (path, path_file))
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
    sys.exit()
elif 'clean' in sys.argv:
    clean()
elif 'install' in sys.argv:
    # Regular install: make sure no stale develop-mode .pth file survives.
    pth_removed = remove_bokeh_pth(path_file)
    print("Installing Bokeh:")
    if pth_removed:
        print(" - removed path file at %s" % path_file)
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
    if jsinstall:
        print("Bokeh-specific options available with 'install' or 'develop':")
        print()
        print(" --build_js build and install a fresh BokehJS")
        print(" --install_js install only last previously built BokehJS")
    else:
        print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
        print()
print()

REQUIRES = [
    'Flask>=0.10.1',
    'Jinja2>=2.7',
    'MarkupSafe>=0.18',
    'Werkzeug>=0.9.1',
    'greenlet>=0.4.1',
    'itsdangerous>=0.21',
    'python-dateutil>=2.1',
    'requests>=1.2.3',
    'six>=1.5.2',
    'pygments>=1.6',
    'pystache>=0.5.3',
    'markdown>=2.3.1',
    'PyYAML>=3.10',
    'pyzmq>=14.3.1',
    'tornado>=4.0.1',
    # cli
    # 'click>=3.3',
    # tests
    # 'pytest'
    # 'mock>=1.0.1',
    'colorama>=0.2.7'
]

# argparse entered the stdlib in 2.7; backfill it on 2.6.
if sys.version_info[:2] == (2, 6):
    REQUIRES.append('argparse>=1.1')

# if sys.platform != "win32":
#     REQUIRES.append('redis>=2.7.6')

if platform.python_implementation() != "PyPy":
    # You need to install PyPy's fork of NumPy to make it work:
    # pip install git+https://bitbucket.org/pypy/numpy.git
    # Also pandas is not yet working with PyPy .
    REQUIRES.extend([
        'numpy>=1.7.1',
        'pandas>=0.11.0'
    ])

_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()

setup(
    name='bokeh',
    version=_version,
    cmdclass=_cmdclass,
    packages=[
        'bokeh',
        'bokeh.models',
        'bokeh.models.tests',
        'bokeh.models.widgets',
        'bokeh.charts',
        'bokeh.charts.builder',
        'bokeh.charts.builder.tests',
        'bokeh.charts.tests',
        'bokeh.compat',
        'bokeh.compat.mplexporter',
        'bokeh.compat.mplexporter.renderers',
        'bokeh.crossfilter',
        'bokeh.sampledata',
        'bokeh.server',
        'bokeh.server.models',
        'bokeh.server.views',
        'bokeh.server.blaze',
        'bokeh.server.utils',
        'bokeh.server.tests',
        'bokeh.sphinxext',
        'bokeh.tests',
        'bokeh.transforms',
        'bokeh.util',
        'bokeh.util.tests',
        'bokeh.validation',
    ],
    package_data={'bokeh': package_data},
    author='Continuum Analytics',
    author_email='info@continuum.io',
    url='http://github.com/bokeh/bokeh',
    description='Statistical and novel interactive HTML plots for Python',
    license='New BSD',
    scripts=scripts,
    zip_safe=False,
    install_requires=REQUIRES
)
|
matbra/bokeh
|
setup.py
|
Python
|
bsd-3-clause
| 19,776
|
[
"GULP"
] |
60aeb8fb767132303f65b85c8f0c1a43b1083fdb6f9b3fb31d1c1798708bd57c
|
"""
Django settings for emw_output project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a6c*gru)siva_r)%)x$a@o1lv7k8518lck#x^tjio!hit0t(1f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'places',
'visit',
'users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'emw_output.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'emw_output.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
anhelus/emw-museum-output
|
emw_output/settings.py
|
Python
|
gpl-3.0
| 3,218
|
[
"VisIt"
] |
147027eaf86b176c8046148f8d8679bac256d109beea5301d522455696aefade
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# URL routes: static template pages, admin, auth (allauth) and user management.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("cuentas.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view references ('django.views.defaults...') were
    # deprecated in Django 1.8 and removed in 1.10 — confirm the Django version
    # this project pins before upgrading.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
|
javierwilson/cuentas
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,229
|
[
"VisIt"
] |
d5f9f9e24a93c397f560bccc3b1fef15f6176e3683d74fe49b659327df53959c
|
import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import matplotlib.pyplot as plt
import itertools
from scipy.stats import sigmaclip
from sklearn.gaussian_process import GaussianProcess
notes = """
In this case, we do a 3-d gaussian interpolation. The elevations are read from
h5py.File("gefs_elevations.nc")["elevation_control"]
This is the altitude of the first of the 11 ensembles. The other 10
runs appear to be evaluated at
h5py.File("gefs_elevations.nc")["elevation_perturbation"]
In this case, I will read in the 10 elevations, sigma clip them, and
there will be 2 altitude inputs to the 3-d GP.
"""
fMapper = {
"apcp_sfc" : "Total_precipitation",
"dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
"dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
"pres_msl" : "Pressure",
"pwat_eatm" : "Precipitable_water",
"spfh_2m" : "Specific_humidity_height_above_ground",
"tcdc_eatm" : "Total_cloud_cover",
"tcolc_eatm" : "Total_Column-Integrated_Condensate",
"tmax_2m" : "Maximum_temperature",
"tmin_2m" : "Minimum_temperature",
"tmp_2m" : "Temperature_height_above_ground",
"tmp_sfc" : "Temperature_surface",
"ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
"ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
"uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
"spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
"tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
# Minimal script for gaussian process estimation
class Mesonet(object):
    """A Mesonet station: identity, location and its GP-interpolated output."""
    def __init__(self, stid, nlat, elon, elev, npts):
        # Station id and coordinates (lat, east-lon, elevation).
        self.stid = stid
        self.nlat = nlat
        self.elon = elon
        self.elev = elev
        # One float64 field per forecast variable; rows are time steps and
        # the second axis holds the five forecast hours per day.
        rec_dtype = {"names": fKeys, "formats": [np.float64] * len(fKeys)}
        self.pdata = np.recarray((npts, 5), dtype=rec_dtype)
class GEFS(object):
    """A GEFS grid point: identity, location and its raw forecast values."""
    def __init__(self, stid, nlat, elon, elev, npts):
        # Grid-point id and coordinates (lat, east-lon, elevation).
        self.stid = stid
        self.nlat = nlat
        self.elon = elon
        self.elev = elev
        # One float64 field per forecast variable; rows are time steps and
        # the second axis holds the five forecast hours per day.
        rec_dtype = {"names": fKeys, "formats": [np.float64] * len(fKeys)}
        self.data = np.recarray((npts, 5), dtype=rec_dtype)
def runGaussianProcess(args_and_regr):
    """Fit a 3-d Gaussian process on the GEFS grid and predict at the stations.

    The single argument is a tuple ((vals, mcoords, gcoords), regr) so the
    function can be driven by multiprocessing.Pool.map over zipped task
    tuples; regr is the sklearn regression-model name ("constant", etc.).
    Returns the array of predictions at mcoords.
    """
    # Unpack explicitly: the original py2-only tuple-parameter syntax
    # ("def f((a, b)):") is a SyntaxError on Python 3.
    args, regr = args_and_regr
    nugmin = 0.025**2  # noise floor for the GP nugget
    vals, mcoords, gcoords = args
    gp = GaussianProcess(corr="squared_exponential",
                         regr=regr,
                         theta0=1e-1, thetaL=1e-2, thetaU=1,
                         normalize=True,
                         nugget=nugmin,
                         random_start=1)
    gp.fit(gcoords, vals)
    return gp.predict(mcoords)
def sigclip(data, switch):
    """Return the mean along axis 1 after masking >3-sigma outliers.

    switch selects the broadcasting pattern: True for 3-d input
    (time, ensemble, hour), False for 2-d input (time, ensemble).
    """
    center = np.mean(data, axis=1)
    spread = np.std(data, axis=1)
    # A zero spread would divide by zero below; a huge stand-in value keeps
    # those constant columns from ever being flagged as outliers.
    spread[spread == 0.0] = 1e10
    if switch:
        deviations = np.abs(data - center[:, np.newaxis, :]) / spread[:, np.newaxis, :]
    else:
        deviations = np.abs(data - center[:, np.newaxis]) / spread[:, np.newaxis]
    clipped = np.ma.array(data)
    clipped[deviations > 3.0] = np.ma.masked
    return clipped.mean(axis=1).data
if __name__ == "__main__":
switch = sys.argv[1]
if switch == "train":
npts = 5113
else:
npts = 1796
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3], npts)
mesonets[s[0]] = station
gefssC = {}
gefssE = {}
for key in fKeys:
print "# LOADING", key
if switch == "train":
f = h5py.File("../train/%s_latlon_subset_19940101_20071231.nc" % (key), "r")
else:
f = h5py.File("../test/%s_latlon_subset_20080101_20121130.nc" % (key), "r")
if len(gefssC.keys()) == 0:
print "# INITIALIZING GEFS"
sidx = 0
for latidx in range(len(f['lat'])):
for lonidx in range(len(f['lon'])):
gefssC[sidx] = GEFS(sidx, f["lat"][latidx], f["lon"][lonidx]-360., 0.0, npts)
gefssE[sidx] = GEFS(sidx, f["lat"][latidx], f["lon"][lonidx]-360., 0.0, npts)
sidx += 1
f2 = h5py.File("../gefs_elevations.nc")
sidx = 0
for latidx in range(9):
for lonidx in range(16):
gefssC[sidx] = GEFS(sidx,
f2["latitude"][latidx][lonidx],
f2["longitude"][latidx][lonidx]-360.,
f2["elevation_control"][latidx][lonidx], npts)
gefssE[sidx] = GEFS(sidx,
f2["latitude"][latidx][lonidx],
f2["longitude"][latidx][lonidx]-360.,
f2["elevation_perturbation"][latidx][lonidx], npts)
sidx += 1
sidx = 0
for latidx in range(9):
for lonidx in range(16):
dataC = f[fMapper[key]][:,0,:,latidx,lonidx]
dataE = sigclip(f[fMapper[key]][:,1:,:,latidx,lonidx], True)
gefsC = gefssC[sidx]
gefsE = gefssE[sidx]
# make sure indices make sense
assert( gefsC.nlat == f["lat"][latidx] )
assert( gefsC.elon == f["lon"][lonidx]-360.0 )
gefsC.data[key] = dataC
gefsE.data[key] = dataE
sidx += 1
# Mesonet coords
mlats = []
mlons = []
melevs = []
for mesonet in mesonets.values(): # MAKE SURE COORDS ARE READ IN THE SAME ORDER AS VALUES
mlats.append(mesonet.nlat)
mlons.append(mesonet.elon)
melevs.append(mesonet.elev)
mlats = np.array(mlats)
mlons = np.array(mlons)
melevs = np.array(melevs)
mcoords = np.array(zip(mlats,mlons,melevs))
# GEFS coords
glats = []
glons = []
gelevs = []
for gefs in gefssC.values(): # MAKE SURE COORDS ARE READ IN THE SAME ORDER AS VALUES
glats.append(gefs.nlat)
glons.append(gefs.elon)
gelevs.append(gefs.elev)
for gefs in gefssE.values(): # MAKE SURE COORDS ARE READ IN THE SAME ORDER AS VALUES
glats.append(gefs.nlat)
glons.append(gefs.elon)
gelevs.append(gefs.elev)
glats = np.array(glats)
glons = np.array(glons)
gelevs = np.array(gelevs)
gcoords = np.array(zip(glats,glons,gelevs))
# Get ready to run all the GP
pool = multiprocessing.Pool(multiprocessing.cpu_count()//2) # high mem!
args = []
for tstep in range(npts):
print "# PREPPING T", tstep
for fstep in range(5):
for key in fKeys:
vals = []
for gefs in gefssC.values(): # MAKE SURE VALUES ARE READ IN THE SAME ORDER AS COORDS
vals.append(gefs.data[key][tstep][fstep])
for gefs in gefssE.values(): # MAKE SURE VALUES ARE READ IN THE SAME ORDER AS COORDS
vals.append(gefs.data[key][tstep][fstep])
args.append((np.array(vals), mcoords, gcoords))
# vals: 288,
# mcoord: 98, 3
# gcoord: 288, 3
for regr in ("constant", "linear", "quadratic"):
results = pool.map(runGaussianProcess, itertools.izip(args, itertools.repeat(regr)))
ridx = 0
for tstep in range(npts):
print "# SAVING T", tstep
for fstep in range(5):
for key in fKeys:
result = results[ridx]
for i in range(len(result)):
mesonets.values()[i].pdata[key][tstep][fstep] = result[i]
ridx += 1
datafile = "gp3_%s_%s.pickle" % (switch, regr)
buff = open(datafile, "wb")
cPickle.dump(mesonets, buff)
buff.close()
del results
|
acbecker/solar
|
gp3/gp3.py
|
Python
|
mit
| 8,986
|
[
"Gaussian"
] |
b723123e4b209ccf21eb44d9df67470625bb42171808b71d414d88ca46d5d38b
|
''' A plot of the Normal (Gaussian) distribution. This example demonstrates the
use of mathtext on axes and in ``Div`` objects.
.. bokeh-example-metadata::
    :apis: bokeh.plotting.figure.line, bokeh.plotting.figure.quad, bokeh.models.Div, bokeh.models.TeX
    :refs: :ref:`userguide_styling` > :ref:`userguide_styling_math`
    :keywords: mathtext, latex
'''
import numpy as np
from bokeh.layouts import column
from bokeh.models import Div, TeX
from bokeh.plotting import figure, show

fig = figure(width=670, height=400, toolbar_location=None,
             title="Normal (Gaussian) Distribution")

# Draw n samples from a normal distribution with a fixed seed.
n = 1000
rng = np.random.default_rng(825914)
samples = rng.normal(loc=4.7, scale=12.3, size=n)

# Standardize the samples to zero mean and unit standard deviation.
standardized = (samples - samples.mean()) / samples.std()

# Histogram of the standardized samples.
bins = np.linspace(-3, 3, 40)
heights, edges = np.histogram(standardized, density=True, bins=bins)
fig.quad(top=heights, bottom=0, left=edges[:-1], right=edges[1:],
         fill_color="skyblue", line_color="white",
         legend_label=f"{n} random samples")

# Ideal probability density function over the same interval.
grid = np.linspace(-3.0, 3.0, 100)
density = np.exp(-0.5*grid**2) / np.sqrt(2.0*np.pi)
fig.line(grid, density, line_width=2, line_color="navy",
         legend_label="Probability Density Function")

fig.y_range.start = 0
fig.xaxis.axis_label = "x"
fig.yaxis.axis_label = "PDF(x)"

# LaTeX tick labels expressed in units of the sample mean and sigma.
fig.xaxis.ticker = [-3, -2, -1, 0, 1, 2, 3]
fig.xaxis.major_label_overrides = {
    -3: TeX(r"\overline{x} - 3\sigma"),
    -2: TeX(r"\overline{x} - 2\sigma"),
    -1: TeX(r"\overline{x} - \sigma"),
    0: TeX(r"\overline{x}"),
    1: TeX(r"\overline{x} + \sigma"),
    2: TeX(r"\overline{x} + 2\sigma"),
    3: TeX(r"\overline{x} + 3\sigma"),
}
fig.yaxis.ticker = [0, 0.1, 0.2, 0.3, 0.4]
fig.yaxis.major_label_overrides = {
    0: TeX(r"0"),
    0.1: TeX(r"0.1/\sigma"),
    0.2: TeX(r"0.2/\sigma"),
    0.3: TeX(r"0.3/\sigma"),
    0.4: TeX(r"0.4/\sigma"),
}

# Caption with the PDF equation rendered via LaTeX.
div = Div(text=r"""
A histogram of a samples from a Normal (Gaussian) distribution, together with
the ideal probability density function, given by the equation:
<p />
$$
\qquad PDF(x) = \frac{1}{\sigma\sqrt{2\pi}} \exp\left[-\frac{1}{2}
\left(\frac{x-\overline{x}}{\sigma}\right)^2 \right]
$$
""")

show(column(fig, div))
|
bokeh/bokeh
|
examples/plotting/file/latex_normal_distribution.py
|
Python
|
bsd-3-clause
| 2,248
|
[
"Gaussian"
] |
482023903bf54951d7bde8ba3e224e47ee39753d8940190fe7eab8cc606b4343
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import framework
import numpy as np
import contextlib
from .core import VarDesc
# Public API: short aliases (Constant, Uniform, ...) plus the full class
# names and the CPU-init helpers.
__all__ = [
    'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
    'MSRA', 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer',
    'UniformInitializer', 'NormalInitializer', 'TruncatedNormalInitializer',
    'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer'
]
# Module-level flag toggled by init_on_cpu(); read by the initializers below.
_force_init_on_cpu_ = False


def force_init_on_cpu():
    """
    The flag of whether force to init variables on CPU.

    Returns:
        bool: True while inside an ``init_on_cpu()`` context.

    Examples:
        .. code-block:: python

            if force_init_on_cpu():
                pass
    """
    return _force_init_on_cpu_


@contextlib.contextmanager
def init_on_cpu():
    """
    Force the variable to be inited on CPU.

    Examples:
        .. code-block:: python

            with init_on_cpu():
                step = layers.create_global_var()
    """
    global _force_init_on_cpu_
    pre_state = force_init_on_cpu()
    _force_init_on_cpu_ = True
    try:
        yield
    finally:
        # Restore the previous flag even if the body raised; otherwise one
        # failing block would leave CPU-init forced on for the whole process.
        _force_init_on_cpu_ = pre_state
class Initializer(object):
    """Base class for variable initializers.

    Initializers add operations to the init program that fill variables
    with their starting values. This class only defines the common
    interface; use one of the concrete subclasses.
    """
    def __init__(self):
        pass

    def __call__(self, param, block):
        """Add the initialization ops for ``param`` to ``block``.

        Subclasses must override this.
        """
        raise NotImplementedError()

    def _compute_fans(self, var):
        """Estimate (fan_in, fan_out) for ``var`` from its shape.

        Exact for matrix multiplies and convolution kernels; scalars and
        vectors fall back to sensible defaults.

        Args:
            var: variable whose shape is inspected.

        Returns:
            tuple of two integers (fan_in, fan_out)
        """
        shape = var.shape
        if not shape:
            # Scalar: no meaningful fan, use 1 for both.
            return (1, 1)
        if len(shape) == 1:
            # Vector: treat the single dimension as both fans.
            return (shape[0], shape[0])
        if len(shape) == 2:
            # Plain matrix multiply: rows feed in, columns feed out.
            return (shape[0], shape[1])
        # Convolutional kernel shaped [num_filters, num_channels, *spatial]:
        # each fan is scaled by the receptive-field size.
        receptive_field_size = np.prod(shape[2:])
        return (shape[1] * receptive_field_size,
                shape[0] * receptive_field_size)
class ConstantInitializer(Initializer):
    """Fills the variable with a single constant value.

    Args:
        value (float): constant value to initialize the variable
        force_cpu (bool): always run the fill op on CPU

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Constant(value=2.0))
    """
    def __init__(self, value=0.0, force_cpu=False):
        assert value is not None
        super(ConstantInitializer, self).__init__()
        self._value = value
        self._force_cpu = force_cpu

    def __call__(self, var, block):
        """Prepend a fill_constant op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        # Initialization must run before anything else touches the variable,
        # hence prepend rather than append.
        fill_attrs = {
            "shape": var.shape,
            "dtype": int(var.dtype),
            "value": float(self._value),
            'force_cpu': self._force_cpu or force_init_on_cpu(),
        }
        op = block._prepend_op(
            type="fill_constant", outputs={"Out": var}, attrs=fill_attrs)
        var.op = op
        return op
class UniformInitializer(Initializer):
    """Draws initial values from a uniform distribution on [low, high].

    Args:
        low (float): lower boundary of the uniform distribution
        high (float): upper boundary of the uniform distribution
        seed (int): random seed (0 means use the program-level seed)

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
    """
    def __init__(self, low=-1.0, high=1.0, seed=0):
        assert low is not None
        assert high is not None
        assert high >= low
        assert seed is not None
        super(UniformInitializer, self).__init__()
        self._low = low
        self._high = high
        self._seed = seed

    def __call__(self, var, block):
        """Prepend a uniform_random op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        # Initialization must run before anything else touches the variable,
        # hence prepend rather than append.
        if self._seed == 0:
            # Fall back to the program-wide seed when none was given.
            self._seed = block.program.random_seed
        sample_attrs = {
            "shape": var.shape,
            "dtype": int(var.dtype),
            "min": self._low,
            "max": self._high,
            "seed": self._seed,
        }
        op = block._prepend_op(
            type="uniform_random", outputs={"Out": var}, attrs=sample_attrs)
        var.op = op
        return op
class NormalInitializer(Initializer):
    """Draws initial values from a Normal (Gaussian) distribution.

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed (0 means use the program-level seed)

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
    """
    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(NormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block):
        """Prepend a gaussian_random op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        # Initialization must run before anything else touches the variable,
        # hence prepend rather than append.
        if self._seed == 0:
            # Fall back to the program-wide seed when none was given.
            self._seed = block.program.random_seed
        sample_attrs = {
            "shape": var.shape,
            "dtype": int(var.dtype),
            "mean": self._mean,
            "std": self._std_dev,
            "seed": self._seed,
            "use_mkldnn": False,
        }
        op = block._prepend_op(
            type="gaussian_random", outputs={"Out": var}, attrs=sample_attrs)
        var.op = op
        return op
class TruncatedNormalInitializer(Initializer):
    """Draws initial values from a truncated Normal (Gaussian) distribution.

    Args:
        loc (float): mean of the normal distribution
        scale (float): standard deviation of the normal distribution
        seed (int): random seed (0 means use the program-level seed)

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10,
                param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
    """
    def __init__(self, loc=0.0, scale=1.0, seed=0):
        assert loc is not None
        assert scale is not None
        assert seed is not None
        super(TruncatedNormalInitializer, self).__init__()
        self._mean = loc
        self._std_dev = scale
        self._seed = seed

    def __call__(self, var, block):
        """Prepend a truncated_gaussian_random op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        # Initialization must run before anything else touches the variable,
        # hence prepend rather than append.
        if self._seed == 0:
            # Fall back to the program-wide seed when none was given.
            self._seed = block.program.random_seed
        sample_attrs = {
            "shape": var.shape,
            "dtype": int(var.dtype),
            "mean": self._mean,
            "std": self._std_dev,
            "seed": self._seed,
        }
        op = block._prepend_op(
            type="truncated_gaussian_random",
            outputs={"Out": var},
            attrs=sample_attrs)
        var.op = op
        return op
class XavierInitializer(Initializer):
    r"""
    This class implements the Xavier weight initializer from the paper
    `Understanding the difficulty of training deep feedforward neural
    networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
    by Xavier Glorot and Yoshua Bengio.

    This initializer is designed to keep the scale of the gradients
    approximately the same in all layers. For the Uniform distribution
    the range is [-x, x], where

    .. math::
        x = \sqrt{\frac{6.0}{fan\_in + fan\_out}}

    For the Normal distribution the mean is 0 and the standard deviation is

    .. math::
        \sqrt{\frac{2.0}{fan\_in + fan\_out}}

    Args:
        uniform (bool): whether to use uniform or normal distribution
        fan_in (float): fan_in for Xavier initialization. If None, it is
                        inferred from the variable.
        fan_out (float): fan_out for Xavier initialization. If None, it is
                         inferred from the variable.
        seed (int): random seed

    Note:
        It is recommended to set fan_in and fan_out to None for most cases.

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(
                input=queries, size=10,
                param_attr=fluid.initializer.Xavier(uniform=False))
    """
    def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):
        assert uniform is not None
        assert seed is not None
        super(XavierInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._fan_out = fan_out
        self._seed = seed

    def __call__(self, var, block):
        """Prepend the Xavier initialization op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        inferred_in, inferred_out = self._compute_fans(var)
        # Explicit fan values (if given) override the inferred ones.
        fan_in = self._fan_in if self._fan_in is not None else inferred_in
        fan_out = self._fan_out if self._fan_out is not None else inferred_out
        if self._seed == 0:
            # Fall back to the program-wide seed when none was given.
            self._seed = block.program.random_seed
        if self._uniform:
            limit = np.sqrt(6.0 / float(fan_in + fan_out))
            sample_attrs = {
                "shape": var.shape,
                "dtype": int(var.dtype),
                "min": -limit,
                "max": limit,
                "seed": self._seed,
            }
            op = block._prepend_op(
                type="uniform_random", outputs={"Out": var},
                attrs=sample_attrs)
        else:
            std = np.sqrt(2.0 / float(fan_in + fan_out))
            sample_attrs = {
                "shape": var.shape,
                "dtype": int(var.dtype),
                "mean": 0.0,
                "std": std,
                "seed": self._seed,
            }
            op = block._prepend_op(
                type="gaussian_random", outputs={"Out": var},
                attrs=sample_attrs)
        var.op = op
        return op
class MSRAInitializer(Initializer):
    r"""Implements the MSRA initializer a.k.a. Kaiming Initializer.

    This class implements the weight initialization from the paper
    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. It is a
    robust initialization method that particularly considers the rectifier
    nonlinearities. For the Uniform distribution the range is [-x, x], where

    .. math::
        x = \sqrt{\frac{6.0}{fan\_in}}

    For the Normal distribution the mean is 0 and the standard deviation is

    .. math::
        \sqrt{\frac{2.0}{fan\_in}}

    Args:
        uniform (bool): whether to use uniform or normal distribution
        fan_in (float): fan_in for MSRAInitializer. If None, it is
                        inferred from the variable.
        seed (int): random seed

    Note:
        It is recommended to set fan_in to None for most cases.

    Examples:
        .. code-block:: python

            fc = fluid.layers.fc(
                input=queries, size=10,
                param_attr=fluid.initializer.MSRA(uniform=False))
    """
    def __init__(self, uniform=True, fan_in=None, seed=0):
        """Constructor for MSRAInitializer."""
        assert uniform is not None
        assert seed is not None
        super(MSRAInitializer, self).__init__()
        self._uniform = uniform
        self._fan_in = fan_in
        self._seed = seed

    def __call__(self, var, block):
        """Prepend the MSRA initialization op for ``var`` to ``block``.

        Args:
            var: Variable that needs to be initialized
            block: The block in which initialization ops should be added

        Returns:
            the initialization op
        """
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        inferred_in, _ = self._compute_fans(var)
        # An explicit fan_in (if given) overrides the inferred one.
        fan_in = self._fan_in if self._fan_in is not None else inferred_in
        if self._seed == 0:
            # Fall back to the program-wide seed when none was given.
            self._seed = block.program.random_seed
        if self._uniform:
            limit = np.sqrt(6.0 / float(fan_in))
            sample_attrs = {
                "shape": var.shape,
                "dtype": int(var.dtype),
                "min": -limit,
                "max": limit,
                "seed": self._seed,
            }
            op = block._prepend_op(
                type="uniform_random", outputs={"Out": var},
                attrs=sample_attrs)
        else:
            std = np.sqrt(2.0 / float(fan_in))
            sample_attrs = {
                "shape": var.shape,
                "dtype": int(var.dtype),
                "mean": 0.0,
                "std": std,
                "seed": self._seed,
            }
            op = block._prepend_op(
                type="gaussian_random", outputs={"Out": var},
                attrs=sample_attrs)
        var.op = op
        return op
class BilinearInitializer(Initializer):
    """
    This initializer can be used in transposed convolution operator to
    act as upsampling. Users can upsample a feature map with shape of
    (B, C, H, W) by any integer factor. The usage is:

    Examples:
        .. code-block:: python

            factor = 2
            w_attr = ParamAttr(learning_rate=0., regularizer=L2Decay(0.),
                               initializer=Bilinear())
            conv_up = fluid.layers.conv2d_transpose(
                input,
                num_filters=C,
                output_size=None,
                filter_size=2 * factor - factor % 2,
                padding=ceil((factor - 1) / 2.),
                stride=factor,
                groups=C,
                param_attr=w_attr,
                bias_attr=False)

    Where, `num_filters=C` and `groups=C` means this is channel-wise transposed
    convolution. The filter shape will be (C, 1, K, K) where K is `filer_size`,
    This initializer will set a (K, K) interpolation kernel for every channel
    of the filter identically. The resulting shape of the output feature map
    will be (B, C, factor * H, factor * W). Note that the learning rate and the
    weight decay are set to 0 in order to keep coefficient values of bilinear
    interpolation unchanged during training.
    """
    def __init__(self):
        """Constructor for BilinearInitializer.
        """
        super(BilinearInitializer, self).__init__()

    def __call__(self, var, block):
        """Add bilinear initialization ops for a variable

        Args:
            var (Variable): Variable that needs to be initialized.
            block (Block): The block in which initialization ops should
                           be added.

        Returns:
            Operator: the initialization op

        Raises:
            ValueError: If type of `var` and `block` is not right.
                        If the shape of `var` size is not 4 and
                        var.shape[2] != var.shape[3].
        """
        if not isinstance(var, framework.Variable):
            raise ValueError("var must be framework.Variable.")
        if not isinstance(block, framework.Block):
            raise ValueError("block must be framework.Block.")
        shape = var.shape
        if len(shape) != 4:
            raise ValueError("the length of shape must be 4.")
        if shape[2] != shape[3]:
            raise ValueError("shape[2] must be equal to shape[3].")
        weight = np.zeros(np.prod(var.shape), dtype='float32')
        size = shape[3]
        # Upsampling factor and kernel center of the interpolation kernel.
        f = np.ceil(size / 2.)
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % size
            # BUG FIX: must be integer (floor) division. Under Python 3,
            # "i / size" yields a float row index and corrupts the kernel;
            # "//" reproduces the intended (Python 2) behavior.
            y = (i // size) % size
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        weight = np.reshape(weight, shape)
        if var.dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in weight.flat]
        else:
            # BUG FIX: previously raised ValueError("...%s", input.dtype),
            # which never formatted the message and read the builtin `input`
            # instead of `var`.
            raise ValueError("Unsupported dtype %s" % var.dtype)
        if np.prod(shape) > 1024 * 1024:
            raise ValueError("The size of input is too big. ")
        op = block.append_op(
            type='assign_value',
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': list(shape),
                value_name: values
            })
        var.op = op
        return op
# We shorten the class names, since users will use the initializer with the
# package name. The sample code:
#
#   import paddle.fluid as fluid
#
#   hidden = fluid.layers.fc(...,
#                            param_attr=ParamAttr(fluid.initializer.Xavier()))
#
# There is no need to add an `Initializer` suffix at the call site; each
# alias below points at the corresponding *Initializer class defined above.
Constant = ConstantInitializer
Uniform = UniformInitializer
Normal = NormalInitializer
TruncatedNormal = TruncatedNormalInitializer
Xavier = XavierInitializer
MSRA = MSRAInitializer
Bilinear = BilinearInitializer
|
reyoung/Paddle
|
python/paddle/fluid/initializer.py
|
Python
|
apache-2.0
| 20,383
|
[
"Gaussian"
] |
b11d70c51399a355a2e5a6786ef8a38e78ddf48e4640f6ed92c9b6c688afdf4f
|
#!/usr/bin/env python
'''Description'''
import sys
from Bio.Blast import NCBIXML
def find_match(xmlfile):
    """Scan a BLAST XML report and write strong hits to ``<xmlfile>.out``.

    For each query with at least one alignment, the best (first) HSP of the
    best (first) alignment is inspected; hits with E-value below 1e-20 are
    written as tab-separated "query<TAB>title<TAB>align_length" lines.
    """
    aligned_bases = 0
    # BUG FIX: both file handles were previously leaked (the XML input was
    # never closed, and the output leaked on any parse error). `with`
    # guarantees closure; op.write replaces the Python-2-only "print >>".
    with open(xmlfile) as handle:
        with open(xmlfile + '.out', 'w') as op:
            for record in NCBIXML.parse(handle):
                if record.alignments:  # if matched,
                    alm = record.alignments[0]
                    hsp = alm.hsps[0]
                    aligned_bases += hsp.align_length
                    if hsp.expect < 1e-20:
                        op.write('%s\t%s\t%d\n' %
                                 (record.query,
                                  alm.title,
                                  hsp.align_length))
def main():
    '''Entry point: process the BLAST XML file named on the command line.'''
    find_match(sys.argv[1])


if __name__=='__main__':
    main()
|
likit/gimme_protocols
|
find_match.py
|
Python
|
bsd-3-clause
| 740
|
[
"BLAST"
] |
cc47219f5d8b3f3d2a833bdbd3c025ee97bfbd2a9e230f11d72441f17dfff618
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import os
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio.tasks import *
from pymatgen.io.abinitio.tasks import TaskPolicy
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class TaskManagerTest(PymatgenTest):
    def test_base(self):
        """
        Simple unit tests for Qadapter subclasses.
        A more complete coverage would require integration testing.
        """
        # Initialize the object from YAML file.
        slurm_manager = TaskManager.from_file(os.path.join(test_dir, "taskmanager.yml"))
        print(slurm_manager)
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(slurm_manager.tot_cores, 2)
        self.assertEqual(slurm_manager.mpi_procs, 2)
        self.assertEqual(slurm_manager.omp_threads, 1)
        # Make a simple shell manager that will inherit the initial configuration.
        shell_manager = slurm_manager.to_shell_manager(mpi_procs=1)
        self.assertEqual(shell_manager.tot_cores, 1)
        self.assertEqual(shell_manager.mpi_procs, 1)
        # Check that the initial slurm_manager has not been modified by the copy.
        self.assertEqual(slurm_manager.tot_cores, 2)
        # Test pickle round-trip of the manager.
        self.serialize_with_pickle(slurm_manager, test_eq=False)
class ParalHintsTest(PymatgenTest):
    def test_base(self):
        """Testing ParalHints."""
        # Raw Autoparal section as ABINIT emits it; parsed from a temp file below.
        s = \
"""--- !Autoparal
#Autoparal section for Sigma runs.
info:
    autoparal: 1
    max_ncpus: 4
    nkpt: 6
    nsppol: 1
    nspinor: 1
    nbnds: 10
configurations:
- tot_ncpus: 1
  mpi_ncpus: 1
  efficiency: 1.000000000
  mem_per_cpu: 11.54
  vars: {npfft: 1, npkpt: 1}
- tot_ncpus: 2
  mpi_ncpus: 2
  efficiency: 1.000000000
  mem_per_cpu: 7.42
  vars: {npfft: 1, npkpt: 2}
- tot_ncpus: 2
  mpi_ncpus: 2
  efficiency: 0.100000000
  mem_per_cpu: 9.42
  vars: {npfft: 2, npkpt: 1}
- tot_ncpus: 3
  mpi_ncpus: 3
  efficiency: 0.833333333
  mem_per_cpu: 6.60
  vars: {npfft: 3, npkpt: 1}
- tot_ncpus: 4
  mpi_ncpus: 4
  efficiency: 0.833333333
  mem_per_cpu: 15.77
  vars: {npfft: 2, npkpt: 2}
...
"""
        tmpfile = self.tmpfile_write(s)
        aequal = self.assertEqual
        # Parse the file with the configurations.
        confs = ParalHintsParser().parse(tmpfile)
        #print("all_confs:\n", confs)
        # When autoparal is 1, max_ncpus must be specified
        with self.assertRaises(ValueError):
            policy = TaskPolicy(autoparal=1)
            # BUG FIX: a stray `self` was passed as the first argument here;
            # every other call site uses select_optimal_conf(policy).
            optimal = confs.select_optimal_conf(policy)
        # Optimize speedup with ncpus <= max_ncpus
        policy = TaskPolicy(autoparal=1, max_ncpus=3)
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 3)
        # Optimize speedup with ncpus <= max_ncpus and condition on efficiency.
        policy = TaskPolicy(autoparal=1, max_ncpus=4, condition={"efficiency": {"$ge": 0.9}})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 2)
        # Optimize speedup with ncpus <= max_ncpus and conditions on efficiency and mem_per_cpu.
        policy = TaskPolicy(autoparal=1, mode="default", max_ncpus=4,
                            condition={"$and": [{"efficiency": {"$ge": 0.8}}, {"mem_per_cpu": {"$le": 7.0}}]})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 3)
        # If no configuration satisfies the constraints, we return the conf with the highest speedup.
        policy = TaskPolicy(autoparal=1, max_ncpus=4, condition={"efficiency": {"$ge": 100}})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 4)
        # Wrong conditions --> dump a warning and return the conf with the highest speedup.
        policy = TaskPolicy(autoparal=1, max_ncpus=4, condition={"foobar": {"$ge": 100}})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 4)
        # Select configuration with npfft == 3
        policy = TaskPolicy(autoparal=1, max_ncpus=4, vars_condition={"npfft": {"$eq": 3}})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 3)
        aequal(optimal.vars["npfft"], 3)
        # Select configuration with npfft == 2 and npkpt == 1
        policy = TaskPolicy(autoparal=1, max_ncpus=4,
                            vars_condition={"$and": [{"npfft": {"$eq": 2}}, {"npkpt": {"$eq": 1}}]})
        optimal = confs.select_optimal_conf(policy)
        aequal(optimal.tot_cores, 2)
        aequal(optimal.vars["npfft"], 2)
        aequal(optimal.vars["npkpt"], 1)
        #assert 0
if __name__ == '__main__':
    # Run the unit tests in this module when executed as a script.
    import unittest
    unittest.main()
|
yanikou19/pymatgen
|
pymatgen/io/abinitio/tests/test_tasks.py
|
Python
|
mit
| 4,854
|
[
"pymatgen"
] |
c8ecb14bb70d65bcfae9ad811a26888ed031bf76cfdc0531a8c4807c188d7f60
|
#!/usr/bin/env python
"""
Renames a dataset file by appending _purged to the file name so that it can later be removed from disk.
Usage: python rename_purged_datasets.py purge.log
"""
import sys, os
assert sys.version_info[:2] >= ( 2, 4 )
def main():
    """Rename each purged dataset listed in the input log by appending
    "_purged" to its file name, and record the renamed paths in
    "<infile>.renamed.log" so the files can later be removed from disk.

    Usage: python rename_purged_datasets.py purge.log

    Fixes: the original used Python-2-only syntax (``print >> out``,
    ``except Exception, exc``) and never closed its file handles.
    """
    infile = sys.argv[1]
    outfile = infile + ".renamed.log"
    renamed_files = 0
    with open(outfile, 'w') as out:
        out.write("# The following renamed datasets can be removed from disk\n")
        with open(infile) as log:
            for line in log:
                line = line.rstrip('\r\n')
                # Only dataset files under the Galaxy data directory are renamed.
                if line and line.startswith('/var/opt/galaxy'):
                    purged_filename = line + "_purged"
                    try:
                        os.rename(line, purged_filename)
                        out.write(purged_filename + "\n")
                        renamed_files += 1
                    except Exception as exc:
                        # Best-effort: log the failure and keep processing.
                        out.write("# Error, exception " + str(exc) + " caught attempting to rename " + purged_filename + "\n")
        out.write("# Renamed " + str(renamed_files) + " files\n")

if __name__ == "__main__":
    main()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/scripts/cleanup_datasets/rename_purged_datasets.py
|
Python
|
gpl-3.0
| 1,066
|
[
"Galaxy"
] |
5ca4d5272a21ab6d1a0936a9bc5efc7e5300c8da265410d96fa746acaab5b46f
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a list of places.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# PlaceListOption class
#
#-------------------------------------------------------------------------
class PlaceListOption(Option):
    """
    This class describes a widget that allows multiple places from the
    database to be selected.
    """
    def __init__(self, label):
        """
        :param label: A label to be applied to this option.
            Example: "Places"
        :type label: string
        :return: nothing

        The option value is initialised to the empty string; it is later
        expected to hold a set of place GIDs as a space-separated string,
        e.g. "111 222 333 444".
        """
        Option.__init__(self, label, "")
|
pmghalvorsen/gramps_branch
|
gramps/gen/plug/menu/_placelist.py
|
Python
|
gpl-2.0
| 1,794
|
[
"Brian"
] |
f4c5d59bf924e419c7db62482808fdfd16d8e2d170f49f3c67c9122f9417f8b8
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""Create Invenio collection cache."""
__revision__ = "$Id$"
import calendar
import copy
import sys
import cgi
import re
import os
import string
import time
from six.moves import cPickle
from invenio.config import \
CFG_CERN_SITE, \
CFG_WEBSEARCH_INSTANT_BROWSE, \
CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS, \
CFG_WEBSEARCH_I18N_LATEST_ADDITIONS, \
CFG_CACHEDIR, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_LANGS, \
CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_SCOAP3_SITE, \
CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES
from invenio.base.i18n import gettext_set_language
from invenio.legacy.search_engine import search_pattern_parenthesised, get_creation_date, get_field_i18nname, collection_restricted_p, sort_records, EM_REPOSITORY
from invenio.legacy.dbquery import run_sql, Error, get_table_update_time
from invenio.legacy.bibrank.record_sorter import get_bibrank_methods
from invenio.utils.date import convert_datestruct_to_dategui, strftime
from invenio.modules.formatter import format_record
from invenio.utils.shell import mymkdir
from intbitset import intbitset
from invenio.legacy.websearch_external_collections import \
external_collection_load_states, \
dico_collection_external_searches, \
external_collection_sort_engine_by_name
from invenio.legacy.bibsched.bibtask import task_init, task_get_option, task_set_option, \
write_message, task_has_option, task_update_progress, \
task_sleep_now_if_required
import invenio.legacy.template
websearch_templates = invenio.legacy.template.load('websearch')
from invenio.legacy.websearch_external_collections.searcher import external_collections_dictionary
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_TIMEOUT
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_NBRECS
from invenio.base.signals import webcoll_after_webpage_cache_update, \
webcoll_after_reclist_cache_update
## global vars
COLLECTION_HOUSE = {} # will hold collections we treat in this run of the program; a dict of {collname2, collobject1}, ...
# CFG_CACHE_LAST_UPDATED_TIMESTAMP_TOLERANCE -- cache timestamp
# tolerance (in seconds), to account for the fact that an admin might
# accidentally happen to edit the collection definitions at exactly
# the same second when some webcoll process was about to be started.
# In order to be safe, let's put an exaggerated timestamp tolerance
# value such as 20 seconds:
CFG_CACHE_LAST_UPDATED_TIMESTAMP_TOLERANCE = 20
# CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE -- location of the cache
# timestamp file:
CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE = "%s/collections/last_updated" % CFG_CACHEDIR
# CFG_CACHE_LAST_FAST_UPDATED_TIMESTAMP_FILE -- location of the cache
# timestamp file usef when running webcoll in the fast-mode.
CFG_CACHE_LAST_FAST_UPDATED_TIMESTAMP_FILE = "%s/collections/last_fast_updated" % CFG_CACHEDIR
def get_collection(colname):
    """Return collection object from the collection house for given colname.
    If does not exist, then create it."""
    try:
        return COLLECTION_HOUSE[colname]
    except KeyError:
        # First time we see this collection in this run: build and cache it.
        COLLECTION_HOUSE[colname] = Collection(colname)
        return COLLECTION_HOUSE[colname]
## auxiliary functions:
def is_selected(var, fld):
    """Return ' selected="selected"' when *var* equals *fld*, otherwise the
    empty string.  Useful when rendering HTML select boxes."""
    return ' selected="selected"' if var == fld else ""
def get_field(recID, tag):
    """Return the list of values of field *tag* for record *recID*.

    :param recID: record system number
    :param tag: MARC tag (e.g. "245__a"); its first two characters select
        the bibXXx table pair to query
    :return: list of field values, possibly empty
    """
    out = []
    digit = tag[0:2]
    # Table names cannot be bound as query parameters; they are derived
    # only from the two leading characters of the tag.
    bx = "bib%sx" % digit
    bibx = "bibrec_bib%sx" % digit
    # Bind recID and tag as parameters instead of interpolating them into
    # the SQL string (consistent with the other parameterized run_sql()
    # calls in this module, and safe against SQL injection).
    query = "SELECT bx.value FROM %s AS bx, %s AS bibx " \
            "WHERE bibx.id_bibrec=%%s AND bx.id=bibx.id_bibxxx AND bx.tag=%%s" \
            % (bx, bibx)
    res = run_sql(query, (recID, tag))
    for row in res:
        out.append(row[0])
    return out
def check_nbrecs_for_all_external_collections():
    """Check if any of the external collections have changed their total number
    of records (nbrecs).  Return True as soon as one collection reports a
    change, False when all of them are unchanged."""
    rows = run_sql("SELECT name FROM collection WHERE dbquery LIKE 'hostedcollection:%';")
    # any() short-circuits exactly like the original early-return loop.
    return any(get_collection(row[0]).check_nbrecs_for_external_collection()
               for row in rows)
class Collection:
"Holds the information on collections (id,name,dbquery)."
def __init__(self, name=""):
    "Creates collection instance by querying the DB configuration database about 'name'."
    # Per-run memoization flags, reset for every webcoll invocation.
    self.calculate_reclist_run_already = 0 # to speed things up without much refactoring
    self.update_reclist_run_already = 0 # to speed things up without much refactoring
    self.reclist_updated_since_start = 0 # to check if webpage cache need rebuilding
    self.reclist_with_nonpublic_subcolls = intbitset()
    # temporary counters for the number of records in hosted collections
    self.nbrecs_tmp = None # number of records in a hosted collection
    self.nbrecs_from_hosted_collections = 0 # total number of records from
    # descendant hosted collections
    if not name:
        self.name = CFG_SITE_NAME # by default we are working on the home page
        self.id = 1
        self.dbquery = None
        self.nbrecs = None
        self.reclist = intbitset()
        self.old_reclist = intbitset()
        self.reclist_updated_since_start = 1
    else:
        self.name = name
        try:
            res = run_sql("""SELECT id,name,dbquery,nbrecs,reclist FROM collection
                             WHERE name=%s""", (name,))
            if res:
                # Collection found: load its configuration row.
                self.id = res[0][0]
                self.name = res[0][1]
                self.dbquery = res[0][2]
                self.nbrecs = res[0][3]
                try:
                    # reclist is stored in the DB as a serialized intbitset blob.
                    self.reclist = intbitset(res[0][4])
                except:
                    # NOTE(review): bare except — any deserialization failure
                    # silently resets the reclist and forces a recount;
                    # consider narrowing to Exception.
                    self.reclist = intbitset()
                    self.reclist_updated_since_start = 1
            else: # collection does not exist!
                self.id = None
                self.dbquery = None
                self.nbrecs = None
                self.reclist = intbitset()
                self.reclist_updated_since_start = 1
            # Snapshot so later updates can detect whether the reclist changed.
            self.old_reclist = intbitset(self.reclist)
        except Error as e:
            print("Error %d: %s" % (e.args[0], e.args[1]))
            sys.exit(1)
def get_example_search_queries(self):
    """Return the list of example search query bodies attached to this
    collection, ordered by their configured score."""
    rows = run_sql("""SELECT example.body FROM example
        LEFT JOIN collection_example on example.id=collection_example.id_example
        WHERE collection_example.id_collection=%s ORDER BY collection_example.score""", (self.id,))
    queries = []
    for row in rows:
        queries.append(row[0])
    return queries
def get_name(self, ln=CFG_SITE_LANG, name_type="ln", prolog="", epilog="", prolog_suffix=" ", epilog_suffix=""):
    """Return nicely formatted collection name for language LN.
    The NAME_TYPE may be 'ln' (=long name), 'sn' (=short name), etc."""
    rows = run_sql("SELECT value FROM collectionname WHERE id_collection=%s AND ln=%s AND type=%s",
                   (self.id, ln, name_type))
    i18name = ""
    try:
        i18name += rows[0][0]
    except IndexError:
        # No translated name configured for this language/type.
        pass
    # Fall back to the internal collection name when no i18n name exists.
    body = i18name if i18name else self.name
    return prolog + body + epilog
def get_collectionbox_name(self, ln=CFG_SITE_LANG, box_type="r"):
    """
    Return collection-specific labelling of 'Focus on' (regular
    collection), 'Narrow by' (virtual collection) and 'Latest
    addition' boxes.
    If translation for given language does not exist, use label
    for CFG_SITE_LANG. If no custom label is defined for
    CFG_SITE_LANG, return default label for the box.
    @param ln: the language of the label
    @param box_type: can be 'r' (=Narrow by), 'v' (=Focus on), 'l' (=Latest additions)
    """
    i18name = ""
    # 1) custom label in the requested language...
    res = run_sql("SELECT value FROM collectionboxname WHERE id_collection=%s AND ln=%s AND type=%s", (self.id, ln, box_type))
    try:
        i18name = res[0][0]
    except IndexError:
        # 2) ...then custom label in the site default language...
        res = run_sql("SELECT value FROM collectionboxname WHERE id_collection=%s AND ln=%s AND type=%s", (self.id, CFG_SITE_LANG, box_type))
        try:
            i18name = res[0][0]
        except IndexError:
            pass
    if not i18name:
        # 3) ...finally the built-in default label for the box type.
        # load the right message language
        _ = gettext_set_language(ln)
        if box_type == "v":
            i18name = _('Focus on:')
        elif box_type == "r":
            if CFG_SCOAP3_SITE:
                i18name = _('Narrow by publisher/journal:')
            else:
                i18name = _('Narrow by collection:')
        elif box_type == "l":
            i18name = _('Latest additions:')
    return i18name
def get_ancestors(self):
    "Returns list of ancestors of the current collection."
    ancestors = []
    ancestors_ids = intbitset()
    id_son = self.id
    while 1:
        # Climb one level at a time towards the root of the collection tree.
        query = "SELECT cc.id_dad,c.name FROM collection_collection AS cc, collection AS c "\
                "WHERE cc.id_son=%d AND c.id=cc.id_dad" % int(id_son)
        res = run_sql(query, None, 1)
        if res:
            col_ancestor = get_collection(res[0][1])
            # looking for loops
            # NOTE(review): this only detects cycles that pass through self
            # (self.id appearing among the collected ancestor ids); presumably
            # other cycles are prevented at tree-edit time — confirm.
            if self.id in ancestors_ids:
                write_message("Loop found in collection %s" % self.name, stream=sys.stderr)
                raise OverflowError("Loop found in collection %s" % self.name)
            else:
                ancestors.append(col_ancestor)
                ancestors_ids.add(col_ancestor.id)
                id_son = res[0][0]
        else:
            break
    # Return root-most ancestor first.
    ancestors.reverse()
    return ancestors
def restricted_p(self):
    """Predicate to test if the collection is restricted or not. Return the contect of the
    `restrited' column of the collection table (typically Apache group). Otherwise return
    None if the collection is public."""
    return 1 if collection_restricted_p(self.name) else None
def get_sons(self, type='r'):
    """Return the list of direct sons of kind *type* for the current
    collection, ordered by descending score and then name.

    :param type: the collection_collection link type to follow
        (e.g. 'r' = regular, 'v' = virtual)
    :return: list of Collection objects
    """
    # Bind the values as query parameters instead of interpolating them into
    # the SQL string (consistent with the parameterized run_sql() calls
    # elsewhere in this module, and safe against SQL injection).
    res = run_sql("SELECT cc.id_son,c.name FROM collection_collection AS cc, collection AS c "
                  "WHERE cc.id_dad=%s AND cc.type=%s AND c.id=cc.id_son ORDER BY score DESC, c.name ASC",
                  (int(self.id), type))
    sons = []
    for row in res:
        sons.append(get_collection(row[1]))
    return sons
def get_descendants(self, type='r'):
    "Returns list of all descendants of type 'type' for the current collection."
    descendants = []
    descendant_ids = intbitset()
    id_dad = self.id
    query = "SELECT cc.id_son,c.name FROM collection_collection AS cc, collection AS c "\
            "WHERE cc.id_dad=%d AND cc.type='%s' AND c.id=cc.id_son ORDER BY score DESC" % (int(id_dad), type)
    res = run_sql(query)
    for row in res:
        col_desc = get_collection(row[1])
        # looking for loops
        # NOTE(review): only cycles passing through self are detected here
        # (self.id among the collected descendant ids) — confirm tree edits
        # prevent other cycles.
        if self.id in descendant_ids:
            write_message("Loop found in collection %s" % self.name, stream=sys.stderr)
            raise OverflowError("Loop found in collection %s" % self.name)
        else:
            descendants.append(col_desc)
            descendant_ids.add(col_desc.id)
            # Recurse into this son and merge its whole subtree.
            tmp_descendants = col_desc.get_descendants()
            for descendant in tmp_descendants:
                descendant_ids.add(descendant.id)
            descendants += tmp_descendants
    return descendants
def write_cache_file(self, filename='', filebody=None):
    """Pickle *filebody* into the collection cache file
    <CFG_CACHEDIR>/collections/<filename>.html.

    :param filename: cache file name, without directory or extension
    :param filebody: object to pickle (defaults to an empty dict; the
        original mutable default argument ``{}`` is avoided on purpose)

    Exits the process on I/O error while opening the file, matching the
    original behaviour.
    """
    if filebody is None:
        filebody = {}
    # open file:
    dirname = "%s/collections" % (CFG_CACHEDIR)
    mymkdir(dirname)
    fullfilename = dirname + "/%s.html" % filename
    try:
        os.umask(0o022)
        f = open(fullfilename, "wb")
    except IOError as v:
        try:
            # IOError.args is conventionally (errno, strerror).
            (code, message) = v.args
        except Exception:
            code = 0
            message = v
        print("I/O Error: " + str(message) + " (" + str(code) + ")")
        sys.exit(1)
    # print user info:
    write_message("... creating %s" % fullfilename, verbose=6)
    # print page body; the context manager guarantees the file is closed:
    with f:
        cPickle.dump(filebody, f, cPickle.HIGHEST_PROTOCOL)
def update_webpage_cache(self, lang):
    """Create collection page header, navtrail, body (including left and right stripes) and footer, and
    call write_cache_file() afterwards to update the collection webpage cache."""
    return {} ## webpage cache update is not really needed in
              ## Invenio-on-Flask, so let's return quickly here
              ## for great speed-up benefit
    # NOTE(review): everything below is intentionally dead code (see the
    # early return above), kept from the pre-Flask implementation.
    ## precalculate latest additions for non-aggregate
    ## collections (the info is ln and as independent)
    if self.dbquery:
        if CFG_WEBSEARCH_I18N_LATEST_ADDITIONS:
            self.create_latest_additions_info(ln=lang)
        else:
            self.create_latest_additions_info()
    # load the right message language
    _ = gettext_set_language(lang)
    # create dictionary with data
    cache = {"te_portalbox" : self.create_portalbox(lang, 'te'),
             "np_portalbox" : self.create_portalbox(lang, 'np'),
             "ne_portalbox" : self.create_portalbox(lang, 'ne'),
             "tp_portalbox" : self.create_portalbox(lang, "tp"),
             "lt_portalbox" : self.create_portalbox(lang, "lt"),
             "rt_portalbox" : self.create_portalbox(lang, "rt"),
             "last_updated" : convert_datestruct_to_dategui(time.localtime(),
                                                            ln=lang)}
    for aas in CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES: # do light, simple and advanced search pages:
        cache["navtrail_%s" % aas] = self.create_navtrail_links(aas, lang)
        cache["searchfor_%s" % aas] = self.create_searchfor(aas, lang)
        cache["narrowsearch_%s" % aas] = self.create_narrowsearch(aas, lang, 'r')
        cache["focuson_%s" % aas] = self.create_narrowsearch(aas, lang, "v")+ \
            self.create_external_collections_box(lang)
        cache["instantbrowse_%s" % aas] = self.create_instant_browse(aas=aas, ln=lang)
    # write cache file
    self.write_cache_file("%s-ln=%s"%(self.name, lang), cache)
    return cache
def create_navtrail_links(self, aas=CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, ln=CFG_SITE_LANG):
    """Creates navigation trail links, i.e. links to collection
    ancestors (except Home collection). If aas==1, then links to
    Advanced Search interfaces; otherwise Simple Search.
    """
    # Collect (internal name, localized name) pairs, skipping Home.
    dads = [(ancestor.name, ancestor.get_name(ln))
            for ancestor in self.get_ancestors()
            if ancestor.name != CFG_SITE_NAME]
    return websearch_templates.tmpl_navtrail_links(aas=aas, ln=ln, dads=dads)
def create_portalbox(self, lang=CFG_SITE_LANG, position="rt"):
    """Creates portalboxes of language CFG_SITE_LANG of the position POSITION by consulting DB configuration database.
    The position may be: 'lt'='left top', 'rt'='right top', etc."""
    out = ""
    # Bind lang and position as query parameters instead of interpolating
    # them into the SQL string (consistent with the parameterized run_sql()
    # calls elsewhere in this module, and safe against SQL injection).
    res = run_sql("SELECT p.title,p.body FROM portalbox AS p, collection_portalbox AS cp "
                  " WHERE cp.id_collection=%s AND p.id=cp.id_portalbox AND cp.ln=%s AND cp.position=%s "
                  " ORDER BY cp.score DESC", (self.id, lang, position))
    for row in res:
        title, body = row[0], row[1]
        if title:
            out += websearch_templates.tmpl_portalbox(title = title,
                                                      body = body)
        else:
            # no title specified, so print body ``as is'' only:
            out += body
    return out
def create_narrowsearch(self, aas=CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, ln=CFG_SITE_LANG, type="r"):
    """Creates list of collection descendants of type 'type' under title 'title'.
    If aas==1, then links to Advanced Search interfaces; otherwise Simple Search.
    Suitable for 'Narrow search' and 'Focus on' boxes."""
    # Nothing to render when the collection has no sons of this type.
    sons = self.get_sons(type)
    if not sons:
        return ''
    descendants = self.get_descendants(type)
    grandsons = []
    if CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS:
        # Pre-load the grandsons of every son for the template.
        grandsons = [son.get_sons() for son in sons]
    return websearch_templates.tmpl_narrowsearch(
        aas = aas,
        ln = ln,
        type = type,
        father = self,
        has_grandchildren = len(descendants) > len(sons),
        sons = sons,
        display_grandsons = CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS,
        grandsons = grandsons
    )
def create_external_collections_box(self, ln=CFG_SITE_LANG):
    """Render the 'Search also' box listing the external search engines
    configured for this collection; return "" when there are none."""
    external_collection_load_states()
    if self.id not in dico_collection_external_searches:
        return ""
    engines = dico_collection_external_searches[self.id]
    sorted_engines = external_collection_sort_engine_by_name(engines)
    return websearch_templates.tmpl_searchalso(ln, sorted_engines, self.id)
def create_latest_additions_info(self, rg=CFG_WEBSEARCH_INSTANT_BROWSE, ln=CFG_SITE_LANG):
    """
    Create info about latest additions that will be used for
    create_instant_browse() later.
    """
    self.latest_additions_info = []
    if self.nbrecs and self.reclist:
        # firstly, get last 'rg' records:
        recIDs = list(self.reclist)
        of = 'hb'
        # CERN hack begins: tweak latest additions for selected collections:
        if CFG_CERN_SITE:
            # alter recIDs list for some CERN collections:
            this_year = time.strftime("%Y", time.localtime())
            if self.name in ['CERN Yellow Reports','Videos']:
                last_year = str(int(this_year) - 1)
                # detect recIDs only from this and past year:
                recIDs = list(self.reclist & \
                              search_pattern_parenthesised(p='year:%s or year:%s' % \
                                                           (this_year, last_year)))
            # apply special filters:
            if self.name in ['Videos']:
                # select only videos with movies:
                recIDs = list(intbitset(recIDs) & \
                              search_pattern_parenthesised(p='collection:"PUBLVIDEOMOVIE" -"Virtual Visit"'))
                of = 'hvp'
            if self.name in ['General Talks', 'Academic Training Lectures', 'Summer Student Lectures']:
                #select only the lectures with material
                recIDs = list(self.reclist & search_pattern_parenthesised(p='856:MediaArchive'))
            # sort some CERN collections specially:
            if self.name in ['Videos',
                             'Video Clips',
                             'Video Movies',
                             'Video News',
                             'Video Rushes',
                             'Webcast',
                             'ATLAS Videos',
                             'Restricted Video Movies',
                             'Restricted Video Rushes',
                             'LHC First Beam Videos',
                             'CERN openlab Videos']:
                recIDs = sort_records(None, recIDs, '269__c', 'a')
            elif self.name in ['LHCb Talks']:
                recIDs = sort_records(None, recIDs, 'reportnumber', 'a')
            elif self.name in ['CERN Yellow Reports']:
                recIDs = sort_records(None, recIDs, '084__a', 'a')
            elif self.name in ['CERN Courier Issues',
                               'CERN Courier Articles',
                               'CERN Bulletin Issues',
                               'CERN Bulletin Articles']:
                recIDs = sort_records(None, recIDs, '773__y', 'a')
        # CERN hack ends.
        # Walk the tail of recIDs backwards so the newest entries come first.
        total = len(recIDs)
        to_display = min(rg, total)
        for idx in range(total-1, total-to_display-1, -1):
            recid = recIDs[idx]
            # Store id, preformatted record body and creation date for
            # create_instant_browse().
            self.latest_additions_info.append({'id': recid,
                                               'format': format_record(recid, of, ln=ln),
                                               'date': get_creation_date(recid, fmt="%Y-%m-%d<br />%H:%i")})
    return
def create_instant_browse(self, rg=CFG_WEBSEARCH_INSTANT_BROWSE, aas=CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, ln=CFG_SITE_LANG):
    "Searches database and produces list of last 'rg' records."
    # Restricted and hosted collections get placeholder boxes instead.
    if self.restricted_p():
        return websearch_templates.tmpl_box_restricted_content(ln = ln)
    if str(self.dbquery).startswith("hostedcollection:"):
        return websearch_templates.tmpl_box_hosted_collection(ln = ln)
    if rg == 0:
        # do not show latest additions box
        return ""
    # CERN hack: do not display latest additions for some CERN collections:
    if CFG_CERN_SITE and self.name in ['Periodicals', 'Electronic Journals',
                                       'Press Office Photo Selection',
                                       'Press Office Video Selection']:
        return ""
    # Probe whether create_latest_additions_info() has run for this object.
    # NOTE(review): bare except used as a hasattr() probe.
    try:
        self.latest_additions_info
        latest_additions_info_p = True
    except:
        latest_additions_info_p = False
    if latest_additions_info_p:
        passIDs = []
        for idx in range(0, min(len(self.latest_additions_info), rg)):
            # CERN hack: display the records in a grid layout, so do not show the related links
            if CFG_CERN_SITE and self.name in ['Videos']:
                passIDs.append({'id': self.latest_additions_info[idx]['id'],
                                'body': self.latest_additions_info[idx]['format'],
                                'date': self.latest_additions_info[idx]['date']})
            else:
                passIDs.append({'id': self.latest_additions_info[idx]['id'],
                                'body': self.latest_additions_info[idx]['format'] + \
                                    websearch_templates.tmpl_record_links(recid=self.latest_additions_info[idx]['id'],
                                                                          rm='citation',
                                                                          ln=ln),
                                'date': self.latest_additions_info[idx]['date']})
        # Link to the full search results when there are more records than shown.
        if self.nbrecs > rg:
            url = websearch_templates.build_search_url(
                cc=self.name, jrec=rg+1, ln=ln, aas=aas)
        else:
            url = ""
        # CERN hack: display the records in a grid layout
        if CFG_CERN_SITE and self.name in ['Videos']:
            return websearch_templates.tmpl_instant_browse(
                aas=aas, ln=ln, recids=passIDs, more_link=url, grid_layout=True, father=self)
        return websearch_templates.tmpl_instant_browse(
            aas=aas, ln=ln, recids=passIDs, more_link=url, father=self)
    return websearch_templates.tmpl_box_no_records(ln=ln)
def create_searchoptions(self):
    "Produces 'Search options' portal box."
    box = ""
    # Fields that have fixed value lists configured for this collection.
    query = """SELECT DISTINCT(cff.id_field),f.code,f.name FROM collection_field_fieldvalue AS cff, field AS f
               WHERE cff.id_collection=%d AND cff.id_fieldvalue IS NOT NULL AND cff.id_field=f.id
               ORDER BY cff.score DESC""" % self.id
    res = run_sql(query)
    if res:
        for row in res:
            field_id = row[0]
            field_code = row[1]
            field_name = row[2]
            # For each field, fetch its configured values in display order.
            query_bis = """SELECT fv.value,fv.name FROM fieldvalue AS fv, collection_field_fieldvalue AS cff
                           WHERE cff.id_collection=%d AND cff.type='seo' AND cff.id_field=%d AND fv.id=cff.id_fieldvalue
                           ORDER BY cff.score_fieldvalue DESC, cff.score DESC, fv.name ASC""" % (self.id, field_id)
            res_bis = run_sql(query_bis)
            if res_bis:
                # First entry is the catch-all "any <field>" choice.
                values = [{'value' : '', 'text' : 'any' + ' ' + field_name}] # FIXME: internationalisation of "any"
                for row_bis in res_bis:
                    values.append({'value' : cgi.escape(row_bis[0], 1), 'text' : row_bis[1]})
                box += websearch_templates.tmpl_select(
                    fieldname = field_code,
                    values = values
                )
    return box
def create_sortoptions(self, ln=CFG_SITE_LANG):
    """Produces 'Sort options' portal box."""
    # load the right message language
    _ = gettext_set_language(ln)
    # The first entry is the implicit "latest first" ordering.
    values = [{'value' : '', 'text': "- %s -" % _("latest first")}]
    rows = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
                      WHERE id_collection=%d AND cff.type='soo' AND cff.id_field=f.id
                      ORDER BY cff.score DESC, f.name ASC""" % self.id)
    if rows:
        for code, fname in rows:
            values.append({'value' : code, 'text': get_field_i18nname(fname, ln)})
    else:
        # No sort fields configured: fall back to a hard-coded default list.
        for tmp in ('title', 'author', 'report number', 'year'):
            values.append({'value' : tmp.replace(' ', ''), 'text' : get_field_i18nname(tmp, ln)})
    box = websearch_templates.tmpl_select(
        fieldname = 'sf',
        css_class = 'address',
        values = values
    )
    # Append the ascending/descending selector.
    box += websearch_templates.tmpl_select(
        fieldname = 'so',
        css_class = 'address',
        values = [
            {'value' : 'a' , 'text' : _("asc.")},
            {'value' : 'd' , 'text' : _("desc.")}
        ]
    )
    return box
def create_rankoptions(self, ln=CFG_SITE_LANG):
    "Produces 'Rank options' portal box."
    # load the right message language
    _ = gettext_set_language(ln)
    # str.lower() instead of string.lower(): the latter was removed from
    # the `string` module in Python 3, so the original call would raise
    # AttributeError there.
    values = [{'value' : '', 'text': "- %s %s -" % (_("OR").lower(), _("rank by"))}]
    for (code, name) in get_bibrank_methods(self.id, ln):
        values.append({'value' : code, 'text': name})
    box = websearch_templates.tmpl_select(
        fieldname = 'rm',
        css_class = 'address',
        values = values
    )
    return box
def create_displayoptions(self, ln=CFG_SITE_LANG):
    "Produces 'Display options' portal box."
    # load the right message language
    _ = gettext_set_language(ln)
    results_label = _("results")
    values = [{'value' : count, 'text' : count + ' ' + results_label}
              for count in ('10', '25', '50', '100', '250', '500')]
    box = websearch_templates.tmpl_select(
        fieldname = 'rg',
        selected = str(CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS),
        css_class = 'address',
        values = values
    )
    # The split-by-collection choice only makes sense when sons exist.
    if self.get_sons():
        box += websearch_templates.tmpl_select(
            fieldname = 'sc',
            css_class = 'address',
            values = [
                {'value' : '1' , 'text' : CFG_SCOAP3_SITE and _("split by publisher/journal") or _("split by collection")},
                {'value' : '0' , 'text' : _("single list")}
            ]
        )
    return box
def create_formatoptions(self, ln=CFG_SITE_LANG):
    "Produces 'Output format options' portal box."
    # load the right message language
    _ = gettext_set_language(ln)
    rows = run_sql("""SELECT f.code,f.name FROM format AS f, collection_format AS cf
                      WHERE cf.id_collection=%d AND cf.id_format=f.id AND f.visibility='1'
                      ORDER BY cf.score DESC, f.name ASC""" % self.id)
    if rows:
        values = [{'value' : code, 'text': fname} for code, fname in rows]
    else:
        # No formats configured: fall back to HTML brief.
        values = [{'value' : 'hb', 'text' : "HTML %s" % _("brief")}]
    return websearch_templates.tmpl_select(
        fieldname = 'of',
        css_class = 'address',
        values = values
    )
def create_searchwithin_selection_box(self, fieldname='f', value='', ln='en'):
    """Produces 'search within' selection box for the current collection."""
    rows = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
                      WHERE cff.type='sew' AND cff.id_collection=%d AND cff.id_field=f.id
                      ORDER BY cff.score DESC, f.name ASC""" % self.id)
    # The catch-all "any field" entry always comes first.
    values = [{'value' : '', 'text' : get_field_i18nname("any field", ln)}]
    if rows:
        for code, fname in rows:
            values.append({'value' : code, 'text' : get_field_i18nname(fname, ln)})
    else:
        # No fields configured: fall back to a hard-coded list
        # (CERN sites get a slightly different default selection).
        if CFG_CERN_SITE:
            default_fields = ['title', 'author', 'abstract', 'report number', 'year']
        else:
            default_fields = ['title', 'author', 'abstract', 'keyword', 'report number', 'journal', 'year', 'fulltext', 'reference']
        for fld in default_fields:
            values.append({'value' : fld.replace(' ', ''), 'text' : get_field_i18nname(fld, ln)})
    return websearch_templates.tmpl_searchwithin_select(
        fieldname = fieldname,
        ln = ln,
        selected = value,
        values = values
    )
def create_searchexample(self):
    "Produces search example(s) for the current collection."
    # PHP snippet evaluated by the legacy page template.
    return "$collSearchExamples = getSearchExample(%d, $se);" % self.id
def create_searchfor(self, aas=CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, ln=CFG_SITE_LANG):
    "Produces either Simple or Advanced 'Search for' box for the current collection."
    # Dispatch on the search-interface flavour:
    # 2 = add-to-search, 1 = advanced, 0 = simple, anything else = light.
    if aas == 2:
        return self.create_searchfor_addtosearch(ln)
    if aas == 1:
        return self.create_searchfor_advanced(ln)
    if aas == 0:
        return self.create_searchfor_simple(ln)
    return self.create_searchfor_light(ln)
def create_searchfor_addtosearch(self, ln=CFG_SITE_LANG):
    "Produces add-to-search 'Search for' box for the current collection."
    within_box = self.create_searchwithin_selection_box(fieldname='f1', ln=ln)
    return websearch_templates.tmpl_searchfor_addtosearch(
        ln=ln,
        collection_id=self.name,
        record_count=self.nbrecs,
        searchwithin=within_box,
    )
def create_searchfor_light(self, ln=CFG_SITE_LANG):
    "Produces light 'Search for' box for the current collection."
    examples = self.get_example_search_queries()
    return websearch_templates.tmpl_searchfor_light(
        ln=ln,
        collection_id=self.name,
        collection_name=self.get_name(ln=ln),
        record_count=self.nbrecs,
        example_search_queries=examples,
    )
def create_searchfor_simple(self, ln=CFG_SITE_LANG):
    "Produces simple 'Search for' box for the current collection."
    within_box = self.create_searchwithin_selection_box(ln=ln)
    return websearch_templates.tmpl_searchfor_simple(
        ln=ln,
        collection_id=self.name,
        collection_name=self.get_name(ln=ln),
        record_count=self.nbrecs,
        middle_option=within_box,
    )
def create_searchfor_advanced(self, ln=CFG_SITE_LANG):
    "Produces advanced 'Search for' box for the current collection."
    # Three "search within" selectors, one per advanced-search row.
    return websearch_templates.tmpl_searchfor_advanced(
        ln=ln,
        collection_id=self.name,
        collection_name=self.get_name(ln=ln),
        record_count=self.nbrecs,
        middle_option_1=self.create_searchwithin_selection_box('f1', ln=ln),
        middle_option_2=self.create_searchwithin_selection_box('f2', ln=ln),
        middle_option_3=self.create_searchwithin_selection_box('f3', ln=ln),
        searchoptions=self.create_searchoptions(),
        sortoptions=self.create_sortoptions(ln),
        rankoptions=self.create_rankoptions(ln),
        displayoptions=self.create_displayoptions(ln),
        formatoptions=self.create_formatoptions(ln)
    )
def calculate_reclist(self):
    """
    Calculate, set and return the (reclist,
    reclist_with_nonpublic_subcolls,
    nbrecs_from_hosted_collections)
    tuple for the given collection."""
    if str(self.dbquery).startswith("hostedcollection:"):
        # we don't normally use this function to calculate the reclist
        # for hosted collections. In case we do, recursively for a regular
        # ancestor collection, then quickly return the object attributes.
        # NOTE(review): this path returns self.nbrecs as the third element,
        # unlike the other return paths which use
        # self.nbrecs_from_hosted_collections -- confirm intentional.
        return (self.reclist,
                self.reclist_with_nonpublic_subcolls,
                self.nbrecs)
    if self.calculate_reclist_run_already:
        # do we really have to recalculate? If not,
        # then return the object attributes
        return (self.reclist,
                self.reclist_with_nonpublic_subcolls,
                self.nbrecs_from_hosted_collections)
    write_message("... calculating reclist of %s" % self.name, verbose=6)
    reclist = intbitset()  # will hold results for public sons only; good for storing into DB
    reclist_with_nonpublic_subcolls = intbitset()  # will hold results for both public and
                                                   # nonpublic sons; good for deducing the
                                                   # total number of documents
    nbrecs_from_hosted_collections = 0  # will hold the total number of records from descendant hosted collections
    if not self.dbquery:
        # A - collection does not have dbquery, so query recursively all its sons
        #     that are either non-restricted or that have the same restriction rules
        for coll in self.get_sons():
            coll_reclist,\
            coll_reclist_with_nonpublic_subcolls,\
            coll_nbrecs_from_hosted_collection = coll.calculate_reclist()
            if ((coll.restricted_p() is None) or
                (coll.restricted_p() == self.restricted_p())):
                # add this reclist ``for real'' only if it is public
                reclist.union_update(coll_reclist)
            # the nonpublic union is accumulated unconditionally
            reclist_with_nonpublic_subcolls.union_update(coll_reclist_with_nonpublic_subcolls)
            # increment the total number of records from descendant hosted collections
            nbrecs_from_hosted_collections += coll_nbrecs_from_hosted_collection
    else:
        # B - collection does have dbquery, so compute it:
        #     (note: explicitly remove DELETED records)
        if CFG_CERN_SITE:
            reclist = search_pattern_parenthesised(None, self.dbquery + \
                ' -980__:"DELETED" -980__:"DUMMY"', ap=-9)  # ap=-9 to allow queries containing hidden tags
        else:
            reclist = search_pattern_parenthesised(None, self.dbquery + ' -980__:"DELETED"', ap=-9)  # ap=-9 to allow queries containing hidden tags
        # no sons were queried, so public and nonpublic sets coincide
        reclist_with_nonpublic_subcolls = copy.deepcopy(reclist)
    # store the results:
    self.nbrecs_from_hosted_collections = nbrecs_from_hosted_collections
    self.nbrecs = len(reclist_with_nonpublic_subcolls) + \
        nbrecs_from_hosted_collections
    self.reclist = reclist
    self.reclist_with_nonpublic_subcolls = reclist_with_nonpublic_subcolls
    # last but not least, update the speed-up flag:
    self.calculate_reclist_run_already = 1
    # return the two sets, as well as
    # the total number of records from descendant hosted collections:
    return (self.reclist,
            self.reclist_with_nonpublic_subcolls,
            self.nbrecs_from_hosted_collections)
def calculate_nbrecs_for_external_collection(self, timeout=CFG_EXTERNAL_COLLECTION_TIMEOUT):
    """Calculate the total number of records, aka nbrecs, for given external collection.

    Returns the freshly parsed count when available; falls back to the
    cached self.nbrecs when the parser reports a negative (error) value,
    and returns 0 when the collection is unknown or has no parser.
    """
    if self.name in external_collections_dictionary:
        engine = external_collections_dictionary[self.name]
        if engine.parser:
            # cache the parsed value on the instance so that
            # set_nbrecs_for_external_collection() can reuse it
            self.nbrecs_tmp = engine.parser.parse_nbrecs(timeout)
            if self.nbrecs_tmp >= 0: return self.nbrecs_tmp
            # the parse_nbrecs() function returns negative values for some specific cases
            # maybe we can handle these specific cases, some warnings or something
            # for now the total number of records remains silently the same
            else: return self.nbrecs
        else: write_message("External collection %s does not have a parser!" % self.name, verbose=6)
    else: write_message("External collection %s not found!" % self.name, verbose=6)
    # unknown collection or no parser: report zero records
    return 0
def check_nbrecs_for_external_collection(self):
    """Check if the external collection has changed its total number of records, aka nbrecs.

    Returns True if the total number of records has changed and False
    if it is the same.
    """
    # Compute the fresh count exactly once: every call may query the
    # remote collection over the network, and the original code called
    # it up to three times (twice with the wrong default timeout in the
    # debug messages, once with CFG_HOSTED_COLLECTION_TIMEOUT_NBRECS).
    new_nbrecs = self.calculate_nbrecs_for_external_collection(CFG_HOSTED_COLLECTION_TIMEOUT_NBRECS)
    write_message("*** self.nbrecs = %s / self.cal...ion = %s ***" % (str(self.nbrecs), str(new_nbrecs)), verbose=6)
    write_message("*** self.nbrecs != self.cal...ion = %s ***" % (str(self.nbrecs != new_nbrecs),), verbose=6)
    return self.nbrecs != new_nbrecs
def set_nbrecs_for_external_collection(self):
    """Set this external collection's total number of records, aka nbrecs."""
    if self.calculate_reclist_run_already:
        # already computed during this run -- nothing to do
        return
    write_message("... calculating nbrecs of external collection %s" % self.name, verbose=6)
    # prefer the value cached by calculate_nbrecs_for_external_collection()
    cached = self.nbrecs_tmp
    self.nbrecs = cached if cached else self.calculate_nbrecs_for_external_collection(CFG_HOSTED_COLLECTION_TIMEOUT_NBRECS)
    # last but not least, update the speed-up flag:
    self.calculate_reclist_run_already = 1
def update_reclist(self):
    """Update the record universe for given collection; nbrecs, reclist of the collection table.

    Returns 0 on success (and when the update was already done in this
    run); exits the whole process on a database error.
    """
    if self.update_reclist_run_already:
        # do we have to reupdate?
        return 0
    write_message("... updating reclist of %s (%s recs)" % (self.name, self.nbrecs), verbose=6)
    sys.stdout.flush()
    try:
        ## In principle we could skip this update if old_reclist==reclist
        ## however we just update it here in case of race-conditions.
        run_sql("UPDATE collection SET nbrecs=%s, reclist=%s WHERE id=%s",
                (self.nbrecs, self.reclist.fastdump(), self.id))
        if self.old_reclist != self.reclist:
            # remember that something changed so the webpage cache
            # part of the task knows it must refresh this collection
            self.reclist_updated_since_start = 1
        else:
            write_message("... no changes in reclist detected", verbose=6)
    except Error as e:
        # fatal: a failed collection-table update would leave the cache
        # inconsistent, so abort the whole task
        print("Database Query Error %d: %s." % (e.args[0], e.args[1]))
        sys.exit(1)
    # last but not least, update the speed-up flag:
    self.update_reclist_run_already = 1
    return 0
def perform_display_collection(colID, colname, aas, ln, em, show_help_boxes):
    """Returns the data needed to display a collection page
    The arguments are as follows:
    colID - id of the collection to display
    colname - name of the collection to display
    aas - 0 if simple search, 1 if advanced search
    ln - language of the page
    em - code to display just part of the page
    show_help_boxes - whether to show the help boxes or not"""
    # check and update cache if necessary
    # NOTE(review): open() raises IOError if the cache file does not exist
    # yet -- presumably callers guarantee the cache was created; confirm.
    cachedfile = open("%s/collections/%s-ln=%s.html" %
                      (CFG_CACHEDIR, colname, ln), "rb")
    try:
        data = cPickle.load(cachedfile)
    except ValueError:
        # corrupt/stale pickle: rebuild the cache entry for this collection
        data = get_collection(colname).update_webpage_cache(ln)
    cachedfile.close()
    # check em value to return just part of the page
    if em != "":
        if EM_REPOSITORY["search_box"] not in em:
            data["searchfor_%s" % aas] = ""
        if EM_REPOSITORY["see_also_box"] not in em:
            data["focuson_%s" % aas] = ""
        if EM_REPOSITORY["all_portalboxes"] not in em:
            # blank each portalbox that was not explicitly requested
            if EM_REPOSITORY["te_portalbox"] not in em:
                data["te_portalbox"] = ""
            if EM_REPOSITORY["np_portalbox"] not in em:
                data["np_portalbox"] = ""
            if EM_REPOSITORY["ne_portalbox"] not in em:
                data["ne_portalbox"] = ""
            if EM_REPOSITORY["tp_portalbox"] not in em:
                data["tp_portalbox"] = ""
            if EM_REPOSITORY["lt_portalbox"] not in em:
                data["lt_portalbox"] = ""
            if EM_REPOSITORY["rt_portalbox"] not in em:
                data["rt_portalbox"] = ""
    c_body = websearch_templates.tmpl_webcoll_body(ln, colID, data.get("te_portalbox", ""),
        data.get("searchfor_%s"%aas,''), data.get("np_portalbox", ''), data.get("narrowsearch_%s"%aas, ''),
        data.get("focuson_%s"%aas, ''), data.get("instantbrowse_%s"%aas, ''), data.get("ne_portalbox", ''),
        em=="" or EM_REPOSITORY["body"] in em)
    if show_help_boxes <= 0:
        # NOTE(review): this runs after c_body is already built, so it
        # only blanks the right-hand portalbox in the returned tuple.
        data["rt_portalbox"] = ""
    return (c_body, data.get("navtrail_%s"%aas, ''), data.get("lt_portalbox", ''), data.get("rt_portalbox", ''),
            data.get("tp_portalbox", ''), data.get("te_portalbox", ''), data.get("last_updated", ''))
def get_datetime(var, format_string="%Y-%m-%d %H:%M:%S"):
    """Returns a date string according to the format string.

    It can handle normal date strings and shifts with respect to now,
    e.g. "+1d" (one day from now), "-2h", "30m", "+45s".

    :param var: either a date string parsable by format_string, or a
        shift expression of the form "[+|-]<number><d|h|m|s>".
    :param format_string: strftime/strptime format of both the input
        date string and the returned value.
    :return: the (possibly shifted) date formatted via format_string.
    :raises ValueError: if var is neither a shift nor a valid date.
    """
    # raw string fixes the invalid "\+"/"\d" escape sequences of the
    # original pattern; [-+]? is equivalent to [-\+]{0,1}
    shift_re = re.compile(r"([-+]?)(\d+)([dhms])")
    factors = {"d": 24 * 3600, "h": 3600, "m": 60, "s": 1}
    m = shift_re.match(var)
    if m:
        # a shift expression: offset from the current time
        sign = -1 if m.group(1) == "-" else 1
        value = float(m.group(2))
        factor = factors[m.group(3)]
        date = time.localtime(time.time() + sign * factor * value)
    else:
        # a plain date string: normalise it through strptime
        date = time.strptime(var, format_string)
    return time.strftime(format_string, date)
def get_current_time_timestamp():
    """Return a "YYYY-MM-DD HH:MM:SS" timestamp for the current local time."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def compare_timestamps_with_tolerance(timestamp1,
                                      timestamp2,
                                      tolerance=0):
    """Compare two timestamps TIMESTAMP1 and TIMESTAMP2, of the form
    '2005-03-31 17:37:26'. Optionally receives a TOLERANCE argument
    (in seconds). Return -1 if TIMESTAMP1 is less than TIMESTAMP2
    minus TOLERANCE, 0 if they are equal within TOLERANCE limit,
    and 1 if TIMESTAMP1 is greater than TIMESTAMP2 plus TOLERANCE.
    """
    def _to_epoch(stamp):
        # drop any trailing ".NN" fractional seconds, then convert
        # to Unix epoch seconds
        stamp = re.sub(r'\.[0-9]+$', '', stamp)
        return calendar.timegm(time.strptime(stamp, "%Y-%m-%d %H:%M:%S"))

    delta = _to_epoch(timestamp1) - _to_epoch(timestamp2)
    if delta < -tolerance:
        return -1
    if delta > tolerance:
        return 1
    return 0
def get_database_last_updated_timestamp():
    """Return last updated timestamp for collection-related and
    record-related database tables.
    """
    timestamps = [get_table_update_time('bibrec')]
    ## In INSPIRE bibfmt is on innodb and there is not such configuration
    bibfmt_last_update = run_sql("SELECT max(last_updated) FROM bibfmt")
    if bibfmt_last_update and bibfmt_last_update[0][0]:
        timestamps.append(str(bibfmt_last_update[0][0]))
    try:
        timestamps.append(get_table_update_time('idxWORD%'))
    except ValueError:
        # There are no indexes in the database. That's OK.
        pass
    for table_pattern in ('collection%', 'portalbox', 'field%', 'format%', 'rnkMETHODNAME'):
        timestamps.append(get_table_update_time(table_pattern))
    timestamps.append(get_table_update_time('accROLE_accACTION_accARGUMENT', run_on_slave=True))
    # timestamps are "YYYY-MM-DD HH:MM:SS" strings, so max() picks the latest
    return max(timestamps)
def get_cache_last_updated_timestamp():
    """Return last updated cache timestamp.

    Falls back to the epoch ("1970-01-01 00:00:00") when the timestamp
    file is missing, unreadable, or empty.
    """
    try:
        # 'with' guarantees the file is closed even if read() fails;
        # the original leaked the handle on a read error.
        with open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, "r") as f:
            timestamp = f.read()
    except EnvironmentError:
        # Narrowed from a bare 'except', which also swallowed
        # KeyboardInterrupt/SystemExit.  Missing or unreadable file:
        # report the epoch so callers treat the cache as never updated.
        return "1970-01-01 00:00:00"
    # Remove trailing newlines and whitespace.
    timestamp = timestamp.strip()
    return timestamp or "1970-01-01 00:00:00"
def set_cache_last_updated_timestamp(timestamp):
    """Set last updated cache timestamp to TIMESTAMP.

    Best-effort: an unwritable timestamp file is reported but does not
    abort the task.  Returns TIMESTAMP unchanged.
    """
    try:
        with open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, "w") as f:
            f.write(timestamp)
    except EnvironmentError as e:
        # Narrowed from a bare 'except: pass' (which hid the FIXME'd
        # failure entirely and also swallowed KeyboardInterrupt).
        # Keep the best-effort semantics but at least report the problem.
        write_message("Cannot write cache timestamp file: %s" % e, verbose=1)
    return timestamp
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """Handle one webcoll-specific command-line option.

    Fills the task options dict for recognised keys and returns True;
    returns False for unknown keys, or when a supplied language code
    is not in CFG_SITE_LANGS.
    """
    if key in ("-c", "--collection"):
        task_set_option("collection", value)
        return True
    if key in ("-r", "--recursive"):
        task_set_option("recursive", 1)
        return True
    if key in ("-f", "--force"):
        task_set_option("force", 1)
        return True
    if key in ("-q", "--quick"):
        task_set_option("quick", 1)
        return True
    if key in ("-p", "--part"):
        task_set_option("part", int(value))
        return True
    if key in ("-l", "--language"):
        # accumulate onto any languages chosen by earlier -l options
        languages = task_get_option("language", []) + value.split(',')
        unknown = [code for code in languages if code not in CFG_SITE_LANGS]
        if unknown:
            print('ERROR: "%s" is not a recognized language code' % unknown[0])
            return False
        task_set_option("language", languages)
        return True
    return False
def task_submit_check_options():
    """Validate task options before submission; False aborts the task."""
    if not task_has_option('collection'):
        return True
    coll = get_collection(task_get_option("collection"))
    if coll.id is None:
        # an id of None means the name did not resolve to a DB row
        print('ERROR: Collection "%s" does not exist' % coll.name)
        return False
    return True
def task_run_core():
    """Run the webcoll task: refresh collection reclist and webpage caches.

    Part 1 recomputes each collection's record list (or record count for
    hosted collections); part 2 regenerates the cached webpage for each
    requested language.  Returns True when done (including the no-op
    case where the cache is already up to date).
    """
    ##
    ## ------->--->time--->------>
    ##  (-1)  |   ( 0)    |  ( 1)
    ##        |           |
    ## [T.db] |   [T.fc]  | [T.db]
    ##        |           |
    ##        |<-tol|tol->|
    ##
    ## the above is the compare_timestamps_with_tolerance result "diagram"
    ## [T.db] stands fore the database timestamp and [T.fc] for the file cache timestamp
    ## ( -1, 0, 1) stand for the returned value
    ## tol stands for the tolerance in seconds
    ##
    ## When a record has been added or deleted from one of the collections the T.db becomes greater that the T.fc
    ## and when webcoll runs it is fully ran. It recalculates the reclists and nbrecs, and since it updates the
    ## collections db table it also updates the T.db. The T.fc is set as the moment the task started running thus
    ## slightly before the T.db (practically the time distance between the start of the task and the last call of
    ## update_reclist). Therefore when webcoll runs again, and even if no database changes have taken place in the
    ## meanwhile, it fully runs (because compare_timestamps_with_tolerance returns 0). This time though, and if
    ## no databases changes have taken place, the T.db remains the same while T.fc is updated and as a result if
    ## webcoll runs again it will not be fully ran
    ##
    task_run_start_timestamp = get_current_time_timestamp()
    colls = []
    # decide whether we need to run or not, by comparing last updated timestamps:
    write_message("Database timestamp is %s." % get_database_last_updated_timestamp(), verbose=3)
    write_message("Collection cache timestamp is %s." % get_cache_last_updated_timestamp(), verbose=3)
    if task_has_option("part"):
        write_message("Running cache update part %s only." % task_get_option("part"), verbose=3)
    if check_nbrecs_for_all_external_collections() or task_has_option("force") or \
       compare_timestamps_with_tolerance(get_database_last_updated_timestamp(),
                                         get_cache_last_updated_timestamp(),
                                         CFG_CACHE_LAST_UPDATED_TIMESTAMP_TOLERANCE) >= 0:
        ## either forced update was requested or cache is not up to date, so recreate it:
        # firstly, decide which collections to do:
        if task_has_option("collection"):
            coll = get_collection(task_get_option("collection"))
            colls.append(coll)
            if task_has_option("recursive"):
                # include both regular ('r') and virtual ('v') descendants
                r_type_descendants = coll.get_descendants(type='r')
                colls += r_type_descendants
                v_type_descendants = coll.get_descendants(type='v')
                colls += v_type_descendants
        else:
            res = run_sql("SELECT name FROM collection ORDER BY id")
            for row in res:
                colls.append(get_collection(row[0]))
        # secondly, update collection reclist cache:
        if task_get_option('part', 1) == 1:
            i = 0
            for coll in colls:
                i += 1
                write_message("%s / reclist cache update" % coll.name)
                if str(coll.dbquery).startswith("hostedcollection:"):
                    # hosted collections only track a record count
                    coll.set_nbrecs_for_external_collection()
                else:
                    coll.calculate_reclist()
                coll.update_reclist()
                task_update_progress("Part 1/2: done %d/%d" % (i, len(colls)))
                task_sleep_now_if_required(can_stop_too=True)
            webcoll_after_reclist_cache_update.send('webcoll', collections=colls)
        # thirdly, update collection webpage cache:
        if task_get_option("part", 2) == 2:
            # Updates cache only for chosen languages or for all available ones if none was chosen
            languages = task_get_option("language", CFG_SITE_LANGS)
            write_message("Cache update for the following languages: %s" % str(languages), verbose=3)
            i = 0
            for coll in colls:
                i += 1
                if coll.reclist_updated_since_start or task_has_option("collection") or task_get_option("force") or not task_get_option("quick"):
                    write_message("%s / webpage cache update" % coll.name)
                    for lang in languages:
                        coll.update_webpage_cache(lang)
                        webcoll_after_webpage_cache_update.send(coll.name, collection=coll, lang=lang)
                else:
                    write_message("%s / webpage cache seems not to need an update and --quick was used" % coll.name, verbose=2)
                task_update_progress("Part 2/2: done %d/%d" % (i, len(colls)))
                task_sleep_now_if_required(can_stop_too=True)
        # finally update the cache last updated timestamp:
        # (but only when all collections were updated, not when only
        # some of them were forced-updated as per admin's demand)
        if not task_has_option("collection"):
            set_cache_last_updated_timestamp(task_run_start_timestamp)
            write_message("Collection cache timestamp is set to %s." % get_cache_last_updated_timestamp(), verbose=3)
    else:
        ## cache up to date, we don't have to run
        write_message("Collection cache is up to date, no need to run.")
    ## we are done:
    return True
### okay, here we go:
if __name__ == '__main__':
    # Script entry point: parse CLI options and run the webcoll task.
    main()
|
lnielsen/invenio
|
invenio/legacy/websearch/webcoll.py
|
Python
|
gpl-2.0
| 55,782
|
[
"VisIt"
] |
53b85abb61f0dc522b9d00307f8f4261aa57c6a6c5264222d03b5a9ac14c057d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
An interface to the excellent spglib library by Atsushi Togo
(http://spglib.sourceforge.net/) for pymatgen.
v1.0 - Now works with both ordered and disordered structure.
v2.0 - Updated for spglib 1.6.
v3.0 - pymatgen no longer ships with spglib. Instead, spglib (the python
version) is now a dependency and the SpacegroupAnalyzer merely serves
as an interface to spglib for pymatgen Structures.
"""
import copy
import itertools
import logging
import math
from collections import defaultdict
from fractions import Fraction
from math import cos, sin
import numpy as np
import spglib
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Molecule, PeriodicSite, Structure
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.util.coord import find_in_coord_list, pbc_diff
logger = logging.getLogger(__name__)
class SpacegroupAnalyzer:
"""
Takes a pymatgen.core.structure.Structure object and a symprec.
Uses spglib to perform various symmetry finding operations.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5.0):
    """
    Args:
        structure (Structure/IStructure): Structure to find symmetry
        symprec (float): Tolerance for symmetry finding. Defaults to 0.01,
            which is fairly strict and works well for properly refined
            structures with atoms in the proper symmetry coordinates. For
            structures with slight deviations from their proper atomic
            positions (e.g., structures relaxed with electronic structure
            codes), a looser tolerance of 0.1 (the value used in Materials
            Project) is often needed.
        angle_tolerance (float): Angle tolerance for symmetry finding.
    """
    self._symprec = symprec
    self._angle_tol = angle_tolerance
    self._structure = structure
    latt = structure.lattice.matrix
    positions = structure.frac_coords
    unique_species = []
    zs = []
    magmoms = []
    # Map each site's species composition to a small integer "atomic
    # number" for spglib; identical compositions share the same index.
    for species, g in itertools.groupby(structure, key=lambda s: s.species):
        if species in unique_species:
            ind = unique_species.index(species)
            zs.extend([ind + 1] * len(tuple(g)))
        else:
            unique_species.append(species)
            zs.extend([len(unique_species)] * len(tuple(g)))
    # Collect per-site magnetic moments: explicit site magmom first,
    # then the species' spin, defaulting to 0 when neither is present.
    for site in structure:
        if hasattr(site, "magmom"):
            magmoms.append(site.magmom)
        elif site.is_ordered and hasattr(site.specie, "spin"):
            magmoms.append(site.specie.spin)
        else:
            magmoms.append(0)
    self._unique_species = unique_species
    self._numbers = zs
    # spglib cell tuple: (lattice matrix, fractional coords, numbers, magmoms)
    self._cell = latt, positions, zs, magmoms
    self._space_group_data = spglib.get_symmetry_dataset(
        self._cell, symprec=self._symprec, angle_tolerance=angle_tolerance
    )
def get_space_group_symbol(self):
"""
Get the spacegroup symbol (e.g., Pnma) for structure.
Returns:
(str): Spacegroup symbol for structure.
"""
return self._space_group_data["international"]
def get_space_group_number(self):
"""
Get the international spacegroup number (e.g., 62) for structure.
Returns:
(int): International spacegroup number for structure.
"""
return int(self._space_group_data["number"])
def get_space_group_operations(self):
    """
    Get the SpacegroupOperations for the Structure.

    Returns:
        SpacegroupOperations object.
    """
    symbol = self.get_space_group_symbol()
    number = self.get_space_group_number()
    operations = self.get_symmetry_operations()
    return SpacegroupOperations(symbol, number, operations)
def get_hall(self):
"""
Returns Hall symbol for structure.
Returns:
(str): Hall symbol
"""
return self._space_group_data["hall"]
def get_point_group_symbol(self):
"""
Get the point group associated with the structure.
Returns:
(Pointgroup): Point group for structure.
"""
rotations = self._space_group_data["rotations"]
# passing a 0-length rotations list to spglib can segfault
if len(rotations) == 0:
return "1"
return spglib.get_pointgroup(rotations)[0].strip()
def get_crystal_system(self):
"""
Get the crystal system for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.).
Returns:
(str): Crystal system for structure or None if system cannot be detected.
"""
n = self._space_group_data["number"]
if 0 < n < 3:
return "triclinic"
if n < 16:
return "monoclinic"
if n < 75:
return "orthorhombic"
if n < 143:
return "tetragonal"
if n < 168:
return "trigonal"
if n < 195:
return "hexagonal"
if n < 231:
return "cubic"
raise ValueError("Invalid space group")
def get_lattice_type(self):
"""
Get the lattice for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.).This is the same than the
crystal system with the exception of the hexagonal/rhombohedral
lattice
Returns:
(str): Lattice type for structure or None if type cannot be detected.
"""
n = self._space_group_data["number"]
system = self.get_crystal_system()
if n in [146, 148, 155, 160, 161, 166, 167]:
return "rhombohedral"
if system == "trigonal":
return "hexagonal"
return system
def get_symmetry_dataset(self):
"""
Returns the symmetry dataset as a dict.
Returns:
(dict): With the following properties:
number: International space group number
international: International symbol
hall: Hall symbol
transformation_matrix: Transformation matrix from lattice of
input cell to Bravais lattice L^bravais = L^original * Tmat
origin shift: Origin shift in the setting of "Bravais lattice"
rotations, translations: Rotation matrices and translation
vectors. Space group operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs: Wyckoff letters
"""
return self._space_group_data
def _get_symmetry(self):
    """
    Get the symmetry operations associated with the structure.

    Returns:
        Symmetry operations as a tuple of two equal length sequences
        (rotations, translations).  "rotations" is the numpy integer
        array of rotation matrices for scaled positions; "translations"
        is the numpy float64 array of translation vectors in scaled
        positions.
    """
    dataset = spglib.get_symmetry(self._cell, symprec=self._symprec, angle_tolerance=self._angle_tol)
    # spglib sometimes returns tiny float translations (e.g.
    # [1e-4, 2e-4, 1e-4]); in fractional coordinates these should be
    # small-denominator fractions, so snap every component.
    trans = np.array(
        [[float(Fraction.from_float(c).limit_denominator(1000)) for c in t]
         for t in dataset["translations"]]
    )
    # a fractional translation of exactly 1 is equivalent to 0
    trans[np.abs(trans) == 1] = 0
    return dataset["rotations"], trans
def get_symmetry_operations(self, cartesian=False):
    """
    Return symmetry operations as a list of SymmOp objects.
    By default returns fractional coord symmops, but cartesian
    can be returned too.

    Returns:
        ([SymmOp]): List of symmetry operations.
    """
    rotations, translations = self._get_symmetry()
    lattice_matrix = self._structure.lattice.matrix
    mat = lattice_matrix.T
    invmat = np.linalg.inv(mat)
    symmops = []
    for rot, trans in zip(rotations, translations):
        if cartesian:
            # similarity-transform the rotation and convert the
            # fractional translation into cartesian coordinates
            rot = np.dot(mat, np.dot(rot, invmat))
            trans = np.dot(trans, lattice_matrix)
        symmops.append(SymmOp.from_rotation_and_translation(rot, trans))
    return symmops
def get_point_group_operations(self, cartesian=False):
    """
    Return point group symmetry operations as a list of SymmOp objects
    (rotations only, zero translation).

    Args:
        cartesian (bool): Whether to return SymmOps as cartesian or
            direct coordinate operations.

    Returns:
        ([SymmOp]): List of point group symmetry operations.
    """
    # translations are discarded: a point group keeps only rotations
    rotations, _ = self._get_symmetry()
    mat = self._structure.lattice.matrix.T
    invmat = np.linalg.inv(mat)
    ops = []
    for rot in rotations:
        if cartesian:
            rot = np.dot(mat, np.dot(rot, invmat))
        ops.append(SymmOp.from_rotation_and_translation(rot, np.array([0, 0, 0])))
    return ops
def get_symmetrized_structure(self):
    """
    Get a symmetrized structure. A symmetrized structure is one where the
    sites have been grouped into symmetrically equivalent groups.

    Returns:
        :class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
    """
    dataset = self.get_symmetry_dataset()
    spacegroup = SpacegroupOperations(
        self.get_space_group_symbol(),
        self.get_space_group_number(),
        self.get_symmetry_operations(),
    )
    equivalent_atoms = dataset["equivalent_atoms"]
    wyckoff_letters = dataset["wyckoffs"]
    return SymmetrizedStructure(self._structure, spacegroup, equivalent_atoms, wyckoff_letters)
def get_refined_structure(self):
    """
    Get the refined structure based on detected symmetry. The refined
    structure is a *conventional* cell setting with atoms moved to the
    expected symmetry positions.

    Returns:
        Refined structure.
    """
    # spglib works on scaled (fractional) positions
    lattice, scaled_positions, numbers = spglib.refine_cell(self._cell, self._symprec, self._angle_tol)
    # map spglib's 1-based species indices back to actual species
    species = [self._unique_species[i - 1] for i in numbers]
    refined = Structure(lattice, species, scaled_positions)
    return refined.get_sorted_structure()
def find_primitive(self):
    """
    Find a primitive version of the unit cell.

    Returns:
        A primitive cell found in the input cell, returned as a
        Structure object reduced to its smallest form.
    """
    lattice, scaled_positions, numbers = spglib.find_primitive(self._cell, symprec=self._symprec)
    # map spglib's 1-based species indices back to actual species
    species = [self._unique_species[i - 1] for i in numbers]
    primitive = Structure(lattice, species, scaled_positions, to_unit_cell=True)
    return primitive.get_reduced_structure()
def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):
    """
    k-point mesh of the Brillouin zone generated taking into account
    symmetry.  The method returns the irreducible kpoints of the mesh
    and their weights.

    Args:
        mesh (3x1 array): The number of kpoints for the mesh needed in
            each direction
        is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,
            1) means all points are shifted by 0.5, 0.5, 0.5.

    Returns:
        A list of irreducible kpoints and their weights as a list of
        tuples [(ir_kpoint, weight)], with ir_kpoint given
        in fractional coordinates
    """
    # normalise is_shift to strict 0/1 flags, as spglib expects
    shift = np.array([1 if i else 0 for i in is_shift])
    mapping, grid = spglib.get_ir_reciprocal_mesh(np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
    results = []
    # `mapping` maps every grid point to the index of its irreducible
    # representative, so np.unique yields each irreducible point once
    # and the occurrence count is its weight
    for i, count in zip(*np.unique(mapping, return_counts=True)):
        # shifted grids sit half a step off the integer grid, hence the
        # shift * 0.5 offset before dividing by the mesh density
        results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh, count))
    return results
def get_conventional_to_primitive_transformation_matrix(self, international_monoclinic=True):
"""
Gives the transformation matrix to transform a conventional
unit cell to a primitive cell according to certain standards
the standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
Transformation matrix to go from conventional to primitive cell
"""
conv = self.get_conventional_standard_structure(international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return np.eye(3)
if lattice == "rhombohedral":
# check if the conventional representation is hexagonal or
# rhombohedral
lengths = conv.lattice.lengths
if abs(lengths[0] - lengths[2]) < 0.0001:
transf = np.eye
else:
transf = np.array([[-1, 1, 1], [2, 1, 1], [-1, -2, 1]], dtype=np.float_) / 3
elif "I" in self.get_space_group_symbol():
transf = np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]], dtype=np.float_) / 2
elif "F" in self.get_space_group_symbol():
transf = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=np.float_) / 2
elif "C" in self.get_space_group_symbol() or "A" in self.get_space_group_symbol():
if self.get_crystal_system() == "monoclinic":
transf = np.array([[1, 1, 0], [-1, 1, 0], [0, 0, 2]], dtype=np.float_) / 2
else:
transf = np.array([[1, -1, 0], [1, 1, 0], [0, 0, 2]], dtype=np.float_) / 2
else:
transf = np.eye(3)
return transf
def get_primitive_standard_structure(self, international_monoclinic=True):
    """
    Gives a structure with a primitive cell according to certain standards.
    The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
    High-throughput electronic band structure calculations:
    Challenges and tools. Computational Materials Science,
    49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010

    Returns:
        The structure in a primitive standardized cell
    """
    conv = self.get_conventional_standard_structure(international_monoclinic=international_monoclinic)
    lattice = self.get_lattice_type()
    if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
        # P lattices: the primitive cell equals the conventional one
        return conv
    transf = self.get_conventional_to_primitive_transformation_matrix(
        international_monoclinic=international_monoclinic
    )
    new_sites = []
    latt = Lattice(np.dot(transf, conv.lattice.matrix))
    for s in conv:
        new_s = PeriodicSite(
            s.specie,
            s.coords,
            latt,
            to_unit_cell=True,
            coords_are_cartesian=True,
            properties=s.properties,
        )
        # keep a single representative per set of periodic images
        if not any(map(new_s.is_periodic_image, new_sites)):
            new_sites.append(new_s)
    if lattice == "rhombohedral":
        prim = Structure.from_sites(new_sites)
        lengths = prim.lattice.lengths
        angles = prim.lattice.angles
        a = lengths[0]
        alpha = math.pi * angles[0] / 180
        # rebuild the rhombohedral lattice in the standard orientation
        # (Setyawan & Curtarolo convention)
        new_matrix = [
            [a * cos(alpha / 2), -a * sin(alpha / 2), 0],
            [a * cos(alpha / 2), a * sin(alpha / 2), 0],
            [
                a * cos(alpha) / cos(alpha / 2),
                0,
                a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2))),
            ],
        ]
        new_sites = []
        latt = Lattice(new_matrix)
        for s in prim:
            new_s = PeriodicSite(
                s.specie,
                s.frac_coords,
                latt,
                to_unit_cell=True,
                properties=s.properties,
            )
            if not any(map(new_s.is_periodic_image, new_sites)):
                new_sites.append(new_s)
        return Structure.from_sites(new_sites)
    return Structure.from_sites(new_sites)
    def get_conventional_standard_structure(self, international_monoclinic=True):
        """
        Gives a structure with a conventional cell according to certain
        standards. The standards are defined in Setyawan, W., & Curtarolo,
        S. (2010). High-throughput electronic band structure calculations:
        Challenges and tools. Computational Materials Science,
        49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
        They basically enforce as much as possible
        norm(a1)<norm(a2)<norm(a3). NB This is not necessarily the same as the
        standard settings within the International Tables of Crystallography,
        for which get_refined_structure should be used instead.

        Args:
            international_monoclinic (bool): For monoclinic lattices, whether
                to convert to the international convention in which beta
                (rather than alpha) is the non-right angle. Defaults to True.

        Returns:
            The structure in a conventional standardized cell
        """
        tol = 1e-5
        struct = self.get_refined_structure()
        latt = struct.lattice
        latt_type = self.get_lattice_type()
        sorted_lengths = sorted(latt.abc)
        # Lattice vectors sorted by length, remembering the original index of
        # each so a permutation matrix (transf) can be assembled below.
        sorted_dic = sorted(
            [{"vec": latt.matrix[i], "length": latt.abc[i], "orig_index": i} for i in [0, 1, 2]],
            key=lambda k: k["length"],
        )
        if latt_type in ("orthorhombic", "cubic"):
            # you want to keep the c axis where it is
            # to keep the C- settings
            transf = np.zeros(shape=(3, 3))
            if self.get_space_group_symbol().startswith("C"):
                transf[2] = [0, 0, 1]
                # c axis fixed; only a and b are sorted by length.
                a, b = sorted(latt.abc[:2])
                sorted_dic = sorted(
                    [{"vec": latt.matrix[i], "length": latt.abc[i], "orig_index": i} for i in [0, 1]],
                    key=lambda k: k["length"],
                )
                for i in range(2):
                    transf[i][sorted_dic[i]["orig_index"]] = 1
                c = latt.abc[2]
            elif self.get_space_group_symbol().startswith(
                "A"
            ):  # change to C-centering to match Setyawan/Curtarolo convention
                transf[2] = [1, 0, 0]
                a, b = sorted(latt.abc[1:])
                sorted_dic = sorted(
                    [{"vec": latt.matrix[i], "length": latt.abc[i], "orig_index": i} for i in [1, 2]],
                    key=lambda k: k["length"],
                )
                for i in range(2):
                    transf[i][sorted_dic[i]["orig_index"]] = 1
                c = latt.abc[0]
            else:
                # No centering constraint: simply sort all three axes.
                for i, d in enumerate(sorted_dic):
                    transf[i][d["orig_index"]] = 1
                a, b, c = sorted_lengths
            latt = Lattice.orthorhombic(a, b, c)
        elif latt_type == "tetragonal":
            # find the "a" vectors
            # it is basically the vector repeated two times
            transf = np.zeros(shape=(3, 3))
            a, b, c = sorted_lengths
            for i, d in enumerate(sorted_dic):
                transf[i][d["orig_index"]] = 1
            # If b == c but a != c, the unique axis is the shortest one;
            # swap so the repeated length becomes "a".
            if abs(b - c) < tol < abs(a - c):
                a, c = c, a
                transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)
            latt = Lattice.tetragonal(a, c)
        elif latt_type in ("hexagonal", "rhombohedral"):
            # for the conventional cell representation,
            # we always show the rhombohedral lattices as hexagonal
            # check first if we have the refined structure shows a rhombohedral
            # cell
            # if so, make a supercell
            a, b, c = latt.abc
            if np.all(np.abs([a - b, c - b, a - c]) < 0.001):
                # Rhombohedral: expand to the hexagonal triple cell.
                struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))
                a, b, c = sorted(struct.lattice.abc)
            if abs(b - c) < 0.001:
                a, c = c, a
            new_matrix = [
                [a / 2, -a * math.sqrt(3) / 2, 0],
                [a / 2, a * math.sqrt(3) / 2, 0],
                [0, 0, c],
            ]
            latt = Lattice(new_matrix)
            transf = np.eye(3, 3)
        elif latt_type == "monoclinic":
            # You want to keep the c axis where it is to keep the C- settings
            if self.get_space_group_operations().int_symbol.startswith("C"):
                transf = np.zeros(shape=(3, 3))
                transf[2] = [0, 0, 1]
                sorted_dic = sorted(
                    [{"vec": latt.matrix[i], "length": latt.abc[i], "orig_index": i} for i in [0, 1]],
                    key=lambda k: k["length"],
                )
                a = sorted_dic[0]["length"]
                b = sorted_dic[1]["length"]
                c = latt.abc[2]
                new_matrix = None
                # Try both orderings of the a/b axes, keeping c fixed, and
                # orient so the non-right angle alpha is < 90.
                for t in itertools.permutations(list(range(2)), 2):
                    m = latt.matrix
                    latt2 = Lattice([m[t[0]], m[t[1]], m[2]])
                    lengths = latt2.lengths
                    angles = latt2.angles
                    if angles[0] > 90:
                        # if the angle is > 90 we invert a and b to get
                        # an angle < 90
                        a, b, c, alpha, beta, gamma = Lattice([-m[t[0]], -m[t[1]], m[2]]).parameters
                        transf = np.zeros(shape=(3, 3))
                        transf[0][t[0]] = -1
                        transf[1][t[1]] = -1
                        transf[2][2] = 1
                        alpha = math.pi * alpha / 180
                        new_matrix = [
                            [a, 0, 0],
                            [0, b, 0],
                            [0, c * cos(alpha), c * sin(alpha)],
                        ]
                        continue
                    if angles[0] < 90:
                        transf = np.zeros(shape=(3, 3))
                        transf[0][t[0]] = 1
                        transf[1][t[1]] = 1
                        transf[2][2] = 1
                        a, b, c = lengths
                        alpha = math.pi * angles[0] / 180
                        new_matrix = [
                            [a, 0, 0],
                            [0, b, 0],
                            [0, c * cos(alpha), c * sin(alpha)],
                        ]
                if new_matrix is None:
                    # this if is to treat the case
                    # where alpha==90 (but we still have a monoclinic sg
                    new_matrix = [[a, 0, 0], [0, b, 0], [0, 0, c]]
                    transf = np.zeros(shape=(3, 3))
                    transf[2] = [0, 0, 1]  # see issue #1929
                    for i, d in enumerate(sorted_dic):
                        transf[i][d["orig_index"]] = 1
            # if not C-setting
            else:
                # try all permutations of the axis
                # keep the ones with the non-90 angle=alpha
                # and b<c
                new_matrix = None
                for t in itertools.permutations(list(range(3)), 3):
                    m = latt.matrix
                    a, b, c, alpha, beta, gamma = Lattice([m[t[0]], m[t[1]], m[t[2]]]).parameters
                    if alpha > 90 and b < c:
                        # Invert a and b so the non-right angle becomes < 90.
                        a, b, c, alpha, beta, gamma = Lattice([-m[t[0]], -m[t[1]], m[t[2]]]).parameters
                        transf = np.zeros(shape=(3, 3))
                        transf[0][t[0]] = -1
                        transf[1][t[1]] = -1
                        transf[2][t[2]] = 1
                        alpha = math.pi * alpha / 180
                        new_matrix = [
                            [a, 0, 0],
                            [0, b, 0],
                            [0, c * cos(alpha), c * sin(alpha)],
                        ]
                        continue
                    if alpha < 90 and b < c:
                        transf = np.zeros(shape=(3, 3))
                        transf[0][t[0]] = 1
                        transf[1][t[1]] = 1
                        transf[2][t[2]] = 1
                        alpha = math.pi * alpha / 180
                        new_matrix = [
                            [a, 0, 0],
                            [0, b, 0],
                            [0, c * cos(alpha), c * sin(alpha)],
                        ]
                if new_matrix is None:
                    # this if is to treat the case
                    # where alpha==90 (but we still have a monoclinic sg
                    new_matrix = [
                        [sorted_lengths[0], 0, 0],
                        [0, sorted_lengths[1], 0],
                        [0, 0, sorted_lengths[2]],
                    ]
                    transf = np.zeros(shape=(3, 3))
                    for i, d in enumerate(sorted_dic):
                        transf[i][d["orig_index"]] = 1
            if international_monoclinic:
                # The above code makes alpha the non-right angle.
                # The following will convert to proper international convention
                # that beta is the non-right angle.
                op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
                transf = np.dot(op, transf)
                new_matrix = np.dot(op, new_matrix)
                beta = Lattice(new_matrix).beta
                if beta < 90:
                    op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
                    transf = np.dot(op, transf)
                    new_matrix = np.dot(op, new_matrix)
            latt = Lattice(new_matrix)
        elif latt_type == "triclinic":
            # we use a LLL Minkowski-like reduction for the triclinic cells
            struct = struct.get_reduced_structure("LLL")
            # NOTE(review): a, b, c and the angles below come from `latt`,
            # which still refers to the pre-reduction lattice -- confirm that
            # this is intended rather than struct.lattice after reduction.
            a, b, c = latt.lengths
            alpha, beta, gamma = [math.pi * i / 180 for i in latt.angles]
            new_matrix = None
            # Try the four sign combinations of the axes and keep the first
            # whose reciprocal lattice angles are all acute or all obtuse.
            test_matrix = [
                [a, 0, 0],
                [b * cos(gamma), b * sin(gamma), 0.0],
                [
                    c * cos(beta),
                    c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma),
                    c
                    * math.sqrt(
                        sin(gamma) ** 2 - cos(alpha) ** 2 - cos(beta) ** 2 + 2 * cos(alpha) * cos(beta) * cos(gamma)
                    )
                    / sin(gamma),
                ],
            ]

            def is_all_acute_or_obtuse(m):
                recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)
                return np.all(recp_angles <= 90) or np.all(recp_angles > 90)

            if is_all_acute_or_obtuse(test_matrix):
                transf = np.eye(3)
                new_matrix = test_matrix
            test_matrix = [
                [-a, 0, 0],
                [b * cos(gamma), b * sin(gamma), 0.0],
                [
                    -c * cos(beta),
                    -c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma),
                    -c
                    * math.sqrt(
                        sin(gamma) ** 2 - cos(alpha) ** 2 - cos(beta) ** 2 + 2 * cos(alpha) * cos(beta) * cos(gamma)
                    )
                    / sin(gamma),
                ],
            ]
            if is_all_acute_or_obtuse(test_matrix):
                transf = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]
                new_matrix = test_matrix
            test_matrix = [
                [-a, 0, 0],
                [-b * cos(gamma), -b * sin(gamma), 0.0],
                [
                    c * cos(beta),
                    c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma),
                    c
                    * math.sqrt(
                        sin(gamma) ** 2 - cos(alpha) ** 2 - cos(beta) ** 2 + 2 * cos(alpha) * cos(beta) * cos(gamma)
                    )
                    / sin(gamma),
                ],
            ]
            if is_all_acute_or_obtuse(test_matrix):
                transf = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
                new_matrix = test_matrix
            test_matrix = [
                [a, 0, 0],
                [-b * cos(gamma), -b * sin(gamma), 0.0],
                [
                    -c * cos(beta),
                    -c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma),
                    -c
                    * math.sqrt(
                        sin(gamma) ** 2 - cos(alpha) ** 2 - cos(beta) ** 2 + 2 * cos(alpha) * cos(beta) * cos(gamma)
                    )
                    / sin(gamma),
                ],
            ]
            if is_all_acute_or_obtuse(test_matrix):
                transf = [[1, 0, 0], [0, -1, 0], [0, 0, -1]]
                new_matrix = test_matrix
            latt = Lattice(new_matrix)
        # Apply the accumulated permutation/inversion to the fractional
        # coordinates and rebuild the structure on the standardized lattice.
        new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T
        new_struct = Structure(
            latt,
            struct.species_and_occu,
            new_coords,
            site_properties=struct.site_properties,
            to_unit_cell=True,
        )
        return new_struct.get_sorted_structure()
    def get_kpoint_weights(self, kpoints, atol=1e-5):
        """
        Calculate the weights for a list of kpoints.

        Args:
            kpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note
                that the code does not check that the list of kpoints
                provided does not contain duplicates.
            atol (float): Tolerance for fractional coordinates comparisons.

        Returns:
            List of weights, in the SAME order as kpoints.

        Raises:
            ValueError: If the kpoints cannot be matched 1:1 against the
                irreducible mesh inferred from them.
        """
        kpts = np.array(kpoints)
        shift = []
        mesh = []
        # Infer the Monkhorst-Pack mesh dimensions along each axis, and
        # whether the mesh is Gamma-centered (shift 0) or shifted (shift 1),
        # from the fractional coordinates of the supplied kpoints.
        for i in range(3):
            nonzero = [i for i in kpts[:, i] if abs(i) > 1e-5]
            if len(nonzero) != len(kpts):
                # gamma centered
                if not nonzero:
                    mesh.append(1)
                else:
                    m = np.abs(np.round(1 / np.array(nonzero)))
                    mesh.append(int(max(m)))
                shift.append(0)
            else:
                # Monk
                m = np.abs(np.round(0.5 / np.array(nonzero)))
                mesh.append(int(max(m)))
                shift.append(1)
        # mapping[i] labels the symmetry orbit of grid point i; the weight of
        # a kpoint is the size of its orbit.
        mapping, grid = spglib.get_ir_reciprocal_mesh(np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
        mapping = list(mapping)
        grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh
        weights = []
        mapped = defaultdict(int)
        # Match each input kpoint to a grid point modulo lattice translations.
        for k in kpoints:
            for i, g in enumerate(grid):
                if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):
                    mapped[tuple(g)] += 1
                    weights.append(mapping.count(mapping[i]))
                    break
        # Sanity check: every irreducible point must be hit exactly once.
        if (len(mapped) != len(set(mapping))) or (not all(v == 1 for v in mapped.values())):
            raise ValueError("Unable to find 1:1 corresponding between input " "kpoints and irreducible grid!")
        return [w / sum(weights) for w in weights]
def is_laue(self):
"""
Check if the point group of the structure
has Laue symmetry (centrosymmetry)
"""
laue = [
"-1",
"2/m",
"mmm",
"4/m",
"4/mmm",
"-3",
"-3m",
"6/m",
"6/mmm",
"m-3",
"m-3m",
]
return str(self.get_point_group_symbol()) in laue
class PointGroupAnalyzer:
    """
    A class to analyze the point group of a molecule. The general outline of
    the algorithm is as follows:

    1. Center the molecule around its center of mass.
    2. Compute the inertia tensor and the eigenvalues and eigenvectors.
    3. Handle the symmetry detection based on eigenvalues.

        a. Linear molecules have one zero eigenvalue. Possible symmetry
           operations are C*v or D*v
        b. Asymmetric top molecules have all different eigenvalues. The
           maximum rotational symmetry in such molecules is 2
        c. Symmetric top molecules have 1 unique eigenvalue, which gives a
           unique rotation axis. All axial point groups are possible
           except the cubic groups (T & O) and I.
        d. Spherical top molecules have all three eigenvalues equal. They
           have the rare T, O or I point groups.

    .. attribute:: sch_symbol

        Schoenflies symbol of the detected point group.
    """

    # Shared inversion operation reused by several detection routines.
    inversion_op = SymmOp.inversion()

    def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01, matrix_tol=0.1):
        """
        The default settings are usually sufficient.

        Args:
            mol (Molecule): Molecule to determine point group for.
            tolerance (float): Distance tolerance to consider sites as
                symmetrically equivalent. Defaults to 0.3 Angstrom.
            eigen_tolerance (float): Tolerance to compare eigen values of
                the inertia tensor. Defaults to 0.01.
            matrix_tol (float): Tolerance used to generate the full set of
                symmetry operations of the point group.
        """
        self.mol = mol
        self.centered_mol = mol.get_centered_molecule()
        self.tol = tolerance
        self.eig_tol = eigen_tolerance
        self.mat_tol = matrix_tol
        self._analyze()
        # C1v and C1h (a single mirror plane) are both conventionally Cs.
        if self.sch_symbol in ["C1v", "C1h"]:
            self.sch_symbol = "Cs"

    def _analyze(self):
        """Classify the molecule by the eigenvalue pattern of its (normalized)
        inertia tensor and dispatch to the appropriate handler."""
        if len(self.centered_mol) == 1:
            # A single atom is spherically symmetric.
            self.sch_symbol = "Kh"
        else:
            inertia_tensor = np.zeros((3, 3))
            total_inertia = 0
            for site in self.centered_mol:
                c = site.coords
                wt = site.species.weight
                # Diagonal terms: I_ii = sum(m * (y^2 + z^2)), cyclic.
                for i in range(3):
                    inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2 + c[(i + 2) % 3] ** 2)
                # Off-diagonal products of inertia (symmetric).
                for i, j in [(0, 1), (1, 2), (0, 2)]:
                    inertia_tensor[i, j] += -wt * c[i] * c[j]
                    inertia_tensor[j, i] += -wt * c[j] * c[i]
                total_inertia += wt * np.dot(c, c)
            # Normalize the inertia tensor so that it does not scale with size
            # of the system. This mitigates the problem of choosing a proper
            # comparison tolerance for the eigenvalues.
            inertia_tensor /= total_inertia
            eigvals, eigvecs = np.linalg.eig(inertia_tensor)
            self.principal_axes = eigvecs.T
            self.eigvals = eigvals
            v1, v2, v3 = eigvals
            # One (near-)zero eigenvalue => linear molecule.
            eig_zero = abs(v1 * v2 * v3) < self.eig_tol
            eig_all_same = abs(v1 - v2) < self.eig_tol and abs(v1 - v3) < self.eig_tol
            eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol
            self.rot_sym = []
            self.symmops = [SymmOp(np.eye(4))]
            if eig_zero:
                logger.debug("Linear molecule detected")
                self._proc_linear()
            elif eig_all_same:
                logger.debug("Spherical top molecule detected")
                self._proc_sph_top()
            elif eig_all_diff:
                logger.debug("Asymmetric top molecule detected")
                self._proc_asym_top()
            else:
                logger.debug("Symmetric top molecule detected")
                self._proc_sym_top()

    def _proc_linear(self):
        """Handles linear molecules: D*h if centrosymmetric, else C*v."""
        if self.is_valid_op(PointGroupAnalyzer.inversion_op):
            self.sch_symbol = "D*h"
            self.symmops.append(PointGroupAnalyzer.inversion_op)
        else:
            self.sch_symbol = "C*v"

    def _proc_asym_top(self):
        """
        Handles asymmetric top molecules, which cannot contain rotational
        symmetry larger than 2.
        """
        self._check_R2_axes_asym()
        if len(self.rot_sym) == 0:
            logger.debug("No rotation symmetries detected.")
            self._proc_no_rot_sym()
        elif len(self.rot_sym) == 3:
            logger.debug("Dihedral group detected.")
            self._proc_dihedral()
        else:
            logger.debug("Cyclic group detected.")
            self._proc_cyclic()

    def _proc_sym_top(self):
        """
        Handles symmetric top molecules which has one unique eigenvalue whose
        corresponding principal axis is a unique rotational axis. More complex
        handling required to look for R2 axes perpendicular to this unique
        axis.
        """
        # The unique axis is the one whose eigenvalue differs from the
        # (degenerate) pair.
        if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:
            ind = 2
        elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:
            ind = 0
        else:
            ind = 1
        logger.debug("Eigenvalues = %s." % self.eigvals)
        unique_axis = self.principal_axes[ind]
        self._check_rot_sym(unique_axis)
        logger.debug("Rotation symmetries = %s" % self.rot_sym)
        if len(self.rot_sym) > 0:
            self._check_perpendicular_r2_axis(unique_axis)
        if len(self.rot_sym) >= 2:
            self._proc_dihedral()
        elif len(self.rot_sym) == 1:
            self._proc_cyclic()
        else:
            self._proc_no_rot_sym()

    def _proc_no_rot_sym(self):
        """
        Handles molecules with no rotational symmetry. Only possible point
        groups are C1, Cs and Ci.
        """
        self.sch_symbol = "C1"
        if self.is_valid_op(PointGroupAnalyzer.inversion_op):
            self.sch_symbol = "Ci"
            self.symmops.append(PointGroupAnalyzer.inversion_op)
        else:
            # Any single mirror plane makes the group Cs.
            for v in self.principal_axes:
                mirror_type = self._find_mirror(v)
                if not mirror_type == "":
                    self.sch_symbol = "Cs"
                    break

    def _proc_cyclic(self):
        """
        Handles cyclic group molecules.
        """
        main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
        self.sch_symbol = "C{}".format(rot)
        mirror_type = self._find_mirror(main_axis)
        if mirror_type == "h":
            self.sch_symbol += "h"
        elif mirror_type == "v":
            self.sch_symbol += "v"
        elif mirror_type == "":
            # No mirror: check for an improper rotation S_{2n}.
            if self.is_valid_op(SymmOp.rotoreflection(main_axis, angle=180 / rot)):
                self.sch_symbol = "S{}".format(2 * rot)

    def _proc_dihedral(self):
        """
        Handles dihedral group molecules, i.e those with intersecting R2 axes
        and a main axis.
        """
        main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
        self.sch_symbol = "D{}".format(rot)
        mirror_type = self._find_mirror(main_axis)
        if mirror_type == "h":
            self.sch_symbol += "h"
        elif not mirror_type == "":
            # "v" or "d" mirrors both give the Dnd label here.
            self.sch_symbol += "d"

    def _check_R2_axes_asym(self):
        """
        Test for 2-fold rotation along the principal axes. Used to handle
        asymmetric top molecules.
        """
        for v in self.principal_axes:
            op = SymmOp.from_axis_angle_and_translation(v, 180)
            if self.is_valid_op(op):
                self.symmops.append(op)
                self.rot_sym.append((v, 2))

    def _find_mirror(self, axis):
        """
        Looks for mirror symmetry of specified type about axis. Possible
        types are "h" or "vd". Horizontal (h) mirrors are perpendicular to
        the axis while vertical (v) or diagonal (d) mirrors are parallel. v
        mirrors has atoms lying on the mirror plane while d mirrors do
        not.

        Returns:
            str: "h", "v", "d" or "" if no mirror was found.
        """
        mirror_type = ""
        # First test whether the axis itself is the normal to a mirror plane.
        if self.is_valid_op(SymmOp.reflection(axis)):
            self.symmops.append(SymmOp.reflection(axis))
            mirror_type = "h"
        else:
            # Iterate through all pairs of atoms to find mirror
            for s1, s2 in itertools.combinations(self.centered_mol, 2):
                if s1.species == s2.species:
                    normal = s1.coords - s2.coords
                    # NOTE(review): no abs() on the dot products below, so a
                    # strongly negative projection also passes -- confirm the
                    # perpendicularity test is intended this way.
                    if np.dot(normal, axis) < self.tol:
                        op = SymmOp.reflection(normal)
                        if self.is_valid_op(op):
                            self.symmops.append(op)
                            if len(self.rot_sym) > 1:
                                mirror_type = "d"
                                # Downgrade to "v" if the plane contains a
                                # rotation axis other than `axis` itself.
                                for v, r in self.rot_sym:
                                    if np.linalg.norm(v - axis) >= self.tol:
                                        if np.dot(v, normal) < self.tol:
                                            mirror_type = "v"
                                            break
                            else:
                                mirror_type = "v"
                            break
        return mirror_type

    def _get_smallest_set_not_on_axis(self, axis):
        """
        Returns the smallest list of atoms with the same species and
        distance from origin AND does not lie on the specified axis. This
        maximal set limits the possible rotational symmetry operations,
        since atoms lying on a test axis is irrelevant in testing rotational
        symmetryOperations.
        """

        def not_on_axis(site):
            # Cross product vanishes when the site lies on the axis.
            v = np.cross(site.coords, axis)
            return np.linalg.norm(v) > self.tol

        valid_sets = []
        origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
        for test_set in dist_el_sites.values():
            valid_set = list(filter(not_on_axis, test_set))
            if len(valid_set) > 0:
                valid_sets.append(valid_set)
        return min(valid_sets, key=lambda s: len(s))

    def _check_rot_sym(self, axis):
        """
        Determines the rotational symmetry about supplied axis. Used only for
        symmetric top molecules which has possible rotational symmetry
        operations > 2.

        Returns:
            int: The order of the highest rotation found (1 if none).
        """
        min_set = self._get_smallest_set_not_on_axis(axis)
        max_sym = len(min_set)
        # Test orders from high to low; only divisors of the set size can be
        # valid rotation orders.
        for i in range(max_sym, 0, -1):
            if max_sym % i != 0:
                continue
            op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
            rotvalid = self.is_valid_op(op)
            if rotvalid:
                self.symmops.append(op)
                self.rot_sym.append((axis, i))
                return i
        return 1

    def _check_perpendicular_r2_axis(self, axis):
        """
        Checks for R2 axes perpendicular to unique axis. For handling
        symmetric top molecules.

        Returns:
            True if such an axis is found, else None.
        """
        min_set = self._get_smallest_set_not_on_axis(axis)
        for s1, s2 in itertools.combinations(min_set, 2):
            # Candidate R2 axis: perpendicular to both the pair separation
            # and the unique axis.
            test_axis = np.cross(s1.coords - s2.coords, axis)
            if np.linalg.norm(test_axis) > self.tol:
                op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
                r2present = self.is_valid_op(op)
                if r2present:
                    self.symmops.append(op)
                    self.rot_sym.append((test_axis, 2))
                    return True
        return None

    def _proc_sph_top(self):
        """
        Handles Spherical Top Molecules, which belong to the T, O or I point
        groups.
        """
        self._find_spherical_axes()
        if len(self.rot_sym) == 0:
            logger.debug("Accidental speherical top!")
            self._proc_sym_top()
        main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
        if rot < 3:
            logger.debug("Accidental speherical top!")
            self._proc_sym_top()
        elif rot == 3:
            mirror_type = self._find_mirror(main_axis)
            if mirror_type != "":
                if self.is_valid_op(PointGroupAnalyzer.inversion_op):
                    self.symmops.append(PointGroupAnalyzer.inversion_op)
                    self.sch_symbol = "Th"
                else:
                    self.sch_symbol = "Td"
            else:
                self.sch_symbol = "T"
        elif rot == 4:
            if self.is_valid_op(PointGroupAnalyzer.inversion_op):
                self.symmops.append(PointGroupAnalyzer.inversion_op)
                self.sch_symbol = "Oh"
            else:
                self.sch_symbol = "O"
        elif rot == 5:
            if self.is_valid_op(PointGroupAnalyzer.inversion_op):
                self.symmops.append(PointGroupAnalyzer.inversion_op)
                self.sch_symbol = "Ih"
            else:
                self.sch_symbol = "I"

    def _find_spherical_axes(self):
        """
        Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
        group T molecules have only one unique 3-fold and one unique 2-fold
        axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules
        have a unique 5-fold axis.
        """
        rot_present = defaultdict(bool)
        origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
        # Use the smallest equivalent-site set to limit candidate axes.
        test_set = min(dist_el_sites.values(), key=lambda s: len(s))
        coords = [s.coords for s in test_set]
        for c1, c2, c3 in itertools.combinations(coords, 3):
            for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
                if not rot_present[2]:
                    # Midpoint direction of a site pair is a candidate C2 axis.
                    test_axis = cc1 + cc2
                    if np.linalg.norm(test_axis) > self.tol:
                        op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
                        rot_present[2] = self.is_valid_op(op)
                        if rot_present[2]:
                            self.symmops.append(op)
                            self.rot_sym.append((test_axis, 2))
            # The normal of a site triplet is a candidate C3/C4/C5 axis.
            test_axis = np.cross(c2 - c1, c3 - c1)
            if np.linalg.norm(test_axis) > self.tol:
                for r in (3, 4, 5):
                    if not rot_present[r]:
                        op = SymmOp.from_axis_angle_and_translation(test_axis, 360 / r)
                        rot_present[r] = self.is_valid_op(op)
                        if rot_present[r]:
                            self.symmops.append(op)
                            self.rot_sym.append((test_axis, r))
                            break
            if rot_present[2] and rot_present[3] and (rot_present[4] or rot_present[5]):
                break

    def get_pointgroup(self):
        """
        Returns a PointGroup object for the molecule.
        """
        return PointGroupOperations(self.sch_symbol, self.symmops, self.mat_tol)

    def get_symmetry_operations(self):
        """
        Return symmetry operations as a list of SymmOp objects.
        Returns Cartesian coord symmops.

        Returns:
            ([SymmOp]): List of symmetry operations.
        """
        return generate_full_symmops(self.symmops, self.tol)

    def is_valid_op(self, symmop):
        """
        Check if a particular symmetry operation is a valid symmetry operation
        for a molecule, i.e., the operation maps all atoms to another
        equivalent atom.

        Args:
            symmop (SymmOp): Symmetry operation to test.

        Returns:
            (bool): Whether SymmOp is valid for Molecule.
        """
        coords = self.centered_mol.cart_coords
        for site in self.centered_mol:
            coord = symmop.operate(site.coords)
            ind = find_in_coord_list(coords, coord, self.tol)
            # Each image must hit exactly one site of the same species.
            if not (len(ind) == 1 and self.centered_mol[ind[0]].species == site.species):
                return False
        return True

    def _get_eq_sets(self):
        """
        Calculates the dictionary for mapping equivalent atoms onto each other.

        Args:
            None

        Returns:
            dict: The returned dictionary has two possible keys:

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        UNIT = np.eye(3)
        eq_sets, operations = defaultdict(set), defaultdict(dict)
        symm_ops = [op.rotation_matrix for op in generate_full_symmops(self.symmops, self.tol)]

        def get_clustered_indices():
            # Group site indices by (distance-from-origin, species); the
            # origin atom, if any, forms its own singleton cluster.
            indices = cluster_sites(self.centered_mol, self.tol, give_only_index=True)
            out = list(indices[1].values())
            if indices[0] is not None:
                out.append([indices[0]])
            return out

        for index in get_clustered_indices():
            sites = self.centered_mol.cart_coords[index]
            for i, reference in zip(index, sites):
                for op in symm_ops:
                    rotated = np.dot(op, sites.T).T
                    matched_indices = find_in_coord_list(rotated, reference, self.tol)
                    # Translate positions within the cluster back to
                    # molecule-level site indices.
                    matched_indices = {dict(enumerate(index))[i] for i in matched_indices}
                    eq_sets[i] |= matched_indices
                    if i not in operations:
                        operations[i] = {j: op.T if j != i else UNIT for j in matched_indices}
                    else:
                        for j in matched_indices:
                            if j not in operations[i]:
                                operations[i][j] = op.T if j != i else UNIT
                    for j in matched_indices:
                        if j not in operations:
                            operations[j] = {i: op if j != i else UNIT}
                        elif i not in operations[j]:
                            operations[j][i] = op if j != i else UNIT
        return {"eq_sets": eq_sets, "sym_ops": operations}

    @staticmethod
    def _combine_eq_sets(eq_sets, operations):
        """Combines the dicts of _get_equivalent_atom_dicts into one

        Args:
            eq_sets (dict)
            operations (dict)

        Returns:
            dict: The returned dictionary has two possible keys:

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        UNIT = np.eye(3)

        def all_equivalent_atoms_of_i(i, eq_sets, ops):
            """WORKS INPLACE on operations"""
            # Breadth-first walk of the equivalence relation starting at i,
            # composing operations along the way so ops[k][i] is filled in.
            visited = set([i])
            tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]}
            while tmp_eq_sets:
                new_tmp_eq_sets = {}
                for j in tmp_eq_sets:
                    if j in visited:
                        continue
                    visited.add(j)
                    for k in tmp_eq_sets[j]:
                        new_tmp_eq_sets[k] = eq_sets[k] - visited
                        if i not in ops[k]:
                            ops[k][i] = np.dot(ops[j][i], ops[k][j]) if k != i else UNIT
                        ops[i][k] = ops[k][i].T
                tmp_eq_sets = new_tmp_eq_sets
            return visited, ops

        eq_sets = copy.deepcopy(eq_sets)
        ops = copy.deepcopy(operations)
        to_be_deleted = set()
        for i in eq_sets:
            if i in to_be_deleted:
                continue
            visited, ops = all_equivalent_atoms_of_i(i, eq_sets, ops)
            to_be_deleted |= visited - {i}
        # Keep a single representative per equivalence class.
        for k in to_be_deleted:
            eq_sets.pop(k, None)
        return {"eq_sets": eq_sets, "sym_ops": ops}

    def get_equivalent_atoms(self):
        """Returns sets of equivalent atoms with symmetry operations

        Args:
            None

        Returns:
            dict: The returned dictionary has two possible keys:

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        eq = self._get_eq_sets()
        return self._combine_eq_sets(eq["eq_sets"], eq["sym_ops"])

    def symmetrize_molecule(self):
        """Returns a symmetrized molecule

        The equivalent atoms obtained via
        :meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
        are rotated, mirrored... unto one position.
        Then the average position is calculated.
        The average position is rotated, mirrored... back with the inverse
        of the previous symmetry operations, which gives the
        symmetrized molecule

        Args:
            None

        Returns:
            dict: The returned dictionary has three possible keys:

            ``sym_mol``:
            A symmetrized molecule instance.

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        eq = self.get_equivalent_atoms()
        eq_sets, ops = eq["eq_sets"], eq["sym_ops"]
        coords = self.centered_mol.cart_coords.copy()
        for i, eq_indices in eq_sets.items():
            # Map all equivalent atoms onto the representative and average.
            for j in eq_indices:
                coords[j] = np.dot(ops[j][i], coords[j])
            coords[i] = np.mean(coords[list(eq_indices)], axis=0)
            # Map the averaged position back onto each equivalent atom.
            for j in eq_indices:
                if j == i:
                    continue
                coords[j] = np.dot(ops[i][j], coords[i])
                # NOTE(review): the assignment below duplicates the one above;
                # it is redundant (idempotent) but kept to preserve behavior.
                coords[j] = np.dot(ops[i][j], coords[i])
        molecule = Molecule(species=self.centered_mol.species_and_occu, coords=coords)
        return {"sym_mol": molecule, "eq_sets": eq_sets, "sym_ops": ops}
def iterative_symmetrize(mol, max_n=10, tolerance=0.3, epsilon=1e-2):
    """Iteratively symmetrize a molecule until its coordinates converge.

    Repeatedly applies
    :meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.symmetrize_molecule`:
    equivalent atoms are mapped onto one position, averaged, and mapped back
    with the inverse operations. Iteration stops once two successive
    structures agree elementwise within ``epsilon``, or after ``max_n``
    rounds.

    Args:
        mol (Molecule): A pymatgen Molecule instance.
        max_n (int): Maximum number of iterations.
        tolerance (float): Tolerance for detecting symmetry.
            Gets passed as Argument into
            :class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
        epsilon (float): If the elementwise absolute difference of two
            subsequently symmetrized structures is smaller epsilon,
            the iteration stops before ``max_n`` is reached.

    Returns:
        dict: The returned dictionary has three possible keys:

        ``sym_mol``: A symmetrized molecule instance.

        ``eq_sets``: A dictionary of indices mapping to sets of indices,
        each key maps to indices of all equivalent atoms.
        The keys are guaranteed to be not equivalent.

        ``sym_ops``: Twofold nested dictionary.
        ``operations[i][j]`` gives the symmetry operation
        that maps atom ``i`` unto ``j``.
    """
    previous = mol
    for _ in range(max_n + 1):
        analyzer = PointGroupAnalyzer(previous, tolerance=tolerance)
        eq = analyzer.symmetrize_molecule()
        symmetrized = eq["sym_mol"]
        # Converged: this round barely moved any atom.
        if np.allclose(symmetrized.cart_coords, previous.cart_coords, atol=epsilon):
            break
        previous = symmetrized
    return eq
def cluster_sites(mol, tol, give_only_index=False):
    """
    Cluster sites based on distance and species type.

    Args:
        mol (Molecule): Molecule **with origin at center of mass**.
        tol (float): Tolerance to use.
        give_only_index (bool): If True, return site indices rather than the
            sites themselves.

    Returns:
        (origin_site, clustered_sites): origin_site is a site at the center
        of mass (None if there are no origin atoms). clustered_sites is a
        dict of {(avg_dist, species_and_occu): [list of sites]}
    """
    import scipy.cluster as spcluster

    # fclusterdata needs data of dimension > 1, so pad each distance from the
    # origin with a dummy zero second coordinate.
    dists = [[np.linalg.norm(site.coords), 0] for site in mol]
    labels = spcluster.hierarchy.fclusterdata(dists, tol, criterion="distance")

    # Group the [dist, 0] pairs by cluster label, then average each group.
    grouped = defaultdict(list)
    for label, dist_pair in zip(labels, dists):
        grouped[label].append(dist_pair)
    avg_dist = {label: np.mean(pairs) for label, pairs in grouped.items()}

    origin_site = None
    clustered_sites = defaultdict(list)
    for idx, (label, site) in enumerate(zip(labels, mol)):
        entry = idx if give_only_index else site
        if avg_dist[label] < tol:
            origin_site = entry
        else:
            clustered_sites[(avg_dist[label], site.species)].append(entry)
    return origin_site, clustered_sites
def generate_full_symmops(symmops, tol):
    """
    Recursive algorithm to permute through all possible combinations of the
    initially supplied symmetry operations to arrive at a complete set of
    operations mapping a single atom to all other equivalent atoms in the
    point group. This assumes that the initial number already uniquely
    identifies all operations.

    Args:
        symmops ([SymmOp]): Initial set of symmetry operations.
        tol (float): Tolerance for comparing affine matrices.

    Returns:
        Full set of symmetry operations.
    """
    # Uses an algorithm described in:
    # Gregory Butler. Fundamental Algorithms for Permutation Groups.
    # Lecture Notes in Computer Science (Book 559). Springer, 1991. page 15
    identity = np.eye(4)

    def _contains(matrices, candidate):
        # True if some matrix in `matrices` matches `candidate` entrywise
        # within tol.
        close = np.abs(matrices - candidate) < tol
        return bool(np.any(np.all(np.all(close, axis=2), axis=1)))

    generators = [op.affine_matrix for op in symmops if not np.allclose(op.affine_matrix, identity)]
    if not generators:
        # C1 symmetry breaks assumptions in the algorithm afterwards
        return symmops

    # `full` grows while it is iterated -- a deliberate closure walk over the
    # group generated by `generators`.
    full = list(generators)
    for g in full:
        for s in generators:
            product = np.dot(g, s)
            if not _contains(full, product):
                full.append(product)
    if not _contains(full, identity):
        full.append(identity)
    return [SymmOp(matrix) for matrix in full]
class SpacegroupOperations(list):
    """
    Represents a space group, which is a collection of symmetry operations.
    """

    def __init__(self, int_symbol, int_number, symmops):
        """
        Args:
            int_symbol (str): International symbol of the spacegroup.
            int_number (int): International number of the spacegroup.
            symmops ([SymmOp]): Symmetry operations associated with the
                spacegroup.
        """
        self.int_symbol = int_symbol
        self.int_number = int_number
        super().__init__(symmops)

    def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):
        """
        Given two sets of PeriodicSites, test if they are actually
        symmetrically equivalent under this space group. Useful, for example,
        if you want to test if selecting atoms 1 and 2 out of a set of 4 atoms
        are symmetrically the same as selecting atoms 3 and 4, etc.

        One use is in PartialRemoveSpecie transformation to return only
        symmetrically distinct arrangements of atoms.

        Args:
            sites1 ([PeriodicSite]): 1st set of sites
            sites2 ([PeriodicSite]): 2nd set of sites
            symm_prec (float): Tolerance in atomic distance to test if atoms
                are symmetrically similar.

        Returns:
            (bool): Whether the two sets of sites are symmetrically
            equivalent.
        """

        def in_sites(site):
            # True if `site` is a periodic image of any site in sites1.
            return any(ref.is_periodic_image(site, symm_prec, False) for ref in sites1)

        # The sets are equivalent if some operation maps every site of
        # sites2 onto a member of sites1.
        for op in self:
            mapped = [PeriodicSite(site.species, op.operate(site.frac_coords), site.lattice) for site in sites2]
            if all(in_sites(site) for site in mapped):
                return True
        return False

    def __str__(self):
        return f"{self.int_symbol} ({self.int_number}) spacegroup"
class PointGroupOperations(list):
    """
    Defines a point group, which is essentially a sequence of symmetry
    operations.

    .. attribute:: sch_symbol

        Schoenflies symbol of the point group.
    """

    def __init__(self, sch_symbol, operations, tol=0.1):
        """
        Args:
            sch_symbol (str): Schoenflies symbol of the point group.
            operations ([SymmOp]): Initial set of symmetry operations. It is
                sufficient to provide only just enough operations to generate
                the full set of symmetries.
            tol (float): Tolerance to generate the full set of symmetry
                operations.
        """
        # Close the generating set before populating the list contents.
        full_operations = generate_full_symmops(operations, tol)
        super().__init__(full_operations)
        self.sch_symbol = sch_symbol

    def __str__(self):
        return self.sch_symbol

    def __repr__(self):
        return str(self)
|
richardtran415/pymatgen
|
pymatgen/symmetry/analyzer.py
|
Python
|
mit
| 62,326
|
[
"CRYSTAL",
"pymatgen"
] |
69db722ab297c9bae72a321edbc3f8e8cf30118bdb9e580d7bd4ba73831bf60f
|
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .mstats_basic import _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'circmean', 'circvar', 'circstd', 'anderson_ksamp',
'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
'yeojohnson_normplot'
]
# Lightweight result containers returned by `bayes_mvs`: each pairs a point
# estimate (`statistic`) with its confidence interval (`minmax`).
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.

    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
        Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.  Must satisfy ``0 < alpha < 1``.

    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard deviation,
        respectively.  Each result is a tuple of the form::

            (center, (lower, upper))

        with `center` the mean of the conditional pdf of the value given the
        data, and `(lower, upper)` a confidence interval, centered on the
        median, containing the estimate to a probability ``alpha``.

    Raises
    ------
    ValueError
        If `alpha` is not strictly between 0 and 1.

    See Also
    --------
    mvsdist

    Notes
    -----
    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses Jeffrey's prior for variance and std.

    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))
    >>> var
    Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
    >>> std
    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))
    """
    # Validate `alpha` first so that an invalid value fails fast, before the
    # (comparatively expensive) construction of the three frozen
    # distributions in `mvsdist`.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)

    m, v, s = mvsdist(data)

    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))

    return m_res, v_res, s_res
def mvsdist(data):
    """
    'Frozen' distributions for mean, variance, and standard deviation of data.

    Parameters
    ----------
    data : array_like
        Input array.  Converted to 1-D using ravel.
        Requires 2 or more data-points.

    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data.
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data.
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data.

    Raises
    ------
    ValueError
        If `data` contains fewer than two points.

    See Also
    --------
    bayes_mvs

    Notes
    -----
    ``bayes_mvs(data)`` is equivalent to
    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.mvsdist(data)
    >>> mean.mean()
    9.0
    >>> mean.interval(0.95)
    (6.6120585482655692, 11.387941451734431)
    >>> mean.std()
    1.1952286093343936
    """
    samples = ravel(data)
    n = len(samples)
    if n < 2:
        raise ValueError("Need at least 2 data-points.")

    xbar = samples.mean()
    var_hat = samples.var()

    if n > 1000:
        # Large-sample regime: all three posteriors are well approximated
        # by Gaussians.
        mdist = distributions.norm(loc=xbar, scale=math.sqrt(var_hat / n))
        sdist = distributions.norm(loc=math.sqrt(var_hat),
                                   scale=math.sqrt(var_hat / (2. * n)))
        vdist = distributions.norm(loc=var_hat,
                                   scale=math.sqrt(2.0 / n) * var_hat)
        return mdist, vdist, sdist

    # Small-sample regime: exact posteriors under Jeffreys' prior.
    dof = n - 1
    half_dof = dof / 2.
    scale_fac = n * var_hat / 2.
    mdist = distributions.t(dof, loc=xbar, scale=math.sqrt(var_hat / dof))
    sdist = distributions.gengamma(half_dof, -2, scale=math.sqrt(scale_fac))
    vdist = distributions.invgamma(half_dof, scale=scale_fac)
    return mdist, vdist, sdist
def kstat(data, n=2):
    r"""
    Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic k_n is the unique symmetric unbiased estimator of
    the nth cumulant kappa_n.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Default is equal to 2.

    Returns
    -------
    kstat : float
        The nth k-statistic.

    Raises
    ------
    ValueError
        If `n` is outside {1, 2, 3, 4} or `data` is empty.

    See Also
    --------
    kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    Notes
    -----
    For a sample of size ``N`` the k-statistics are polynomial combinations
    of the power sums ``S[k] = sum(x**k)``; see
    http://mathworld.wolfram.com/k-Statistic.html and
    http://mathworld.wolfram.com/Cumulant.html for the closed forms.
    """
    if n > 4 or n < 1:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    n = int(n)

    data = ravel(data)
    N = data.size
    if N == 0:
        raise ValueError("Data input must not be empty")
    # On nan input, return nan without warning.
    if np.isnan(np.sum(data)):
        return np.nan

    # Power sums S[k] = sum(x**k); S[0] is unused.
    S = np.zeros(n + 1, np.float64)
    for k in range(1, n + 1):
        S[k] = np.sum(data**k, axis=0)

    if n == 1:
        return S[1] * 1.0/N
    if n == 2:
        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
    if n == 3:
        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
    # n == 4 is the only remaining possibility after the range check above.
    return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
             4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
            (N*(N-1.0)*(N-2.0)*(N-3.0)))
def kstatvar(data, n=2):
    r"""
    Return an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2}, optional
        Default is equal to 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    Raises
    ------
    ValueError
        If `n` is neither 1 nor 2.

    See Also
    --------
    kstat: Returns the n-th k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    Notes
    -----
    The estimators used are ``var(k_1) = k_2 / N`` and
    ``var(k_2) = (2*N*k_2**2 + (N-1)*k_4) / (N*(N+1))``.
    """
    data = ravel(data)
    N = len(data)

    if n == 1:
        # The variance of the sample mean is estimated by k_2 / N.
        return kstat(data, n=2) * 1.0/N
    if n == 2:
        # var(k_2) expressed through k_2 and k_4.
        k2 = kstat(data, n=2)
        k4 = kstat(data, n=4)
        return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, str):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default).
    `probplot` optionally calculates a best-fit line for the data and plots
    the results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus
        location and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  The default is 'norm'
        for a normal probability plot.  Objects that look enough like a
        stats.distributions instance (i.e. they have a ``ppf`` method) are
        also accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data
        if True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    rvalue : bool, optional
        If `plot` is given and `rvalue` is True, also annotate the plot
        with the ``R^2`` value of the fit.

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Tuple of theoretical quantiles (osm, or order statistic medians)
        and ordered responses (osr).  `osr` is simply sorted input `x`.
    (slope, intercept, r) : tuple of floats, optional
        Tuple containing the result of the least-squares fit, if that is
        performed by `probplot`.  `r` is the square root of the coefficient
        of determination.  If ``fit=False`` and ``plot=None``, this tuple
        is not returned.

    Notes
    -----
    The theoretical quantiles (horizontal axis) use Filliben's estimate of
    the uniform order statistic medians, transformed through ``dist.ppf``.
    Even if `plot` is given, the figure is not shown or saved by `probplot`.
    """
    x = np.asarray(x)
    fit_requested = fit or (plot is not None)

    # Degenerate input: mirror the normal return structure.
    if x.size == 0:
        return ((x, x), (np.nan, np.nan, 0.0)) if fit_requested else (x, x)

    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    dist = _parse_dist_kw(dist, enforce_subclass=False)

    # Normalize the shape parameters to a tuple for ``dist.ppf``.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)

    # Theoretical quantiles vs ordered responses.
    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)

    if fit_requested:
        # Linear least squares fit of the ordered responses against the
        # theoretical quantiles.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)

    if plot is not None:
        plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
        _add_axis_labels_title(plot, xlabel='Theoretical quantiles',
                               ylabel='Ordered Values',
                               title='Probability Plot')
        if rvalue:
            # Place the R^2 annotation near the lower-right corner.
            xmin, xmax = amin(osm), amax(osm)
            ymin, ymax = amin(x), amax(x)
            posx = xmin + 0.70 * (xmax - xmin)
            posy = ymin + 0.01 * (ymax - ymin)
            plot.text(posx, posy, "$R^2=%1.4f$" % r**2)

    if fit:
        return (osm, osr), (slope, intercept, r)
    return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """
    Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  `ppcc_max` returns the shape parameter that would
    maximize the probability plot correlation coefficient for the given
    data to a one-parameter family of distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a,b,c) where (a<b<c).  If bracket consists of two numbers
        (a, c) then they are assumed to be a starting interval for a
        downhill bracket search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted.  The default is
        ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See Also
    --------
    ppcc_plot, probplot, boxcox

    References
    ----------
    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient
           Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    .. [2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = sort(x)

    def _neg_ppcc(shape, medians, yvals, ppf):
        # x-axis values of the probability plot for this candidate shape;
        # their correlation with the sorted data is the PPCC.  Returning
        # ``1 - r`` lets a minimizer maximize the correlation.
        xvals = ppf(medians, shape)
        r, _ = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(_neg_ppcc, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """
    Calculate and optionally plot probability plot correlation coefficient.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  It cannot be used for distributions without shape
    parameters (like the normal distribution) or with multiple shape
    parameters.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b : scalar
        Lower and upper bounds of the shape parameter to use.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted.  The default is
        ``'tukeylambda'``.
    plot : object, optional
        If given, plots PPCC against the shape parameter.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `a` to `b`).

    Returns
    -------
    svals : ndarray
        The shape values for which `ppcc` was calculated.
    ppcc : ndarray
        The calculated probability plot correlation coefficient values.

    Raises
    ------
    ValueError
        If ``b <= a``.

    See Also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda

    References
    ----------
    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")

    # Evaluate the PPCC on an evenly spaced grid of candidate shapes.
    svals = np.linspace(a, b, num=N)
    ppcc = np.empty_like(svals)
    for idx, shape in enumerate(svals):
        _, fit_result = probplot(x, shape, dist=dist, fit=True)
        ppcc[idx] = fit_result[-1]  # r from (slope, intercept, r)

    if plot is not None:
        plot.plot(svals, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' % dist)

    return svals, ppcc
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation.  See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for.  If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D
        `data`, an array otherwise.  ``nan`` if `data` is empty.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transformed input data ``x``.
    """
    data = np.asarray(data)
    n_obs = data.shape[0]
    if n_obs == 0:
        return np.nan

    logdata = np.log(data)

    # Variance of the transformed data along the first axis.  For nonzero
    # lmb the constant -1/lmb offset is dropped: it does not affect the
    # variance, and subtracting it can lose precision.
    if lmb == 0:
        variance = np.var(logdata, axis=0)
    else:
        variance = np.var(data**lmb / lmb, axis=0)

    return (lmb - 1) * np.sum(logdata, axis=0) - n_obs/2 * np.log(variance)
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
    r"""
    Return a dataset transformed by a Box-Cox power transformation.

    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda > 0
            log(x),                  for lmbda = 0

    Parameters
    ----------
    x : ndarray
        Input array.  Must be positive 1-dimensional.  Must not be constant.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the
        log-likelihood function and return it as the second output argument.
    alpha : {None, float}, optional
        If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.
        Must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and ``alpha`` is not None, this
        returned tuple of floats represents the minimum and maximum
        confidence limits given ``alpha``.

    Raises
    ------
    ValueError
        If `x` is not 1-D, is constant, or contains non-positive values.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    `boxcox` requires the input data to be positive.  A shift parameter is
    not provided; adding a positive constant to `x` before calling `boxcox`
    is equivalent.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of
    the Royal Statistical Society B, 26, 211-252 (1964).
    """
    x = np.asarray(x)

    # Validate: 1-D, non-constant, strictly positive data is required.
    if x.ndim != 1:
        raise ValueError("Data must be 1-dimensional.")
    if x.size == 0:
        return x
    if np.all(x == x[0]):
        raise ValueError("Data must not be constant.")
    if np.any(x <= 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:
        # Single fixed-lambda transformation.
        return special.boxcox(x, lmbda)

    # lmbda=None: pick the MLE lambda, then transform with it.
    lmax = boxcox_normmax(x, method='mle')
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    # Additionally report the confidence interval for lambda.
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
    """Compute optimal Box-Cox transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        The starting interval for a downhill bracket search with
        `optimize.brent`.  Note that this is in most cases not critical;
        the final result is allowed to be outside this bracket.
    method : str, optional
        The method to determine the optimal transform parameter (`boxcox`
        ``lmbda`` parameter).  Options are:

        'pearsonr'  (default)
            Maximizes the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the expected values for ``y`` if `x`
            would be normally-distributed.

        'mle'
            Minimizes the log-likelihood `boxcox_llf`.  This is the method
            used in `boxcox`.

        'all'
            Use all optimization methods available, and return all results.
            Useful to compare different methods.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found.  An array instead of a
        scalar for ``method='all'``.

    Raises
    ------
    ValueError
        If `method` is not one of 'pearsonr', 'mle', 'all'.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot
    """
    def _pearsonr(x, brack):
        # Maximize the correlation between the sorted transformed data and
        # the normal quantiles of the uniform order statistic medians.
        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
        xvals = distributions.norm.ppf(osm_uniform)

        def _eval_pearsonr(lmbda, xvals, samps):
            # Returns ``1 - r`` so that brent's minimization maximizes r.
            yvals = np.sort(boxcox(samps, lmbda))
            r, prob = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))

    def _mle(x, brack):
        # Minimize the negative Box-Cox log-likelihood.
        def _eval_mle(lmb, data):
            return -boxcox_llf(lmb, data)

        return optimize.brent(_eval_mle, brack=brack, args=(x,))

    def _all(x, brack):
        # Results in the order [pearsonr, mle].
        return np.array([_pearsonr(x, brack), _mle(x, brack)])

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    try:
        optimfunc = methods[method]
    except KeyError:
        raise ValueError("Method %s not recognized." % method)
    return optimfunc(x, brack)
def _normplot(method, x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
    optionally show it.  See `boxcox_normplot` or `yeojohnson_normplot`
    for details."""
    if method == 'boxcox':
        title = 'Box-Cox Normality Plot'
        transform_func = boxcox
    else:
        title = 'Yeo-Johnson Normality Plot'
        transform_func = yeojohnson

    x = np.asarray(x)
    if x.size == 0:
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = np.zeros_like(lmbdas)
    for idx, lmbda in enumerate(lmbdas):
        # Probability-plot correlation coefficient of the data
        # transformed with this candidate lmbda.
        transformed = transform_func(x, lmbda=lmbda)
        _, (_, _, corr) = probplot(transformed, dist='norm', fit=True)
        ppcc[idx] = corr

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='$\\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title=title)

    return lmbdas, ppcc
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.
    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.
    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
        for Box-Cox transformations.  These are also the limits of the
        horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
        or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).
    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
        when fitting the Box-Cox transformed input `x` against a normal
        distribution.
    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    Generate some non-normally distributed data, and create a Box-Cox plot:
    >>> x = stats.loggamma.rvs(5, size=500) + 5
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
    the same plot:
    >>> _, maxlog = stats.boxcox(x)
    >>> ax.axvline(maxlog, color='r')
    >>> plt.show()
    """
    # Shared implementation with `yeojohnson_normplot`.
    return _normplot('boxcox', x, la, lb, plot, N)
def yeojohnson(x, lmbda=None):
    r"""
    Return a dataset transformed by a Yeo-Johnson power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.  Should be 1-dimensional.
    lmbda : float, optional
        If ``lmbda`` is ``None``, find the lambda that maximizes the
        log-likelihood function and return it as the second output
        argument.  Otherwise the transformation is done for the given
        value.

    Returns
    -------
    yeojohnson : ndarray
        Yeo-Johnson power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.

    See Also
    --------
    probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox

    Notes
    -----
    The Yeo-Johnson transform is given by::

        y = ((x + 1)**lmbda - 1) / lmbda,                for x >= 0, lmbda != 0
            log(x + 1),                                  for x >= 0, lmbda = 0
            -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda),  for x < 0, lmbda != 2
            -log(-x + 1),                                for x < 0, lmbda = 2

    Unlike `boxcox`, `yeojohnson` does not require the input data to be
    positive.

    .. versionadded:: 1.2.0

    References
    ----------
    I. Yeo and R.A. Johnson, "A New Family of Power Transformations to
    Improve Normality or Symmetry", Biometrika 87.4 (2000):
    """
    x = np.asarray(x)
    if x.size == 0:
        return x

    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError('Yeo-Johnson transformation is not defined for '
                         'complex numbers.')

    # The transform is real-valued; work in floating point.
    if np.issubdtype(x.dtype, np.integer):
        x = x.astype(np.float64, copy=False)

    if lmbda is not None:
        return _yeojohnson_transform(x, lmbda)

    # lmbda not given: use the value maximizing the log-likelihood, and
    # return it alongside the transformed data.
    lmax = yeojohnson_normmax(x)
    return _yeojohnson_transform(x, lmax), lmax
def _yeojohnson_transform(x, lmbda):
"""Return x transformed by the Yeo-Johnson power transform with given
parameter lmbda."""
out = np.zeros_like(x)
pos = x >= 0 # binary mask
# when x >= 0
if abs(lmbda) < np.spacing(1.):
out[pos] = np.log1p(x[pos])
else: # lmbda != 0
out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
else: # lmbda == 2
out[~pos] = -np.log1p(-x[~pos])
return out
def yeojohnson_llf(lmb, data):
    r"""The yeojohnson log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Yeo-Johnson transformation.  See `yeojohnson` for
        details.
    data : array_like
        Data to calculate Yeo-Johnson log-likelihood for.  If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float
        Yeo-Johnson log-likelihood of `data` given `lmb`.

    See Also
    --------
    yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax

    Notes
    -----
    The Yeo-Johnson log-likelihood function is defined here as

    .. math::

        llf = N/2 \log(\hat{\sigma}^2) + (\lambda - 1)
              \sum_i \text{ sign }(x_i)\log(|x_i| + 1)

    where :math:`\hat{\sigma}^2` is the estimated variance of the
    Yeo-Johnson transformed input data ``x``.

    .. versionadded:: 1.2.0
    """
    data = np.asarray(data)
    n = data.shape[0]
    if n == 0:
        # No observations: the log-likelihood is undefined.
        return np.nan

    transformed = _yeojohnson_transform(data, lmb)

    # Variance term of the profile log-likelihood.
    variance_term = -n / 2 * np.log(transformed.var(axis=0))
    # Jacobian term of the transformation.
    jacobian_term = (lmb - 1) * (np.sign(data) *
                                 np.log(np.abs(data) + 1)).sum(axis=0)
    return variance_term + jacobian_term
def yeojohnson_normmax(x, brack=(-2, 2)):
    """
    Compute optimal Yeo-Johnson transform parameter.

    Compute optimal Yeo-Johnson transform parameter for input data, using
    maximum likelihood estimation.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        The starting interval for a downhill bracket search with
        `optimize.brent`.  Not critical in most cases; the final result
        is allowed to lie outside this bracket.

    Returns
    -------
    maxlog : float
        The optimal transform parameter found.

    See Also
    --------
    yeojohnson, yeojohnson_llf, yeojohnson_normplot

    Notes
    -----
    .. versionadded:: 1.2.0
    """
    # Brent minimization of the negated log-likelihood == MLE of lmbda.
    return optimize.brent(lambda lmb, samps: -yeojohnson_llf(lmb, samps),
                          brack=brack, args=(x,))
def yeojohnson_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Yeo-Johnson normality plot, optionally show it.
    A Yeo-Johnson normality plot shows graphically what the best
    transformation parameter is to use in `yeojohnson` to obtain a
    distribution that is close to normal.
    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to
        `yeojohnson` for Yeo-Johnson transformations.  These are also the
        limits of the horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
        or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).
    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Yeo-Johnson transform was done.
    ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
        when fitting the Yeo-Johnson transformed input `x` against a normal
        distribution.
    See Also
    --------
    probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max
    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `yeojohnson_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `yeojohnson_normplot`.
    .. versionadded:: 1.2.0
    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    Generate some non-normally distributed data, and create a Yeo-Johnson plot:
    >>> x = stats.loggamma.rvs(5, size=500) + 5
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)
    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
    the same plot:
    >>> _, maxlog = stats.yeojohnson(x)
    >>> ax.axvline(maxlog, color='r')
    >>> plt.show()
    """
    # Shared implementation with `boxcox_normplot`.
    return _normplot('yeojohnson', x, la, lb, plot, N)
def shapiro(x):
    """
    Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the
    data was drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.

    Returns
    -------
    W : float
        The test statistic.
    p-value : float
        The p-value for the hypothesis test.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is described in [4]_ but censoring parameters as
    described are not implemented. For N > 5000 the W test statistic is
    accurate but the p-value may not be.
    The chance of rejecting the null hypothesis when it is true is close
    to 5% regardless of sample size.

    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
           normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal
           of Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
    >>> stats.shapiro(x)
    (0.9772805571556091, 0.08144091814756393)
    """
    x = np.ravel(x)
    N = len(x)
    if N < 3:
        raise ValueError("Data must be at least length 3.")

    # Workspace for the Fortran swilk routine; only the first half is used.
    a = zeros(N, 'f')
    y = sort(x)
    a, w, pw, ifault = statlib.swilk(y, a[:N // 2], 0)
    if ifault not in (0, 2):
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if N > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")
    return w, pw
# Critical-value tables used by `anderson` below; each array entry
# corresponds to one of the significance levels reported by that function
# (normal/exponential: 15%, 10%, 5%, 2.5%, 1%; Gumbel: 25%, 10%, 5%,
# 2.5%, 1%; logistic: 25%, 10%, 5%, 2.5%, 1%, 0.5%).
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# Result container returned by `anderson`.
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """
    Anderson-Darling test for data coming from a particular distribution.
    The Anderson-Darling test tests the null hypothesis that a sample is
    drawn from a population that follows a particular distribution.
    For the Anderson-Darling test, the critical values depend on
    which distribution is being tested against.  This function works
    for normal, exponential, logistic, or Gumbel (Extreme Value
    Type I) distributions.
    Parameters
    ----------
    x : array_like
        Array of sample data.
    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',
        'extreme1'}, optional
        The type of distribution to test against.  The default is 'norm'.
        The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the
        same distribution.
    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic.
    critical_values : list
        The critical values for this distribution.
    significance_level : list
        The significance levels for the corresponding critical values
        in percents.  The function returns critical values for a
        differing set of significance levels depending on the
        distribution that is being tested against.
    See Also
    --------
    kstest : The Kolmogorov-Smirnov test for goodness-of-fit.
    Notes
    -----
    Critical values provided are for the following significance levels:
    normal/exponenential
        15%, 10%, 5%, 2.5%, 1%
    logistic
        25%, 10%, 5%, 2.5%, 1%, 0.5%
    Gumbel
        25%, 10%, 5%, 2.5%, 1%
    If the returned statistic is larger than these critical values then
    for the corresponding significance level, the null hypothesis that
    the data come from the chosen distribution can be rejected.
    The returned statistic is referred to as 'A2' in the references.
    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
           Some Comparisons, Journal of the American Statistical Association,
           Vol. 69, pp. 730-737.
    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
           pp. 357-369.
    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
           Distribution, Biometrika, Vol. 64, pp. 583-588.
    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality , Technical Report No. 262,
           Department of Statistics, Stanford University, Stanford, CA.
    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
           pp. 591-595.
    """
    # Validate the requested distribution name up front.
    if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
                    'gumbel_r', 'extreme1', 'logistic']:
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")
    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)
    if dist == 'norm':
        # Standardize with the sample mean and (ddof=1) standard deviation.
        s = np.std(x, ddof=1, axis=0)
        w = (y - xbar) / s
        logcdf = distributions.norm.logcdf(w)
        logsf = distributions.norm.logsf(w)
        sig = array([15, 10, 5, 2.5, 1])
        # Small-sample correction of the asymptotic critical values.
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        # Exponential: scale by the sample mean (its MLE of the scale).
        w = y / xbar
        logcdf = distributions.expon.logcdf(w)
        logsf = distributions.expon.logsf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        def rootfunc(ab, xj, N):
            # Estimating equations for the logistic location `a` and
            # scale `b`; solved numerically with fsolve below.
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)
        # Start the solver at the sample mean and std.
        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
        w = (y - sol[0]) / sol[1]
        logcdf = distributions.logistic.logcdf(w)
        logsf = distributions.logistic.logsf(w)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    elif dist == 'gumbel_r':
        # Fit location/scale by maximum likelihood.
        xbar, s = distributions.gumbel_r.fit(x)
        w = (y - xbar) / s
        logcdf = distributions.gumbel_r.logcdf(w)
        logsf = distributions.gumbel_r.logsf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    else:  # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
        xbar, s = distributions.gumbel_l.fit(x)
        w = (y - xbar) / s
        logcdf = distributions.gumbel_l.logcdf(w)
        logsf = distributions.gumbel_l.logsf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    # A^2 statistic; ``logsf[::-1]`` pairs the i-th smallest observation
    # with the i-th largest in the usual A-D sum.
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
# Result container returned by `anderson_ksamp`.
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.
    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test. It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population. The
    critical values depend on the number of samples.
    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default
        (True) is the midrank test applicable to continuous and
        discrete populations. If False, the right side empirical
        distribution is used.
    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,
        0.5%, 0.1%.
    significance_level : float
        An approximate significance level at which the null hypothesis for the
        provided samples can be rejected. The value is floored / capped at
        0.1% / 25%.
    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.
    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test
    Notes
    -----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete
    distributions, in which ties between samples may occur. The
    default of this routine is to compute the version based on the
    midrank empirical distribution function. This test is applicable
    to continuous and discrete data. If midrank is set to False, the
    right side empirical distribution is used for a test for discrete
    data. According to [1]_, the two discrete test statistics differ
    only slightly if a few collisions due to round-off errors occur in
    the test not adjusted for ties between samples.
    The critical values corresponding to the significance levels from 0.01
    to 0.25 are taken from [1]_. p-values are floored / capped
    at 0.1% / 25%. Since the range of critical values might be extended in
    future releases, it is recommended not to test ``p == 0.25``, but rather
    ``p >= 0.25`` (analogously for the lower bound).
    .. versionadded:: 0.14.0
    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(314159)
    The null hypothesis that the two random samples come from the same
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
    significance level of 3.2%:
    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(loc=0.5, size=30)])
    (2.4615796189876105,
    array([ 0.325,  1.226,  1.961,  2.718,  3.752, 4.592, 6.546]),
    0.03176687568842282)
    The null hypothesis cannot be rejected for three samples from an
    identical distribution. The reported p-value (25%) has been capped and
    may not be very accurate (since it corresponds to the value 0.449
    whereas the statistic is -0.731):
    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(size=30), np.random.normal(size=20)])
    (-0.73091722665244196,
    array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856,
    4.07210043, 5.56419101]),
    0.25)
    """
    # Number of samples; at least two are needed for a k-sample test.
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    # Pooled sorted observations and the distinct values among them.
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    # Eq. 7 (midrank, tie-aware) or eq. 6 (right-continuous ECDF) of
    # Scholz & Stephens (1987).
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # H, h, g and the a..d polynomial coefficients build the null-mean and
    # variance of A2kN as given in Scholz & Stephens (1987).
    H = (1. / n).sum()
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    g = (hs_cs / arange(2, N)).sum()
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Standardized statistic (null mean of A2kN is m = k - 1).
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])
    critical = b0 + b1 / math.sqrt(m) + b2 / m
    sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])
    if A2 < critical.min():
        p = sig.max()
        warnings.warn("p-value capped: true value larger than {}".format(p),
                      stacklevel=2)
    elif A2 > critical.max():
        p = sig.min()
        warnings.warn("p-value floored: true value smaller than {}".format(p),
                      stacklevel=2)
    else:
        # interpolation of probit of significance level
        pf = np.polyfit(critical, log(sig), 2)
        p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
# Result container returned by `ansari`.
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters.
    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.
    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic.
    pvalue : float
        The p-value of the hypothesis test.
    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters
    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.
    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton.  Applied nonparametric statistical
           methods.  3rd ed. Chapman and Hall/CRC.  2001.  Section 5.8.2.
    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Symmetrized ranks: distance of each rank from the nearer end; the
    # A-B statistic is their sum over the first sample.
    symrank = amin(array((rank, N - rank + 1)), 0)
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    # Exact distribution is only available for small, tie-free samples.
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        # a1 holds the null-distribution frequencies of the statistic,
        # starting at the smallest attainable value astart (from statlib).
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = np.sum(a1, axis=0)
        # Two-sided p-value: sum the tail of the exact distribution on
        # whichever side AB falls, doubling it.
        if ind < len(a1)/2.0:
            cind = int(ceil(ind))
            if ind == cind:
                pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[:cind], axis=0) / total
        else:
            find = int(floor(ind))
            if ind == floor(ind):
                pval = 2.0 * np.sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
        return AnsariResult(AB, min(1.0, pval))
    # otherwise compute normal approximation
    # Null mean and variance of AB differ for odd and even N.
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:   # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
    # Two-sided p-value from the normal approximation.
    z = (AB - mnAB) / sqrt(varAB)
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AnsariResult(AB, pval)
# Result container returned by `bartlett`.
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
    """
    Perform Bartlett's test for equal variances.

    Bartlett's test tests the null hypothesis that all input samples
    are from populations with equal variances.  For samples
    from significantly non-normal populations, Levene's test
    `levene` is more robust.

    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data.  Only 1d arrays are accepted, they may have
        different lengths.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness of departures from normality and
    power ([3]_).

    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
           Methods, Eighth Edition, Iowa State University Press.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function.
           Technical Report #99-03, Center for Likelihood Studies,
           Pennsylvania State University.
    .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
    """
    # Empty input yields a NaN result; >1-d input is rejected.
    for sample in args:
        arr = np.asanyarray(sample)
        if arr.size == 0:
            return BartlettResult(np.nan, np.nan)
        if arr.ndim > 1:
            raise ValueError('Samples must be one-dimensional.')

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    # Per-sample sizes and unbiased variances.
    Ni = np.array([len(sample) for sample in args], dtype=np.float64)
    ssq = np.array([np.var(sample, ddof=1) for sample in args], dtype='d')
    Ntot = Ni.sum(axis=0)
    # Pooled variance estimate.
    spsq = ((Ni - 1) * ssq).sum(axis=0) / (1.0 * (Ntot - k))
    numer = (Ntot * 1.0 - k) * log(spsq) - ((Ni - 1.0) * log(ssq)).sum(axis=0)
    denom = 1.0 + 1.0 / (3 * (k - 1)) * (((1.0 / (Ni - 1.0)).sum(axis=0)) -
                                         1.0 / (Ntot - k))
    T = numer / denom
    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf
    return BartlettResult(T, pval)
# Result container returned by `levene`.
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths. Only one-dimensional
samples are accepted.
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions>
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
The test version using the mean was proposed in the original article
of Levene ([2]_) while the median and trimmed mean have been studied by
Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe
test.
References
----------
.. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
# check for 1d input
for j in range(k):
if np.asanyarray(args[j]).ndim > 1:
raise ValueError('Samples must be one-dimensional.')
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    This is an exact test of the null hypothesis that the success
    probability of a Bernoulli experiment equals `p`.

    Parameters
    ----------
    x : int or array_like
        The number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : int
        The number of trials.  This is ignored if x gives both the
        number of successes and failures.
    p : float, optional
        The hypothesized probability of success.  ``0 <= p <= 1``.  The
        default value is ``p = 0.5``.
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis.  The default value is
        'two-sided'.

    Returns
    -------
    p-value : float
        The p-value of the hypothesis test.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Binomial_test

    Examples
    --------
    >>> from scipy import stats

    A car manufacturer claims that no more than 10% of their cars are unsafe.
    15 cars are inspected for safety, 3 were found to be unsafe. Test the
    manufacturer's claim:

    >>> stats.binom_test(3, n=15, p=0.1, alternative='greater')
    0.18406106910639114

    The null hypothesis cannot be rejected at the 5% level of significance
    because the returned p-value is greater than the critical value of 5%.

    """
    x = atleast_1d(x).astype(np.int_)
    if len(x) == 2:
        # x holds (successes, failures); derive n from their sum.
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")

    if p > 1.0 or p < 0.0:
        raise ValueError("p must be in range [0,1]")

    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")

    if alternative == 'less':
        return distributions.binom.cdf(x, n, p)
    if alternative == 'greater':
        return distributions.binom.sf(x - 1, n, p)

    # 'two-sided': sum the probabilities of all outcomes whose pmf does not
    # exceed that of the observed count (up to a small relative tolerance).
    d = distributions.binom.pmf(x, n, p)
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        tail = np.arange(np.ceil(p * n), n + 1)
        y = np.sum(distributions.binom.pmf(tail, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        tail = np.arange(np.floor(p * n) + 1)
        y = np.sum(distributions.binom.pmf(tail, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(y - 1, n, p) +
                distributions.binom.sf(x - 1, n, p))

    return min(1.0, pval)
def _apply_func(x, g, func):
    # ``g`` holds split points into ``x``; apply ``func`` to each
    # consecutive segment (the implicit endpoints 0 and len(x) are added).
    bounds = unique(r_[0, g, len(x)])
    pieces = [func(x[lo:hi]) for lo, hi in zip(bounds[:-1], bounds[1:])]
    return asarray(pieces)


FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))


def fligner(*args, **kwds):
    """
    Perform Fligner-Killeen test for equality of variance.

    The null hypothesis is that every input sample comes from a
    population with the same variance.  The Fligner-Killeen test is
    distribution free when the populations are identical [2]_.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data.  Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used in
        computing the test statistic.  The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end.  (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    As with Levene's test there are three variants of Fligner's test that
    differ by the measure of central tendency used; see `levene` for
    details.  Simulation studies (Conover et al., 1981) found the tests of
    Fligner and Killeen (1976) and Levene (1960) to be superior in terms of
    robustness against departures from normality and in power [3]_.

    References
    ----------
    .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function.
           Technical Report #99-03, Center for Likelihood Studies,
           Pennsylvania State University.
           https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf
    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free
           two-sample tests for scale. 'Journal of the American Statistical
           Association.' 71(353), 210-213.
    .. [3] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf biding data.
           Technometrics, 23(4), 351-361.

    """
    # Empty samples short-circuit to a NaN result.
    for sample in args:
        if np.asanyarray(sample).size == 0:
            return FlignerResult(np.nan, np.nan)

    # Extract the two recognized keyword options, rejecting anything else.
    center = 'median'
    proportiontocut = 0.05
    for key, val in kwds.items():
        if key not in ['center', 'proportiontocut']:
            raise TypeError("fligner() got an unexpected keyword "
                            "argument '%s'" % key)
        if key == 'center':
            center = val
        else:
            proportiontocut = val

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")

    if center == 'median':
        estimate = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        estimate = lambda x: np.mean(x, axis=0)
    else:  # 'trimmed': trim each sample, then center with the mean
        args = tuple(stats.trimboth(sample, proportiontocut)
                     for sample in args)
        estimate = lambda x: np.mean(x, axis=0)

    Ni = asarray([len(sample) for sample in args])
    Yci = asarray([estimate(sample) for sample in args])
    Ntot = np.sum(Ni, axis=0)

    # Pool the absolute deviations from the group centers into one
    # sequence, remembering the group boundaries in ``g``.
    Zij = [abs(asarray(sample) - c) for sample, c in zip(args, Yci)]
    allZij = []
    g = [0]
    for dev in Zij:
        allZij.extend(list(dev))
        g.append(len(allZij))

    # Normal scores of the pooled ranks.
    ranks = stats.rankdata(allZij)
    scores = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)

    # Mean score per group versus the overall mean score.
    Aibar = _apply_func(scores, g, np.sum) / Ni
    anbar = np.mean(scores, axis=0)
    varsq = np.var(scores, axis=0, ddof=1)

    statistic = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(statistic, k - 1)  # 1 - cdf
    return FlignerResult(statistic, pval)
def mood(x, y, axis=0):
    """
    Perform Mood's test for equal scale parameters.

    Mood's two-sample test is a non-parametric test of the null
    hypothesis that both samples were drawn from the same distribution
    with the same scale parameter.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    axis : int, optional
        The axis along which the samples are tested.  `x` and `y` can be
        of different length along `axis`.  If `axis` is None, `x` and `y`
        are flattened and the test is done on all values in the flattened
        arrays.

    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test.  For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances

    Notes
    -----
    The data are assumed to be drawn from probability distributions
    ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density
    function f.  The null hypothesis is that ``s == 1``.

    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.

    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    if axis is None:
        # Flatten both inputs and test along the single remaining axis.
        x = x.flatten()
        y = y.flatten()
        axis = 0

    # Shapes must agree on every axis except the tested one.
    res_shape = tuple(x.shape[ax] for ax in range(len(x.shape)) if ax != axis)
    other_shape = tuple(y.shape[ax] for ax in range(len(y.shape))
                        if ax != axis)
    if res_shape != other_shape:
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")

    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")

    xy = np.concatenate((x, y), axis=axis)
    if axis != 0:
        xy = np.rollaxis(xy, axis)
    xy = xy.reshape(xy.shape[0], -1)

    # rankdata handles only 1-D data, so rank each column separately.
    all_ranks = np.zeros_like(xy)
    for col in range(xy.shape[1]):
        all_ranks[:, col] = stats.rankdata(xy[:, col])

    Ri = all_ranks[:n]
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
    # Moments of M under the null hypothesis (normal approximation).
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)

    # Two-sided p-value: right tail via sf, left tail via cdf.
    pos = z > 0
    pval = np.zeros_like(z)
    pval[pos] = 2 * distributions.norm.sf(z[pos])
    pval[~pos] = 2 * distributions.norm.cdf(z[~pos])

    if res_shape == ():
        # Return scalars, not 0-D arrays
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))


def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
             alternative="two-sided"):
    """
    Calculate the Wilcoxon signed-rank test.

    The Wilcoxon signed-rank test tests the null hypothesis that two
    related paired samples come from the same distribution. In particular,
    it tests whether the distribution of the differences x - y is symmetric
    about zero. It is a non-parametric version of the paired T-test.

    Parameters
    ----------
    x : array_like
        Either the first set of measurements (in which case `y` is the second
        set of measurements), or the differences between two sets of
        measurements (in which case `y` is not to be specified.) Must be
        one-dimensional.
    y : array_like, optional
        Either the second set of measurements (if `x` is the first set of
        measurements), or not specified (if `x` is the differences between
        two sets of measurements.) Must be one-dimensional.
    zero_method : {'pratt', 'wilcox', 'zsplit'}, optional
        The following options are available (default is 'wilcox'):

        * 'pratt': Includes zero-differences in the ranking process,
          but drops the ranks of the zeros, see [4]_, (more conservative).
        * 'wilcox': Discards all zero-differences, the default.
        * 'zsplit': Includes zero-differences in the ranking process and
          split the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.
    alternative : {"two-sided", "greater", "less"}, optional
        The alternative hypothesis to be tested, see Notes. Default is
        "two-sided".

    Returns
    -------
    statistic : float
        If `alternative` is "two-sided", the sum of the ranks of the
        differences above or below zero, whichever is smaller.
        Otherwise the sum of the ranks of the differences above zero.
    pvalue : float
        The p-value for the test depending on `alternative`.

    See Also
    --------
    kruskal, mannwhitneyu

    Notes
    -----
    The test has been introduced in [4]_. Given n independent samples
    (xi, yi) from a bivariate distribution (i.e. paired samples),
    it computes the differences di = xi - yi. One assumption of the test
    is that the differences are symmetric, see [2]_.
    The two-sided test has the null hypothesis that the median of the
    differences is zero against the alternative that it is different from
    zero. The one-sided test has the null hypothesis that the median is
    positive against the alternative that it is negative
    (``alternative == 'less'``), or vice versa (``alternative == 'greater'``).

    The test uses a normal approximation to derive the p-value (if
    ``zero_method == 'pratt'``, the approximation is adjusted as in [5]_).
    A typical rule is to require that n > 20 ([2]_, p. 383). For smaller n,
    exact tables can be used to find critical values.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
    .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.
    .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
       Rank Procedures, Journal of the American Statistical Association,
       Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
    .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,
       Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
    .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank
       Sampling Distribution When Zero Differences are Present,
       Journal of the American Statistical Association, Vol. 62, 1967,
       pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`

    Examples
    --------
    In [4]_, the differences in height between cross- and self-fertilized
    corn plants is given as follows:

    >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]

    Cross-fertilized plants appear to be higher. To test the null
    hypothesis that there is no height difference, we can apply the
    two-sided test:

    >>> from scipy.stats import wilcoxon
    >>> w, p = wilcoxon(d)
    >>> w, p
    (24.0, 0.04088813291185591)

    Hence, we would reject the null hypothesis at a confidence level of 5%,
    concluding that there is a difference in height between the groups.
    To confirm that the median of the differences can be assumed to be
    positive, we use:

    >>> w, p = wilcoxon(d, alternative='greater')
    >>> w, p
    (96.0, 0.020444066455927955)

    This shows that the null hypothesis that the median is negative can be
    rejected at a confidence level of 5% in favor of the alternative that
    the median is greater than zero. The p-value based on the approximation
    is within the range of 0.019 and 0.054 given in [2]_.
    Note that the statistic changed to 96 in the one-sided case (the sum
    of ranks of positive differences) whereas it is 24 in the two-sided
    case (the minimum of sum of ranks above and below zero).

    """
    if zero_method not in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method should be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")
    if alternative not in ["two-sided", "less", "greater"]:
        raise ValueError("Alternative must be either 'two-sided', "
                         "'greater' or 'less'")
    if y is None:
        # `x` already holds the paired differences.
        d = asarray(x)
        if d.ndim > 1:
            raise ValueError('Sample x must be one-dimensional.')
    else:
        x, y = map(asarray, (x, y))
        if x.ndim > 1 or y.ndim > 1:
            raise ValueError('Samples x and y must be one-dimensional.')
        if len(x) != len(y):
            raise ValueError('The samples x and y must have the same length.')
        d = x - y
    if zero_method in ["wilcox", "pratt"]:
        # Count the zero differences before any are dropped; 'pratt' needs
        # this count later to adjust the normal approximation.
        n_zero = np.sum(d == 0, axis=0)
        if n_zero == len(d):
            raise ValueError("zero_method 'wilcox' and 'pratt' do not work if "
                             "the x - y is zero for all elements.")
    if zero_method == "wilcox":
        # Keep all non-zero differences
        d = compress(np.not_equal(d, 0), d, axis=-1)
    count = len(d)
    if count < 10:
        warnings.warn("Sample size too small for normal approximation.")
    # Rank the absolute differences (ties get midranks), then split the
    # rank sum by the sign of the difference.
    r = stats.rankdata(abs(d))
    r_plus = np.sum((d > 0) * r, axis=0)
    r_minus = np.sum((d < 0) * r, axis=0)
    if zero_method == "zsplit":
        # Split the ranks of the zero differences evenly between the
        # positive and negative rank sums.
        r_zero = np.sum((d == 0) * r, axis=0)
        r_plus += r_zero / 2.
        r_minus += r_zero / 2.
    # return min for two-sided test, but r_plus for one-sided test
    # the literature is not consistent here
    # r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
    # i.e. the sum of the ranks, so r_minus and the min can be inferred
    # (If zero_method='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
    # [3] uses the r_plus for the one-sided test, keep min for two-sided test
    # to keep backwards compatibility
    if alternative == "two-sided":
        T = min(r_plus, r_minus)
    else:
        T = r_plus
    # Mean and (un-normalized) variance of T under the null hypothesis.
    mn = count * (count + 1.) * 0.25
    se = count * (count + 1.) * (2. * count + 1.)
    if zero_method == "pratt":
        # Drop the ranks of the zero differences.
        r = r[d != 0]
        # normal approximation needs to be adjusted, see Cureton (1967)
        mn -= n_zero * (n_zero + 1.) * 0.25
        se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
    replist, repnum = find_repeats(r)
    if repnum.size != 0:
        # Correction for repeated elements.
        se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
    se = sqrt(se / 24)
    # apply continuity correction if applicable
    d = 0
    if correction:
        if alternative == "two-sided":
            d = 0.5 * np.sign(T - mn)
        elif alternative == "less":
            d = -0.5
        else:
            d = 0.5
    # compute statistic and p-value using normal approximation
    z = (T - mn - d) / se
    if alternative == "two-sided":
        prob = 2. * distributions.norm.sf(abs(z))
    elif alternative == "greater":
        # large T = r_plus indicates x is greater than y; i.e.
        # accept alternative in that case and return small p-value (sf)
        prob = distributions.norm.sf(z)
    else:
        prob = distributions.norm.cdf(z)
    return WilcoxonResult(T, prob)
def median_test(*args, **kwds):
    """
    Perform a Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples.  The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median.  The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test
    statistic and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples.  There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value.  The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table.  The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table.  Default is True.
    lambda_ : float or str, optional
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead.  See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values.  Default is 'propagate'.

    Returns
    -------
    stat : float
        The test statistic.  The statistic that is returned is determined by
        `lambda_`.  The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table.  The shape of the table is (2, n), where
        n is the number of samples.  The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median.  The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.  If ``nan_policy`` is "propagate" and there
        are nans in the input, the return value for ``table`` is ``None``.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    A biologist runs an experiment in which there are three groups of plants.
    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
    Each plant produces a number of seeds.  The seed counts for each group
    are::

        Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
        Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
        Group 3:  0  3  9 22 23 25 25 33 34 34 40 45 46 48 62 67 84

    The following code applies Mood's median test to these samples.

    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)

    The median is

    >>> med
    34.0

    and the contingency table is

    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])

    `p` is too large to conclude that the medians are not the same:

    >>> p
    0.12609082774093244

    The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
    `median_test`.

    >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
    >>> p
    0.12224779737117837

    The median occurs several times in the data, so we'll get a different
    result if, for example, ``ties="above"`` is used:

    >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
    >>> p
    0.063873276069553273
    >>> tbl
    array([[ 5, 11,  9],
           [11,  4,  8]])

    This example demonstrates that if the data set is not large and there
    are values equal to the median, the p-value can be sensitive to the
    choice of `ties`.

    """
    ties = kwds.pop('ties', 'below')
    correction = kwds.pop('correction', True)
    lambda_ = kwds.pop('lambda_', None)
    nan_policy = kwds.pop('nan_policy', 'propagate')

    if len(kwds) > 0:
        # Bug fix: dict views are not indexable in Python 3, so
        # ``kwds.keys()[0]`` raised a confusing TypeError about dict_keys
        # instead of reporting the offending keyword.  Use an iterator.
        bad_kwd = next(iter(kwds))
        raise TypeError("median_test() got an unexpected keyword "
                        "argument %r" % bad_kwd)

    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')

    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))

    data = [np.asarray(arg) for arg in args]

    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions. All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))

    cdata = np.concatenate(data)
    contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
    if contains_nan and nan_policy == 'propagate':
        return np.nan, np.nan, np.nan, None

    if contains_nan:
        # nan_policy is 'omit' here: compute the median of the finite values.
        grand_median = np.median(cdata[~np.isnan(cdata)])
    else:
        grand_median = np.median(cdata)
    # When the minimum version of numpy supported by scipy is 1.9.0,
    # the above if/else statement can be replaced by the single line:
    #     grand_median = np.nanmedian(cdata)

    # Create the contingency table.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        sample = sample[~np.isnan(sample)]

        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal

    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would have
    # a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If `ties`
        # is "ignore", that would result in a column of zeros in `table`.  We
        # check for that case here.
        zero_cols = np.nonzero((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)

    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _circfuncs_common(samples, high, low, nan_policy='propagate'):
    # Shared preprocessing for the circular statistics: map the samples
    # onto the unit circle and honor the requested NaN policy.
    samples = np.asarray(samples)
    if samples.size == 0:
        # Empty input: return NaN placeholders and no mask.
        return np.nan, np.asarray(np.nan), np.asarray(np.nan), None

    # Rescale [low, high) onto [0, 2*pi) and take the circle coordinates.
    angles = (samples - low) * 2. * pi / (high - low)
    sin_samp = sin(angles)
    cos_samp = cos(angles)

    contains_nan, nan_policy = _contains_nan(samples, nan_policy)
    if contains_nan and nan_policy == 'omit':
        mask = np.isnan(samples)
        # Zero out the NaN contributions so that sums simply skip them.
        sin_samp[mask] = 0.0
        cos_samp[mask] = 0.0
    else:
        mask = None

    return samples, sin_samp, cos_samp, mask
def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """
    Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular mean range.  Default is 0.
    axis : int, optional
        Axis along which means are computed.  The default is to compute
        the mean of the flattened array.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values.  Default is 'propagate'.

    Returns
    -------
    circmean : float
        Circular mean.

    Examples
    --------
    >>> from scipy.stats import circmean
    >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3])
    0.2

    >>> from scipy.stats import circmean
    >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0)
    0.4

    """
    samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low,
                                                           nan_policy=nan_policy)
    # Direction of the mean resultant vector of the samples on the circle.
    sin_sum = sin_samp.sum(axis=axis)
    cos_sum = cos_samp.sum(axis=axis)
    res = arctan2(sin_sum, cos_sum)
    # arctan2 yields angles in (-pi, pi]; shift negative angles by 2*pi so
    # the result lies in [0, 2*pi) before rescaling to [low, high).  The
    # mask gymnastics keep NaN entries untouched and handle both the
    # scalar (0-d) and array results.
    mask_nan = ~np.isnan(res)
    if mask_nan.ndim > 0:
        mask = res[mask_nan] < 0
    else:
        mask = res < 0
    if mask.ndim > 0:
        mask_nan[mask_nan] = mask
        res[mask_nan] += 2*pi
    elif mask:
        res += 2*pi
    # Set output to NaN if no samples went into the mean
    if nmask is not None:
        if nmask.all():
            # Every sample was NaN: the whole result is NaN.
            res = np.full(shape=res.shape, fill_value=np.nan)
        else:
            # Find out if any of the axis that are being averaged consist
            # entirely of NaN.  If one exists, set the result (res) to NaN
            nshape = 0 if axis is None else axis
            smask = nmask.shape[nshape] == nmask.sum(axis=axis)
            if smask.any():
                res[smask] = np.nan
    # Map the angle back from [0, 2*pi) to the caller's [low, high) range.
    return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """
    Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular variance range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular variance range.  Default is 0.
    axis : int, optional
        Axis along which variances are computed.  The default is to compute
        the variance of the flattened array.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values.  Default is 'propagate'.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    The definition used here reduces to the ordinary ('linear') variance
    in the limit of small angles.

    """
    samples, sin_samp, cos_samp, nan_mask = _circfuncs_common(samples, high, low,
                                                              nan_policy=nan_policy)
    if nan_mask is None:
        s_bar = sin_samp.mean(axis=axis)
        c_bar = cos_samp.mean(axis=axis)
    else:
        # Average over the non-NaN entries only; all-NaN slices become NaN.
        n_valid = np.asarray(np.sum(~nan_mask, axis=axis).astype(float))
        n_valid[n_valid == 0] = np.nan
        s_bar = sin_samp.sum(axis=axis) / n_valid
        c_bar = cos_samp.sum(axis=axis) / n_valid
    # Length of the mean resultant vector; shorter vector = more spread.
    resultant = hypot(s_bar, c_bar)
    scale = (high - low) / 2.0 / pi
    return scale**2 * 2 * log(1 / resultant)
def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """
    Compute the circular standard deviation for samples assumed to be in the
    range [low to high].
    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular standard deviation range.
        Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular standard deviation range. Default is 0.
    axis : int, optional
        Axis along which standard deviations are computed. The default is
        to compute the standard deviation of the flattened array.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.
    Returns
    -------
    circstd : float
        Circular standard deviation.
    Notes
    -----
    This uses a definition of circular standard deviation that in the limit of
    small angles returns a number close to the 'linear' standard deviation.
    Examples
    --------
    >>> from scipy.stats import circstd
    >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2])
    0.063564063306
    """
    samples, sin_samp, cos_samp, nan_mask = _circfuncs_common(
        samples, high, low, nan_policy=nan_policy)
    if nan_mask is None:
        mean_sin = sin_samp.mean(axis=axis)
        mean_cos = cos_samp.mean(axis=axis)
    else:
        # Average only over non-NaN entries; an all-NaN slice gets a NaN
        # count so NaN propagates into the result.
        counts = np.asarray(np.sum(~nan_mask, axis=axis).astype(float))
        counts[counts == 0] = np.nan
        mean_sin = sin_samp.sum(axis=axis) / counts
        mean_cos = cos_samp.sum(axis=axis) / counts
    # Length of the mean resultant vector: shorter means more angular spread.
    resultant = hypot(mean_sin, mean_cos)
    scale = (high - low) / 2.0 / pi
    return scale * sqrt(-2 * log(resultant))
|
aeklant/scipy
|
scipy/stats/morestats.py
|
Python
|
bsd-3-clause
| 115,276
|
[
"Gaussian"
] |
a64d041de8fe916fb77f46c711dba8e6837b68c328a54c3f77c97bd7d3207635
|
# coding: utf-8
from scipy import signal
import pandas as pd
import numpy as np
# TODO: Add Gaussian filter
# TODO: Add B-spline
# TODO: Move detrend
def lp_filter(data_in, filter_len=100, fs=1, padlen=80):
    """Low-pass filter a series with a zero-phase Blackman-windowed FIR filter.

    Parameters
    ----------
    data_in : pandas.Series
        Input signal.
    filter_len : float, optional
        Cutoff period in the units of ``1/fs``; the cutoff frequency is
        ``1/filter_len``. Default 100.
    fs : float, optional
        Sampling frequency. Default 1.
    padlen : int, optional
        Edge-padding length passed to ``filtfilt``. Previously hard-coded
        to 80, which raises for inputs shorter than ``padlen + 1``;
        exposed as a parameter (default unchanged) so short signals can
        be filtered too.

    Returns
    -------
    pandas.Series
        Filtered signal on the same index, named ``filt_blackman_<filter_len>``.
    """
    fc = 1 / filter_len            # cutoff frequency
    nyq = fs / 2                   # Nyquist frequency
    wn = fc / nyq                  # cutoff normalized to Nyquist, as firwin expects
    n = int(2 * filter_len * fs)   # number of FIR taps
    taps = signal.firwin(n, wn, window='blackman')
    # filtfilt applies the filter forward and backward for zero phase shift
    filtered_data = signal.filtfilt(taps, 1.0, data_in, padtype='even',
                                    padlen=padlen)
    name = 'filt_blackman_' + str(filter_len)
    return pd.Series(filtered_data, index=data_in.index, name=name)
def detrend(data_in, begin, end):
    """Subtract a linear ramp running from *begin* to *end* from the data.

    Accepts numpy arrays as well as pandas Series/DataFrame; for pandas
    objects the ramp is aligned on the index and subtracted column-wise.
    """
    # TODO: Do ndarrays with both dimensions greater than 1 work?
    # TODO: Duck type this check?
    n_samples = (len(data_in.index) if isinstance(data_in, pd.DataFrame)
                 else len(data_in))
    ramp = np.linspace(begin, end, num=n_samples)
    if not isinstance(data_in, (pd.Series, pd.DataFrame)):
        return data_in - ramp
    return data_in.sub(pd.Series(ramp, index=data_in.index), axis=0)
|
DynamicGravitySystems/DGP
|
dgp/lib/transform/filters.py
|
Python
|
apache-2.0
| 1,094
|
[
"Gaussian"
] |
048c8a4d75b3d4f1d72b6ed65bfa8a50d68be87852129e090506faeb0a6bab0c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Provides a class for interacting with KPath classes to
generate high-symmetry k-paths using different conventions.
"""
from __future__ import division, unicode_literals
import itertools
import numpy as np
import networkx as nx
from pymatgen.symmetry.kpath import KPathBase, KPathSetyawanCurtarolo, KPathLatimerMunro, KPathSeek
__author__ = "Jason Munro"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Jason Munro"
__email__ = "jmunro@lbl.gov"
__status__ = "Development"
__date__ = "March 2020"
class HighSymmKpath(KPathBase):
    """
    This class generates path along high symmetry lines in the
    Brillouin zone according to different conventions.
    The class is designed to be used with a specific primitive
    cell setting. The definitions for the primitive cell
    used can be found in: Computational Materials Science,
    49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010.
    The space group analyzer can be used to produce the correct
    primitive structure
    (method get_primitive_standard_structure(international_monoclinic=False)).
    Ensure input structure is correct before 'get_kpoints()' method is used.
    See individual KPath classes for details on specific conventions.
    """
    def __init__(
        self, structure, has_magmoms=False, magmom_axis=None, path_type="sc",
        symprec=0.01, angle_tolerance=5, atol=1e-5):
        """
        Args:
        structure (Structure): Structure object
        has_magmoms (boolean): Whether the input structure contains
            magnetic moments as site properties with the key 'magmom.'
            Values may be in the form of 3-component vectors given in
            the basis of the input lattice vectors, in
            which case the spin axis will default to a_3, the third
            real-space lattice vector (this triggers a warning).
        magmom_axis (list or numpy array): 3-component vector specifying
            direction along which magnetic moments given as scalars
            should point. If all magnetic moments are provided as
            vectors then this argument is not used.
        path_type (string): Chooses which convention to use to generate
            the high symmetry path. Options are: 'sc', 'hin', 'lm' for the
            Setyawan & Curtarolo, Hinuma et al., and Latimer & Munro conventions.
            Choosing 'all' will generate one path with points from all three
            conventions. Equivalent labels between each will also be generated.
            Order will always be Latimer & Munro, Setyawan & Curtarolo, and Hinuma et al.
            Lengths for each of the paths will also be generated and output
            as a list. Note for 'all' the user will have to alter the labels on
            their own for plotting.
        symprec (float): Tolerance for symmetry finding
        angle_tolerance (float): Angle tolerance for symmetry finding.
        atol (float): Absolute tolerance used to determine symmetric
            equivalence of points and lines on the BZ.
        """
        super().__init__(structure, symprec=symprec, angle_tolerance=angle_tolerance, atol=atol)
        self._path_type = path_type
        # These three stay None unless path_type == 'all'.
        self._equiv_labels = None
        self._path_lengths = None
        self._label_index = None
        if path_type != "all":
            # Single-convention case: delegate to the matching KPath class.
            if path_type == "lm":
                self._kpath = self._get_lm_kpath(has_magmoms, magmom_axis, symprec, angle_tolerance, atol).kpath
            elif path_type == "sc":
                self._kpath = self._get_sc_kpath(symprec, angle_tolerance, atol).kpath
            elif path_type == "hin":
                hin_dat = self._get_hin_kpath(symprec, angle_tolerance, atol, not has_magmoms)
                self._kpath = hin_dat.kpath
                # keep the Hinuma transformation matrix for later reference
                self._hin_tmat = hin_dat._tmat
        else:
            if has_magmoms:
                raise ValueError("Cannot select 'all' with non-zero magmoms.")
            lm_bs = self._get_lm_kpath(has_magmoms, magmom_axis, symprec, angle_tolerance, atol)
            # reciprocal point group from the Latimer-Munro object, reused
            # below to find equivalent labels between conventions
            rpg = lm_bs._rpg
            sc_bs = self._get_sc_kpath(symprec, angle_tolerance, atol)
            hin_bs = self._get_hin_kpath(symprec, angle_tolerance, atol, not has_magmoms)
            index = 0
            cat_points = {}
            label_index = {}
            num_path = []
            self._path_lengths = []
            # Concatenate the three conventions into one dict keyed by a
            # running integer; each path segment is rewritten in terms of
            # those integer indices so labels stay unambiguous.
            for bs in [lm_bs, sc_bs, hin_bs]:
                for key, value in enumerate(bs.kpath["kpoints"]):
                    cat_points[index] = bs.kpath["kpoints"][value]
                    label_index[index] = value
                    index += 1
                total_points_path = 0
                for seg in bs.kpath["path"]:
                    total_points_path += len(seg)
                for block in bs.kpath["path"]:
                    new_block = []
                    for label in block:
                        # only search the index range added for this convention
                        for ind in range(len(label_index) - len(bs.kpath["kpoints"]), len(label_index),):
                            if label_index[ind] == label:
                                new_block.append(ind)
                    num_path.append(new_block)
                self._path_lengths.append(total_points_path)
            self._label_index = label_index
            self._kpath = {"kpoints": cat_points, "path": num_path}
            self._equiv_labels = self._get_klabels(lm_bs, sc_bs, hin_bs, rpg)
    @property
    def path_type(self):
        """
        Returns:
        The type of kpath chosen
        """
        return self._path_type
    @property
    def label_index(self):
        """
        Returns:
        The correspondance between numbers and kpoint symbols for the
        combined kpath generated when path_type = 'all'. None otherwise.
        """
        return self._label_index
    @property
    def equiv_labels(self):
        """
        Returns:
        The correspondance between the kpoint symbols in the Latimer and
        Munro convention, Setyawan and Curtarolo, and Hinuma
        conventions respectively. Only generated when path_type = 'all'.
        """
        return self._equiv_labels
    @property
    def path_lengths(self):
        """
        Returns:
        List of lengths of the Latimer and Munro, Setyawan and Curtarolo, and Hinuma
        conventions in the combined HighSymmKpath object when path_type = 'all' respectively.
        None otherwise.
        """
        return self._path_lengths
    def _get_lm_kpath(self, has_magmoms, magmom_axis, symprec, angle_tolerance, atol):
        """
        Returns:
        Latimer and Munro k-path with labels.
        """
        return KPathLatimerMunro(self._structure, has_magmoms, magmom_axis, symprec, angle_tolerance, atol)
    def _get_sc_kpath(self, symprec, angle_tolerance, atol):
        """
        Returns:
        Setyawan and Curtarolo k-path with labels.
        """
        kpath = KPathSetyawanCurtarolo(self._structure, symprec, angle_tolerance, atol)
        # cache the cells/reciprocal lattice of the SC convention on self
        self.prim = kpath.prim
        self.conventional = kpath.conventional
        self.prim_rec = kpath.prim_rec
        self._rec_lattice = self.prim_rec
        return kpath
    def _get_hin_kpath(self, symprec, angle_tolerance, atol, tri):
        """
        Returns:
        Hinuma et al. k-path with labels.
        """
        bs = KPathSeek(self._structure, symprec, angle_tolerance, atol, tri)
        kpoints = bs.kpath["kpoints"]
        tmat = bs._tmat
        # Re-express each fractional k-point through the inverse transpose
        # of the transformation matrix (the reciprocal-space counterpart of
        # the real-space change of basis) — presumably mapping from the
        # Seek primitive setting back to this cell; modifies bs in place.
        for key in kpoints:
            kpoints[key] = np.dot(np.transpose(np.linalg.inv(tmat)), kpoints[key])
        return bs
    def _get_klabels(self, lm_bs, sc_bs, hin_bs, rpg):
        """
        Returns:
        labels (dict): Dictionary of equivalent labels for paths if 'all' is chosen.
            If an exact kpoint match cannot be found, symmetric equivalency will be
            searched for and indicated with an asterisk in the equivalent label.
            If an equivalent label can still not be found, or the point is not in
            the explicit kpath, its equivalent label will be set to itself in the output.
        """
        lm_path = lm_bs.kpath
        sc_path = sc_bs.kpath
        hin_path = hin_bs.kpath
        n_op = len(rpg)
        pairs = itertools.permutations([{"sc": sc_path}, {"lm": lm_path}, {"hin": hin_path}], r=2)
        labels = {"sc": {}, "lm": {}, "hin": {}}
        for (a, b) in pairs:
            [(a_type, a_path)] = list(a.items())
            [(b_type, b_path)] = list(b.items())
            # Score every reciprocal point-group operation by how many of
            # a's k-points it maps onto k-points of b.
            sc_count = np.zeros(n_op)
            for o_num in range(0, n_op):
                a_tr_coord = []
                for (label_a, coord_a) in a_path["kpoints"].items():
                    a_tr_coord.append(np.dot(rpg[o_num], coord_a))
                for coord_a in a_tr_coord:
                    for key, value in b_path["kpoints"].items():
                        if np.allclose(value, coord_a, atol=self._atol):
                            sc_count[o_num] += 1
                            break
            # Use the best-scoring operation to assign direct label matches.
            a_to_b_labels = {}
            unlabeled = {}
            for (label_a, coord_a) in a_path["kpoints"].items():
                coord_a_t = np.dot(rpg[np.argmax(sc_count)], coord_a)
                assigned = False
                for (label_b, coord_b) in b_path["kpoints"].items():
                    if np.allclose(coord_b, coord_a_t, atol=self._atol):
                        a_to_b_labels[label_a] = label_b
                        assigned = True
                        break
                if not assigned:
                    unlabeled[label_a] = coord_a
            # For leftovers, accept a match under any operation and mark it
            # with an asterisk; otherwise the label maps to itself.
            for (label_a, coord_a) in unlabeled.items():
                for op in rpg:
                    coord_a_t = np.dot(op, coord_a)
                    key = [
                        key
                        for key, value in b_path["kpoints"].items()
                        if np.allclose(value, coord_a_t, atol=self._atol)
                    ]
                    if key != []:
                        a_to_b_labels[label_a] = key[0][0] + "^{*}"
                        break
                if key == []:
                    a_to_b_labels[label_a] = label_a
            labels[a_type][b_type] = a_to_b_labels
        return labels
    @staticmethod
    def get_continuous_path(bandstructure=None):
        """
        Obtain a continuous version of an inputted path using graph theory.
        This routine will attempt to add connections between nodes of
        odd-degree to ensure a Eulerian path can be formed. Initial
        k-path must be able to be converted to a connected graph.
        Args:
        bandstructure (Bandstructure): Bandstructure object.
        Returns:
        distances_map (list): Mapping of 'distance' segments for altering a
            BSPlotter object to new continuous path. List of tuples indicating the
            new order of distances, and whether they should be plotted in reverse.
        kpath_euler (list): New continuous kpath in the HighSymmKpath format.
        """
        G = nx.Graph()
        labels = []
        for point in bandstructure.kpoints:
            if point.label is not None:
                labels.append(point.label)
        # Labeled k-points come in (start, end) pairs: one pair per plotted segment.
        plot_axis = []
        for i in range(int(len(labels) / 2)):
            G.add_edges_from([(labels[2 * i], labels[(2 * i) + 1])])
            plot_axis.append((labels[2 * i], labels[(2 * i) + 1]))
        # Duplicate edges until every node has even degree, then walk an
        # Eulerian circuit over the resulting multigraph.
        G_euler = nx.algorithms.euler.eulerize(G)
        G_euler_circuit = nx.algorithms.euler.eulerian_circuit(G_euler)
        distances_map = []
        kpath_euler = []
        for edge_euler in G_euler_circuit:
            kpath_euler.append(edge_euler)
            # Map each traversed edge back to its original segment index,
            # recording whether it is walked in reverse.
            for edge_reg in plot_axis:
                if edge_euler == edge_reg:
                    distances_map.append((plot_axis.index(edge_reg), False))
                elif edge_euler[::-1] == edge_reg:
                    distances_map.append((plot_axis.index(edge_reg), True))
        return distances_map, kpath_euler
|
gVallverdu/pymatgen
|
pymatgen/symmetry/bandstructure.py
|
Python
|
mit
| 12,242
|
[
"pymatgen"
] |
a42b618421e201c11070b461a6332d4ad62b2a0d376135d8e099d0ff5c5fdb44
|
#!/usr/bin/env python
"""
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Oliver Henrich (University of Strathclyde, Glasgow)
------------------------------------------------------------------------- */
"""
"""
Creates unique base-pairings to avoid asymmetrical H-bonds.
Modified to create the bead wall setup.
N_BEADS is the number of beads along one direction, the final system will have N_BEADS^2 beads in the wall. N_BEADS should be set to be odd number.
"""
#Define number of base-pairs per turn for B-form DNA
N = 10.5
#Define distance between the big bead and the centre of mass of the last base-pair
BEAD_OFFSET = 2.0
# Half the spacing between neighbouring wall beads (beads are placed
# 2*WALL_PARTICLE_SIZE apart in the wall grid built in generate_strand)
WALL_PARTICLE_SIZE = 2.0
# Beads per side of the square wall (N_BEADS**2 beads total); should be
# odd so the wall is centred on the duplex end (see module docstring)
N_BEADS = 11
#Number of unique base type groups (1-4) ACGT counts as one group
N_BASE_TYPES = 20
"""
Import basic modules
"""
import sys, os, timeit
from timeit import default_timer as timer
start_time = timer()
"""
Try to import numpy; if failed, import a local version mynumpy
which needs to be provided
"""
try:
import numpy as np
except:
print("numpy not found. Exiting.", file=sys.stderr)
sys.exit(1)
"""
Check that the required arguments (box offset and size in simulation units
and the sequence file were provided
"""
# Parse the command-line arguments. The original bare 'except' let an
# unexpected argument count (e.g. 6 args) slip through the try block with
# 'topo'/'lk' never assigned, crashing later with a NameError; reject it
# up front with the usage message instead, and catch only the exceptions
# argument parsing can actually raise.
try:
    box_offset = float(sys.argv[1])
    box_length = float(sys.argv[2])
    infile = sys.argv[3]
    if len(sys.argv) == 4:
        topo = 'strand'
        lk = 0          # linking-number offset defaults to 0
    elif len(sys.argv) == 5:
        topo = 'strand'
        lk = int(sys.argv[4])
    else:
        raise ValueError("wrong number of arguments")
except (IndexError, ValueError):
    print("Usage: %s <%s> <%s> <%s> <%s> " % (sys.argv[0],
          "box offset", "box length", "file with sequences", "[Lk]"),
          file=sys.stderr)
    sys.exit(1)

# Cubic simulation box
box = np.array([box_length, box_length, box_length])

"""
Try to open the file and fail gracefully if file cannot be opened
"""
try:
    inp = open(infile, 'r')
    inp.close()
except OSError:
    print("Could not open file '%s' for reading. Aborting." % infile,
          file=sys.stderr)
    sys.exit(2)
# return parts of a string
def partition(s, d):
    """Split *s* at the first occurrence of separator *d*.

    Returns a 3-tuple ``(head, sep, tail)``; when *d* does not occur the
    result is ``(s, "", "")``.  This is exactly the contract of the
    built-in ``str.partition``, so delegate to it.
    """
    return s.partition(d)
"""
Define the model constants
"""
# set model constants
PI = np.pi
# offsets of the base / backbone interaction sites along a nucleotide's
# a1 axis (used in the overlap checks in add_strands)
POS_BASE = 0.4
POS_BACK = -0.4
# excluded-volume contact distances for the site pairs; squared below
EXCL_RC1 = 0.711879214356
EXCL_RC2 = 0.335388426126
EXCL_RC3 = 0.52329943261
"""
Define auxiliary variables for the construction of a helix
"""
# center of the double strand
COM_CENTRE_DS = POS_BASE + 0.2
# ideal rise between two consecutive nucleotides on the
# same strand which are to be base paired in a duplex
BASE_BASE = 0.3897628551303122
# cutoff distance for overlap check (squared, compared against dot(dr, dr))
RC2 = 16
# squares of the excluded volume distances for overlap check
RC2_BACK = EXCL_RC1**2
RC2_BASE = EXCL_RC2**2
RC2_BACK_BASE = EXCL_RC3**2
# enumeration to translate from letters to numbers and vice versa
number_to_base = {1 : 'A', 2 : 'C', 3 : 'G', 4 : 'T'}
base_to_number = {'A' : 1, 'a' : 1, 'C' : 2, 'c' : 2,
                  'G' : 3, 'g' : 3, 'T' : 4, 't' : 4}
# auxiliary arrays: global accumulators for the growing system
# (appended to by add_strands and read_strands)
positions = []
a1s = []
a3s = []
quaternions = []
newpositions = []
newa1s = []
newa3s = []
basetype = []
strandnum = []
bonds = []
"""
Convert local body frame to quaternion DOF
"""
def exyz_to_quat(mya1, mya3):
    """Convert a local body frame given by the a1 and a3 axes to a unit
    quaternion.

    The middle axis a2 is reconstructed as a3 x a1, and the quaternion is
    recovered from the rotation matrix whose columns are (a1, a2, a3),
    branching on whichever squared component is largest (at least one is
    >= 1/4 since they sum to 1) for numerical stability.
    """
    mya2 = np.cross(mya3, mya1)
    q = [1, 0, 0, 0]
    # squared quaternion components from the matrix trace
    q0sq = 0.25 * (mya1[0] + mya2[1] + mya3[2] + 1.0)
    q1sq = q0sq - 0.5 * (mya2[1] + mya3[2])
    q2sq = q0sq - 0.5 * (mya1[0] + mya3[2])
    q3sq = q0sq - 0.5 * (mya1[0] + mya2[1])
    # take the first component that is safely large, then derive the rest
    # from the off-diagonal matrix elements
    if q0sq >= 0.25:
        q[0] = np.sqrt(q0sq)
        q[1] = (mya2[2] - mya3[1]) / (4.0 * q[0])
        q[2] = (mya3[0] - mya1[2]) / (4.0 * q[0])
        q[3] = (mya1[1] - mya2[0]) / (4.0 * q[0])
    elif q1sq >= 0.25:
        q[1] = np.sqrt(q1sq)
        q[0] = (mya2[2] - mya3[1]) / (4.0 * q[1])
        q[2] = (mya2[0] + mya1[1]) / (4.0 * q[1])
        q[3] = (mya1[2] + mya3[0]) / (4.0 * q[1])
    elif q2sq >= 0.25:
        q[2] = np.sqrt(q2sq)
        q[0] = (mya3[0] - mya1[2]) / (4.0 * q[2])
        q[1] = (mya2[0] + mya1[1]) / (4.0 * q[2])
        q[3] = (mya3[1] + mya2[2]) / (4.0 * q[2])
    elif q3sq >= 0.25:
        q[3] = np.sqrt(q3sq)
        q[0] = (mya1[1] - mya2[0]) / (4.0 * q[3])
        q[1] = (mya3[0] + mya1[2]) / (4.0 * q[3])
        q[2] = (mya3[1] + mya2[2]) / (4.0 * q[3])
    # normalize and return as an array
    norm = 1.0 / np.sqrt(sum(c * c for c in q))
    return np.array([c * norm for c in q])
"""
Adds a strand to the system by appending it to the array of previous strands
"""
def add_strands (mynewpositions, mynewa1s, mynewa3s):
    """Try to append a candidate strand to the global configuration.

    Every new particle is checked against all previously placed particles
    (with minimum-image wrapping in the global ``box``) for excluded-volume
    overlaps between backbone and base interaction sites.  On success the
    global ``positions``/``a1s``/``a3s`` lists are extended, the matching
    quaternions are appended to ``quaternions``, and True is returned; if
    any overlap is found, nothing is modified and False is returned.
    """
    overlap = False
    # This is a simple check for each of the particles where for previously
    # placed particles i we check whether it overlaps with any of the
    # newly created particles j
    print("## Checking for overlaps", file=sys.stdout)
    for i in range(len(positions)):
        p = positions[i]
        pa1 = a1s[i]
        for j in range (len(mynewpositions)):
            q = mynewpositions[j]
            qa1 = mynewa1s[j]
            # skip particles that are anyway too far away
            # (minimum-image distance against the squared cutoff RC2)
            dr = p - q
            dr -= box * np.rint (dr / box)
            if np.dot(dr, dr) > RC2:
                continue
            # base site and backbone site of the two particles
            p_pos_back = p + pa1 * POS_BACK
            p_pos_base = p + pa1 * POS_BASE
            q_pos_back = q + qa1 * POS_BACK
            q_pos_base = q + qa1 * POS_BASE
            # check for no overlap between the two backbone sites
            dr = p_pos_back - q_pos_back
            dr -= box * np.rint (dr / box)
            if np.dot(dr, dr) < RC2_BACK:
                overlap = True
            # check for no overlap between the two base sites
            dr = p_pos_base - q_pos_base
            dr -= box * np.rint (dr / box)
            if np.dot(dr, dr) < RC2_BASE:
                overlap = True
            # check for no overlap between backbone site of particle p
            # with base site of particle q
            dr = p_pos_back - q_pos_base
            dr -= box * np.rint (dr / box)
            if np.dot(dr, dr) < RC2_BACK_BASE:
                overlap = True
            # check for no overlap between base site of particle p and
            # backbone site of particle q
            dr = p_pos_base - q_pos_back
            dr -= box * np.rint (dr / box)
            if np.dot(dr, dr) < RC2_BACK_BASE:
                overlap = True
            # exit if there is an overlap
            if overlap:
                return False
    # append to the existing list if no overlap is found
    if not overlap:
        for p in mynewpositions:
            positions.append(p)
        for p in mynewa1s:
            a1s.append (p)
        for p in mynewa3s:
            a3s.append (p)
        # calculate quaternion from local body frame and append
        for ia in range(len(mynewpositions)):
            mynewquaternions = exyz_to_quat(mynewa1s[ia],mynewa3s[ia])
            quaternions.append(mynewquaternions)
    return True
"""
Calculate angle of rotation site to site
"""
def get_angle(bp):
    """Find a twist angle in degrees per base pair for *bp* base pairs.

    Scans the number of bases per turn upward from 10.5 in steps of 0.02
    until *bp* closes into a near-integer number of turns (tolerance
    0.03), then adds the global linking-number offset ``lk`` to the turn
    count.  Falls back to 35.9 degrees if nothing matches below 11.5
    bases per turn.
    """
    bases_per_turn = 10.5  # minimal number of bases per turn
    while True:
        turns = bp / bases_per_turn
        if abs(turns - round(turns)) < 0.03:
            # close enough to a whole number of turns: lock it in
            return round((360 * (round(turns) + lk)) / bp, 2)
        if bases_per_turn > 11.5:
            return 35.9  # give up: default B-DNA twist
        bases_per_turn += 0.02
def get_angle2(bp):
    """Twist angle in degrees per base pair for *bp* bases at the global
    N bases/turn, with the global linking-number offset ``lk`` added to
    the turn count."""
    return (360 * (bp / N + lk)) / bp
"""
Returns the rotation matrix defined by an axis and angle
"""
def get_rotation_matrix(axis, anglest, nbp=0):
    """Return the 3x3 matrix rotating about *axis* by *anglest*.

    *anglest* is either a plain number (interpreted as radians) or a
    tuple ``[value, units]`` where units is one of "degrees"/"deg"/"o",
    or "bp" for base-pair turns (35.9 degrees per step when *nbp* is 0,
    otherwise the per-step twist from ``get_angle2(nbp)``).
    """
    if isinstance(anglest, (np.float64, np.float32, float, int)):
        angle = float(anglest)
    elif len(anglest) > 1:
        value, units = anglest[0], anglest[1]
        if units in ["degrees", "deg", "o"]:
            angle = (np.pi / 180.) * value
        elif units in ["bp"]:
            per_bp = 35.9 if nbp == 0 else get_angle2(nbp)
            angle = int(value) * (np.pi / 180.) * per_bp
        else:
            # unrecognized unit: fall back to radians
            angle = float(value)
    else:
        angle = float(anglest[0])
    # normalize the rotation axis
    axis = np.array(axis)
    axis /= np.sqrt(np.dot(axis, axis))
    ct, st = np.cos(angle), np.sin(angle)
    olc = 1. - ct
    x, y, z = axis
    # Rodrigues rotation formula in matrix form
    return np.array([[olc*x*x+ct, olc*x*y-st*z, olc*x*z+st*y],
                     [olc*x*y+st*z, olc*y*y+ct, olc*y*z-st*x],
                     [olc*x*z-st*y, olc*y*z+st*x, olc*z*z+ct]])
"""
Generates the position and orientation vectors of a
(single or double) strand from a sequence string
"""
def generate_strand(bp, sequence=None, start_pos=None, dir=None,
                    perp=False, double=True, rot=0.):
    """Generate positions and orientations for a (single or double) DNA
    strand, plus the square bead wall attached near the duplex end.

    Args:
        bp: number of base pairs (nucleotides per strand).
        sequence: accepted for interface compatibility; not used here.
        start_pos: position of the first backbone site (defaults to the
            origin).
        dir: overall helix direction (defaults to +z); normalized
            internally, with a fallback to +z for a null vector.
        perp: a vector orthogonal to dir to seed the backbone-to-base
            axis a1, or False/None to use a fixed [1, 0, 0] seed.
        double: if True, also generate the complementary strand walking
            back down the helix.
        rot: global rotation in radians applied about the helix axis.

    Returns:
        [positions, a1s, a3s]: lists with the nucleotides first and the
        N_BEADS**2 wall beads appended at the end.

    Fixes over the previous version: the ``np.array`` default arguments
    were mutable objects shared across calls (in-place ops like ``+=``
    on them would corrupt later calls), and a random vector was drawn
    and immediately overwritten, wasting an RNG draw.
    """
    start_pos = (np.array([0, 0, 0], dtype=float) if start_pos is None
                 else np.array(start_pos, dtype=float))
    dir = (np.array([0, 0, 1], dtype=float) if dir is None
           else np.array(dir, dtype=float))
    mynewpositions, mynewa1s, mynewa3s = [], [], []
    # normalize the helix direction, falling back to +z for a null vector
    dir_norm = np.sqrt(np.dot(dir, dir))
    if dir_norm < 1e-10:
        print("direction must be a valid vector, defaulting to (0, 0, 1)",
              file=sys.stderr)
        dir = np.array([0, 0, 1], dtype=float)
    else:
        dir /= dir_norm
    # choose a vector orthogonal to dir as the first backbone-to-base axis;
    # a fixed [1, 0, 0] seed keeps the construction deterministic
    if perp is None or perp is False:
        v1 = np.array([1, 0, 0], dtype=float)
        v1 -= dir * np.dot(dir, v1)
        v1 /= np.sqrt(np.dot(v1, v1))
    else:
        v1 = perp
    # overall rotation of the helix, and the per-base-pair twist step
    R0 = get_rotation_matrix(dir, rot)
    R = get_rotation_matrix(dir, [1, "bp"], bp)
    a1 = np.dot(R0, v1)
    rb = np.array(start_pos)   # first backbone site
    a3 = dir
    for i in range(bp):
        # centre of mass sits COM_CENTRE_DS behind the backbone site
        mynewpositions.append(rb - COM_CENTRE_DS * a1)
        mynewa1s.append(a1)
        mynewa3s.append(a3)
        # advance twist and rise except after the last nucleotide
        if i != bp - 1:
            a1 = np.dot(R, a1)
            rb += a3 * BASE_BASE
    if double:
        # complementary strand: flip orientation and walk back down
        a1 = -a1
        a3 = -dir
        R = R.transpose()
        for i in range(bp):
            mynewpositions.append(rb - COM_CENTRE_DS * a1)
            mynewa1s.append(a1)
            mynewa3s.append(a3)
            a1 = np.dot(R, a1)
            rb += a3 * BASE_BASE
    # Bead wall: a centred N_BEADS x N_BEADS square grid, spaced
    # 2*WALL_PARTICLE_SIZE apart, offset BEAD_OFFSET past the midpoint of
    # the two middle entries of the position list.
    # NOTE(review): for double=True that midpoint is the last base pair,
    # but for double=False it is halfway along the single strand — confirm
    # the wall placement is intended for single strands too.
    last_base1 = mynewpositions[int(len(mynewpositions) / 2 - 1)]
    last_base2 = mynewpositions[int(len(mynewpositions) / 2)]
    mid_point = (last_base1 + last_base2) / 2
    p1 = [mid_point[0] - (N_BEADS - 1) * WALL_PARTICLE_SIZE,
          mid_point[1] - (N_BEADS - 1) * WALL_PARTICLE_SIZE,
          mid_point[2] + BEAD_OFFSET]
    for i in range(N_BEADS):
        for j in range(N_BEADS):
            mynewpositions.append([p1[0] + 2 * i * WALL_PARTICLE_SIZE,
                                   p1[1] + 2 * j * WALL_PARTICLE_SIZE,
                                   p1[2]])
            mynewa1s.append([1, 0, 0])
            mynewa3s.append([1, 0, 0])
    assert (len(mynewpositions) > 0)
    return [mynewpositions, mynewa1s, mynewa3s]
"""
Main function for this script.
Reads a text file with the following format:
- Each line contains the sequence for a single strand (A,C,G,T)
- Lines beginning with the keyword 'DOUBLE' produce double-stranded DNA
Ex: Two ssDNA (single stranded DNA)
ATATATA
GCGCGCG
Ex: Two strands, one double stranded, the other single stranded.
DOUBLE AGGGCT
CCTGTA
"""
def read_strands(filename):
    """Read the sequence file and write the LAMMPS data file 'data.oxdna'.

    Each non-empty line of the input is one strand (letters A/C/G/T); a
    line starting with 'DOUBLE' produces a duplex.  The function builds
    the global particle arrays (via generate_strand/add_strands), remaps
    duplex base letters onto unique per-occurrence atom types, appends
    the bead-wall particles, and writes the Atoms/Velocities/Ellipsoids/
    Bonds sections.
    """
    try:
        infile = open (filename)
    except:
        print("Could not open file '%s'. Aborting." % filename, file=sys.stderr)
        sys.exit(2)
    # This block works out the number of nucleotides and strands by reading
    # the number of non-empty lines in the input file and the number of letters,
    # taking the possible DOUBLE keyword into account.
    nstrands, nnucl, nbonds = 0, 0, 0
    lines = infile.readlines()
    for line in lines:
        line = line.upper().strip()
        if len(line) == 0:
            continue
        if line[:6] == 'DOUBLE':
            line = line.split()[1]
            length = len(line)
            print("## Found duplex of %i base pairs" % length, file=sys.stdout)
            nnucl += 2*length
            nstrands += 2
            nbonds+= 2*length
        else:
            line = line.split()[0]
            length = len(line)
            print("## Found single strand of %i bases" % length, file=sys.stdout)
            nnucl += length
            nstrands += 1
            if topo == 'ring':
                # NOTE(review): '=+' assigns +length instead of accumulating;
                # looks like a '+=' typo.  Unreachable today because topo is
                # always 'strand' in this script, but fix before re-enabling
                # ring topology.
                nbonds =+ length
            else:
                nbonds += length+1
    # rewind the sequence input file
    infile.seek(0)
    print("## nstrands, nnucl = ", nstrands, nnucl, file=sys.stdout)
    # generate the data file in LAMMPS format
    try:
        out = open ("data.oxdna", "w")
    except:
        print("Could not open data file for writing. Aborting.", file=sys.stderr)
        sys.exit(2)
    lines = infile.readlines()
    nlines = len(lines)
    i = 1
    myns = 0
    noffset = 1
    for line in lines:
        line = line.upper().strip()
        # skip empty lines
        if len(line) == 0:
            i += 1
            continue
        # block for duplexes: last argument of the generate function
        # is set to 'True'
        if line[:6] == 'DOUBLE':
            line = line.split()[1]
            length = len(line)
            seq = [(base_to_number[x]) for x in line]
            seq = np.array(seq,dtype=int)
            # count occurrences of each base to see how many unique
            # per-occurrence type groups the sequence can support
            n_a, n_c, n_g, n_t = 0, 0, 0, 0
            for s in range(seq.size):
                if seq[s] == 1:
                    n_a += 1
                elif seq[s] == 2:
                    n_c += 1
                elif seq[s] ==3:
                    n_g += 1
                elif seq[s] == 4:
                    n_t += 1
            smallest_n_bases = n_c
            if n_a < n_c:
                smallest_n_bases = n_a
            if smallest_n_bases > n_t:
                smallest_n_bases = n_t
            if smallest_n_bases > n_g:
                smallest_n_bases = n_g
            if smallest_n_bases < N_BASE_TYPES:
                print('## Not enough occurrences of base types in the sequence for ' + str(N_BASE_TYPES))
                print('## unique base types, switching to ' + str(smallest_n_bases) + ' unique types')
            else:
                smallest_n_bases = N_BASE_TYPES
            # Remap A/C/G/T (1-4) onto rotating groups of 4 atom types so
            # successive occurrences of the same letter get distinct types,
            # cycling back after smallest_n_bases groups.
            a, c, g, t = -3, -2, -1, 0
            for s in range(seq.size):
                if seq[s] == 1:
                    if a < (smallest_n_bases*4-3):
                        a += 4
                    else:
                        a = 1
                    seq[s] = a
                elif seq[s] == 2:
                    if c < (smallest_n_bases*4-2):
                        c += 4
                    else:
                        c = 2
                    seq[s] = c
                elif seq[s] == 3:
                    if g < (smallest_n_bases*4-1):
                        g += 4
                    else:
                        g = 3
                    seq[s] = g
                elif seq[s] == 4:
                    if t < (smallest_n_bases*4):
                        t += 4
                    else:
                        t = 4
                    seq[s] = t
            myns += 1
            for b in range(length):
                basetype.append(seq[b])
                strandnum.append(myns)
            for b in range(length-1):
                bondpair = [noffset + b, noffset + b + 1]
                bonds.append(bondpair)
            noffset += length
            # create the sequence of the second strand as made of
            # complementary bases
            # (swap A<->T and C<->G within each 4-type group; note seq2
            # aliases seq, so seq is modified in place here)
            #seq2 = [5-s for s in seq]
            seq2 = seq
            for s in range(seq2.size):
                if seq2[s]%4 == 1:
                    seq2[s] += 3
                elif seq2[s]%4 == 2:
                    seq2[s] += 1
                elif seq2[s]%4 == 3:
                    seq2[s] -= 1
                elif seq2[s]%4 == 0:
                    seq2[s] -= 3
            #seq2.reverse()
            myns += 1
            for b in range(length):
                basetype.append(seq2[b])
                strandnum.append(myns)
            for b in range(length-1):
                bondpair = [noffset + b, noffset + b + 1]
                bonds.append(bondpair)
            #create wall bead types
            bead_type = 4*smallest_n_bases + 1
            # NOTE(review): each loop iteration appends basetype/strandnum
            # TWICE per wall bead (2*N_BEADS**2 entries) while nnucl below
            # grows by only N_BEADS**2; with more than one duplex these
            # per-atom arrays drift out of step with 'positions' — verify.
            for i in range(N_BEADS**2):
                basetype.append(bead_type)
                basetype.append(bead_type)
                strandnum.append(bead_type)
                strandnum.append(bead_type)
            #bonds.append([length, noffset + length])
            #bonds.append([length+1, noffset + length])
            noffset += length
            print("## Created duplex of %i bases" % (2*length), file=sys.stdout)
            # generate random position of the first nucleotide
            com = box_offset + np.random.random_sample(3) * box
            # comment out to randomize
            com = [0,0,0]
            # generate the random direction of the helix
            axis = np.random.random_sample(3)
            # comment out to randomize
            axis = [0,0,1]
            axis /= np.sqrt(np.dot(axis, axis))
            # use the generate function defined above to create
            # the position and orientation vector of the strand
            if topo == 'ring':
                newpositions, newa1s, newa3s = generate_ring(len(line), \
                    sequence=seq, dir=axis, start_pos=com, double=True)
            else:
                newpositions, newa1s, newa3s = generate_strand(len(line), \
                    sequence=seq, dir=axis, start_pos=com, double=True)
            # generate a new position for the strand until it does not overlap
            # with anything already present
            start = timer()
            while not add_strands(newpositions, newa1s, newa3s):
                com = box_offset + np.random.random_sample(3) * box
                axis = np.random.random_sample(3)
                axis /= np.sqrt(np.dot(axis, axis))
                if topo == 'ring':
                    newpositions, newa1s, newa3s = generate_ring(len(line), \
                        sequence=seq, dir=axis, start_pos=com, double=True)
                else:
                    newpositions, newa1s, newa3s = generate_strand(len(line), \
                        sequence=seq, dir=axis, start_pos=com, double=True)
                print("## Trying %i" % i, file=sys.stdout)
            end = timer()
            print("## Added duplex of %i bases (line %i/%i) in %.2fs, now at %i/%i" % \
                (2*length, i, nlines, end-start, len(positions), nnucl), file=sys.stdout)
        # block for single strands: last argument of the generate function
        # is set to 'False'
        else:
            length = len(line)
            seq = [(base_to_number[x]) for x in line]
            myns += 1
            for b in range(length):
                basetype.append(seq[b])
                strandnum.append(myns)
            for b in range(length-1):
                bondpair = [noffset + b, noffset + b + 1]
                bonds.append(bondpair)
            if topo == 'ring':
                bondpair = [noffset, noffset + length-1]
                bonds.append(bondpair)
            noffset += length
            # generate random position of the first nucleotide
            com = box_offset + np.random.random_sample(3) * box
            # comment out to randomize
            com = [-30,0,0]
            # generate the random direction of the helix
            axis = np.random.random_sample(3)
            # comment out to randomize
            axis = [0,0,1]
            axis /= np.sqrt(np.dot(axis, axis))
            print("## Created single strand of %i bases" % length, file=sys.stdout)
            if topo == 'ring':
                newpositions, newa1s, newa3s = generate_ring(length, \
                    sequence=seq, dir=axis, start_pos=com, double=False)
            else:
                newpositions, newa1s, newa3s = generate_strand(length, \
                    sequence=seq, dir=axis, start_pos=com, double=False)
            start = timer()
            while not add_strands(newpositions, newa1s, newa3s):
                com = box_offset + np.random.random_sample(3) * box
                axis = np.random.random_sample(3)
                axis /= np.sqrt(np.dot(axis, axis))
                if topo == 'ring':
                    newpositions, newa1s, newa3s = generate_ring(length, \
                        sequence=seq, dir=axis, start_pos=com, double=False)
                else:
                    newpositions, newa1s, newa3s = generate_strand(length, \
                        sequence=seq, dir=axis, start_pos=com, double=False)
                print("## Trying %i" % (i), file=sys.stdout)
            end = timer()
            print("## Added single strand of %i bases (line %i/%i) in %.2fs, now at %i/%i" % \
                (length, i, nlines, end-start,len(positions), nnucl), file=sys.stdout)
        i += 1
    # sanity check
    #if not len(positions) == nnucl:
    #    print(len(positions), nnucl)
    #    raise AssertionError
    # account for the wall beads added by generate_strand
    nnucl = nnucl + (N_BEADS**2)
    # NOTE(review): ad-hoc correction to the bond count; also note the
    # Bonds loop below writes only nbonds-2 lines (i < nbonds-2) while the
    # header declares nbonds bonds — verify LAMMPS accepts this file.
    nbonds -= 4
    out.write('# LAMMPS data file\n')
    out.write('%d atoms\n' % nnucl)
    out.write('%d ellipsoids\n' % nnucl)
    out.write('%d bonds\n' % nbonds)
    out.write('\n')
    # NOTE(review): bead_type is assigned only in the DOUBLE branch; an
    # input with only single strands would raise NameError here — confirm.
    out.write('%d atom types\n' %bead_type )
    out.write('1 bond types\n')
    out.write('\n')
    out.write('# System size\n')
    out.write('%f %f xlo xhi\n' % (box_offset,box_offset+box_length))
    out.write('%f %f ylo yhi\n' % (box_offset,box_offset+box_length))
    out.write('%f %f zlo zhi\n' % (0,box_length))
    #out.write('\n')
    #out.write('Masses\n')
    #out.write('\n')
    #out.write('1 3.1575\n')
    #out.write('2 3.1575\n')
    #out.write('3 3.1575\n')
    #out.write('4 3.1575\n')
    #out.write('5 3.1575\n')
    # for each nucleotide print a line under the headers
    # Atoms, Velocities, Ellipsoids and Bonds
    out.write('\n')
    out.write(\
        '# Atom-ID, type, position, molecule-ID, ellipsoid flag, density\n')
    out.write('Atoms\n')
    out.write('\n')
    for i in range(nnucl):
        out.write('%d %d %22.15le %22.15le %22.15le %d 1 1\n' \
            % (i+1, basetype[i], \
            positions[i][0], positions[i][1], positions[i][2], \
            strandnum[i]))
    out.write('\n')
    out.write('# Atom-ID, translational, rotational velocity\n')
    out.write('Velocities\n')
    out.write('\n')
    for i in range(nnucl):
        out.write("%d %22.15le %22.15le %22.15le %22.15le %22.15le %22.15le\n" \
            % (i+1,0.0,0.0,0.0,0.0,0.0,0.0))
    out.write('\n')
    out.write('# Atom-ID, shape, quaternion\n')
    out.write('Ellipsoids\n')
    out.write('\n')
    for i in range(nnucl):
        out.write(\
            "%d %22.15le %22.15le %22.15le %22.15le %22.15le %22.15le %22.15le\n" \
            % (i+1,1.1739845031423408,1.1739845031423408,1.1739845031423408, \
            quaternions[i][0],quaternions[i][1], quaternions[i][2],quaternions[i][3]))
    out.write('\n')
    out.write('# Bond topology\n')
    out.write('Bonds\n')
    out.write('\n')
    for i in range(nbonds):
        if i < nbonds-2:
            out.write("%d %d %d %d\n" % (i+1,1,bonds[i][0],bonds[i][1]))
        #else:
        #out.write("%d %d %d %d\n" % (i+1,2,bonds[i][0],bonds[i][1]))
    out.close()
    print("## Wrote data to 'data.oxdna'", file=sys.stdout)
    print("## DONE", file=sys.stdout)
# Run the generator defined above, then report total wall-clock runtime.
read_strands(infile)
end_time = timer()
runtime = end_time - start_time
# Decompose seconds into h/m/s with divmod. The previous formula used
# np.rint(), which rounds to *nearest*: e.g. runtime=1900s gave
# hours=0.53 -> rint -> 1, and therefore negative minutes in the report.
hours, rem = divmod(runtime, 3600)
minutes, seconds = divmod(rem, 60)
print("## Total runtime %ih:%im:%.2fs" % (hours, minutes, seconds), file=sys.stdout)
|
jeremiahyan/lammps
|
examples/PACKAGES/cgdna/examples/oxDNA2/unique_bp/generate_unique.py
|
Python
|
gpl-2.0
| 26,819
|
[
"LAMMPS"
] |
b61244ce4cbe147fb0db554e01868dd84e447c23cbaf5cf070c50b36c629858d
|
'''
This migration script fixes the data corruption caused in the form_values
table (content json field) by migrate script 65.
'''
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
import binascii
from galaxy.util.json import loads, dumps
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def _sniffnfix_pg9_hex(value):
"""
Sniff for and fix postgres 9 hex decoding issue
"""
try:
if value[0] == 'x':
return binascii.unhexlify(value[1:])
else:
return value
except Exception, ex:
return value
def upgrade(migrate_engine):
    """
    Repair form_values.content rows corrupted by migration script 65.

    Any row whose content no longer parses as JSON is rebuilt by scraping
    each field's value out of the broken string (using the field order from
    form_definition.fields) and writing the re-serialized dict back.
    """
    metadata.bind = migrate_engine
    print __doc__
    metadata.reflect()
    # Join each form_values row with its form_definition so the expected
    # field names are available while repairing the content string.
    cmd = "SELECT form_values.id as id, form_values.content as field_values, form_definition.fields as fdfields " \
        + " FROM form_definition, form_values " \
        + " WHERE form_values.form_definition_id=form_definition.id " \
        + " ORDER BY form_values.id"
    result = migrate_engine.execute( cmd )
    corrupted_rows = 0
    for row in result:
        # first check if loading the dict from the json succeeds
        # if that fails, it means that the content field is corrupted.
        try:
            field_values_dict = loads( _sniffnfix_pg9_hex( str( row['field_values'] ) ) )
        # any parse failure marks the row as corrupted ('e' is unused)
        except Exception, e:
            corrupted_rows = corrupted_rows + 1
            # content field is corrupted
            fields_list = loads( _sniffnfix_pg9_hex( str( row['fdfields'] ) ) )
            field_values_str = _sniffnfix_pg9_hex( str( row['field_values'] ) )
            try:
                #Encoding errors? Just to be safe.
                print "Attempting to fix row %s" % row['id']
                print "Prior to replacement: %s" % field_values_str
            # bare except is deliberate: progress printing is best-effort
            # and must never abort the repair
            except:
                pass
            field_values_dict = {}
            # look for each field name in the values and extract its value (string)
            for index in range( len(fields_list) ):
                field = fields_list[index]
                # the literal key marker this field would have in valid JSON
                field_name_key = '"%s": "' % field['name']
                field_index = field_values_str.find( field_name_key )
                if field_index == -1:
                    # if the field name is not present the field values dict then
                    # inform the admin that this form values cannot be fixed
                    print "The 'content' field of row 'id' %i does not have the field '%s' in the 'form_values' table and could not be fixed by this migration script." % ( int( field['id'] ), field['name'] )
                else:
                    # check if this is the last field
                    if index == len( fields_list )-1:
                        # since this is the last field, the value string lies between the
                        # field name and the '"}' string at the end, hence len(field_values_str)-2
                        value = field_values_str[ field_index+len( field_name_key ):len( field_values_str )-2 ]
                    else:
                        # if this is not the last field then the value string lies between
                        # this field name and the next field name
                        next_field = fields_list[index+1]
                        next_field_index = field_values_str.find( '", "%s": "' % next_field['name'] )
                        value = field_values_str[ field_index+len( field_name_key ):next_field_index ]
                    # clean up the value string, escape the required quoutes and newline characters
                    value = value.replace( "'", "\''" )\
                                 .replace( '"', '\\\\"' )\
                                 .replace( '\r', "\\\\r" )\
                                 .replace( '\n', "\\\\n" )\
                                 .replace( '\t', "\\\\t" )
                    # add to the new values dict
                    field_values_dict[ field['name'] ] = value
            # update the db
            json_values = dumps(field_values_dict)
            # NOTE(review): json_values is interpolated straight into the SQL
            # string; tolerable for a one-off migration over data the script
            # itself just escaped, but not a parameterized query.
            cmd = "UPDATE form_values SET content='%s' WHERE id=%i" %( json_values, int( row['id'] ) )
            migrate_engine.execute( cmd )
            try:
                print "Post replacement: %s" % json_values
            except:
                pass
    if corrupted_rows:
        print 'Fixed %i corrupted rows.' % corrupted_rows
    else:
        print 'No corrupted rows found.'
def downgrade(migrate_engine):
    """No-op downgrade: the data repair performed by upgrade() is not
    reversible and does not need to be reverted."""
    metadata.bind = migrate_engine
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0076_fix_form_values_data_corruption.py
|
Python
|
gpl-3.0
| 4,588
|
[
"Galaxy"
] |
0dc14d8ea5cc5bfd3576a74d34eb749470a16e3426afd8657136ba37f359952d
|
#!/usr/bin/env python
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import os
from unittest import TestCase, main
import tempfile
import h5py
import numpy as np
from future.utils.six import StringIO, BytesIO
from qiita_db.metadata_template import SampleTemplate, PrepTemplate
from qiita_ware.util import (per_sample_sequences, template_to_dict,
metadata_stats_from_sample_and_prep_templates,
metadata_map_from_sample_and_prep_templates,
stats_from_df, dataframe_from_template,
open_file, _is_string_or_bytes)
def mock_sequence_iter(items):
    """Yield {'SequenceID', 'Sequence'} records for each (sid, seq) pair.

    Mimics the record shape per_sample_sequences expects from a real
    sequence iterator, lazily and in input order.
    """
    for sequence_id, sequence in items:
        yield {'SequenceID': sequence_id, 'Sequence': sequence}
class UtilTests(TestCase):
    """Tests for the helper functions exposed by qiita_ware.util."""

    def setUp(self):
        # Fixed seed: per_sample_sequences draws random priorities, and the
        # expected outputs below are only valid under this exact seed.
        np.random.seed(123)

    def test_per_sample_sequences_simple(self):
        """Cap larger than any per-sample count keeps every sequence."""
        max_seqs = 10
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated to each sequence
        exp = sorted([('b_2', 'AATTGGCC-b2'),
                      ('a_5', 'AATTGGCC-a5'),
                      ('a_1', 'AATTGGCC-a1'),
                      ('a_4', 'AATTGGCC-a4'),
                      ('b_1', 'AATTGGCC-b1'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_3', 'AATTGGCC-c3'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('c_2', 'AATTGGCC-c2'),
                      ('c_1', 'AATTGGCC-c1')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
        self.assertEqual(sorted(obs), exp)

    def test_per_sample_sequences_min_seqs(self):
        """Samples with fewer than min_seqs sequences (here: b) are dropped."""
        max_seqs = 10
        min_seqs = 3
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated to each sequence
        exp = sorted([('a_5', 'AATTGGCC-a5'),
                      ('a_1', 'AATTGGCC-a1'),
                      ('a_4', 'AATTGGCC-a4'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_3', 'AATTGGCC-c3'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('c_2', 'AATTGGCC-c2'),
                      ('c_1', 'AATTGGCC-c1')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs,
                                   min_seqs)
        self.assertEqual(sorted(obs), exp)

    def test_per_sample_sequences_complex(self):
        """A cap of 2 keeps at most two sequences per sample."""
        max_seqs = 2
        exp = sorted([('b_2', 'AATTGGCC-b2'),
                      ('b_1', 'AATTGGCC-b1'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_1', 'AATTGGCC-c1'),
                      ('c_2', 'AATTGGCC-c2')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
        self.assertEqual(sorted(obs), exp)

    def test_metadata_stats_from_sample_and_prep_templates(self):
        obs = metadata_stats_from_sample_and_prep_templates(SampleTemplate(1),
                                                            PrepTemplate(1))
        for k in obs:
            self.assertEqual(obs[k], SUMMARY_STATS[k])

    def test_metadata_map_from_sample_and_prep_templates(self):
        obs = metadata_map_from_sample_and_prep_templates(SampleTemplate(1),
                                                          PrepTemplate(1))
        # We don't test the specific values as this would blow up the size
        # of this file as the amount of lines would go to ~1000
        # 27 samples
        self.assertEqual(len(obs), 27)
        # NOTE(review): assertTrue(x, msg) never fails for a non-empty set;
        # assertEqual was probably intended here and below -- confirm before
        # changing, since the sets may not currently match.
        self.assertTrue(set(obs.index), {
            u'SKB1.640202', u'SKB2.640194', u'SKB3.640195', u'SKB4.640189',
            u'SKB5.640181', u'SKB6.640176', u'SKB7.640196', u'SKB8.640193',
            u'SKB9.640200', u'SKD1.640179', u'SKD2.640178', u'SKD3.640198',
            u'SKD4.640185', u'SKD5.640186', u'SKD6.640190', u'SKD7.640191',
            u'SKD8.640184', u'SKD9.640182', u'SKM1.640183', u'SKM2.640199',
            u'SKM3.640197', u'SKM4.640180', u'SKM5.640177', u'SKM6.640187',
            u'SKM7.640188', u'SKM8.640201', u'SKM9.640192'})
        self.assertTrue(set(obs.columns), {
            u'tot_org_carb', u'common_name', u'has_extracted_data',
            u'required_sample_info_status', u'water_content_soil',
            u'env_feature', u'assigned_from_geo', u'altitude', u'env_biome',
            u'texture', u'has_physical_specimen', u'description_duplicate',
            u'physical_location', u'latitude', u'ph', u'host_taxid',
            u'elevation', u'description', u'collection_timestamp',
            u'taxon_id', u'samp_salinity', u'host_subject_id', u'sample_type',
            u'season_environment', u'temp', u'country', u'longitude',
            u'tot_nitro', u'depth', u'anonymized_name', u'target_subfragment',
            u'sample_center', u'samp_size', u'run_date', u'experiment_center',
            u'pcr_primers', u'center_name', u'barcodesequence', u'run_center',
            u'run_prefix', u'library_construction_protocol', u'emp_status',
            u'linkerprimersequence', u'experiment_design_description',
            u'target_gene', u'center_project_name', u'illumina_technology',
            u'sequencing_meth', u'platform', u'experiment_title',
            u'study_center'})

    def test_template_to_dict(self):
        # Renamed from `template_to_dict`: without the `test_` prefix,
        # unittest discovery never collected this method, so it silently
        # never ran.  The body calls the module-level template_to_dict().
        template = PrepTemplate(1)
        obs = template_to_dict(template)
        # We don't test the specific values as this would blow up the size
        # of this file as the amount of lines would go to ~1000
        # twenty seven samples
        self.assertEqual(len(obs.keys()), 27)
        # the mapping file has 24 columns
        for key, value in obs.items():
            # check there are exatly these column names in the dictionary
            self.assertItemsEqual(value.keys(), [
                'experiment_center', 'center_name', 'run_center',
                'run_prefix', 'data_type_id', 'target_gene',
                'sequencing_meth', 'run_date', 'pcr_primers',
                'ebi_submission_accession', 'linkerprimersequence',
                'platform', 'library_construction_protocol',
                'experiment_design_description', 'study_center',
                'center_project_name', 'sample_center', 'samp_size',
                'illumina_technology', 'experiment_title', 'emp_status',
                'target_subfragment', 'barcodesequence',
                'ebi_study_accession'])

    def test_stats_from_df(self):
        obs = stats_from_df(dataframe_from_template(SampleTemplate(1)))
        for k in obs:
            self.assertEqual(obs[k], SUMMARY_STATS[k])

    def test_dataframe_from_template(self):
        template = PrepTemplate(1)
        obs = dataframe_from_template(template)
        # 27 samples
        self.assertEqual(len(obs), 27)
        # NOTE(review): assertTrue(x, msg) misuse, see note above -- these
        # two assertions can never fail; assertEqual was probably intended.
        self.assertTrue(set(obs.index), {
            u'SKB1.640202', u'SKB2.640194', u'SKB3.640195', u'SKB4.640189',
            u'SKB5.640181', u'SKB6.640176', u'SKB7.640196', u'SKB8.640193',
            u'SKB9.640200', u'SKD1.640179', u'SKD2.640178', u'SKD3.640198',
            u'SKD4.640185', u'SKD5.640186', u'SKD6.640190', u'SKD7.640191',
            u'SKD8.640184', u'SKD9.640182', u'SKM1.640183', u'SKM2.640199',
            u'SKM3.640197', u'SKM4.640180', u'SKM5.640177', u'SKM6.640187',
            u'SKM7.640188', u'SKM8.640201', u'SKM9.640192'})
        self.assertTrue(set(obs.columns), {
            u'tot_org_carb', u'common_name', u'has_extracted_data',
            u'required_sample_info_status', u'water_content_soil',
            u'env_feature', u'assigned_from_geo', u'altitude', u'env_biome',
            u'texture', u'has_physical_specimen', u'description_duplicate',
            u'physical_location', u'latitude', u'ph', u'host_taxid',
            u'elevation', u'description', u'collection_timestamp',
            u'taxon_id', u'samp_salinity', u'host_subject_id', u'sample_type',
            u'season_environment', u'temp', u'country', u'longitude',
            u'tot_nitro', u'depth', u'anonymized_name', u'target_subfragment',
            u'sample_center', u'samp_size', u'run_date', u'experiment_center',
            u'pcr_primers', u'center_name', u'barcodesequence', u'run_center',
            u'run_prefix', u'library_construction_protocol', u'emp_status',
            u'linkerprimersequence', u'experiment_design_description',
            u'target_gene', u'center_project_name', u'illumina_technology',
            u'sequencing_meth', u'platform', u'experiment_title',
            u'study_center'})
class TestFilePathOpening(TestCase):
    """Tests adapted from scikit-bio's skbio.io.util tests"""
    def test_is_string_or_bytes(self):
        # Accepts str, unicode and bytes; rejects file-likes and other types.
        self.assertTrue(_is_string_or_bytes('foo'))
        self.assertTrue(_is_string_or_bytes(u'foo'))
        self.assertTrue(_is_string_or_bytes(b'foo'))
        self.assertFalse(_is_string_or_bytes(StringIO('bar')))
        self.assertFalse(_is_string_or_bytes([1]))
    def test_file_closed(self):
        """File gets closed in decorator"""
        f = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        with open_file(filepath) as fh:
            pass
        # open_file opened the path itself, so it must also close it.
        self.assertTrue(fh.closed)
    def test_file_closed_harder(self):
        """File gets closed in decorator, even if exceptions happen."""
        f = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        try:
            with open_file(filepath) as fh:
                raise TypeError
        except TypeError:
            # Exception escaped the context manager AND the handle is closed.
            self.assertTrue(fh.closed)
        else:
            # If we're here, no exceptions have been raised inside the
            # try clause, so the context manager swallowed them. No
            # good.
            raise Exception("`open_file` didn't propagate exceptions")
    def test_filehandle(self):
        """Filehandles slip through untouched"""
        with tempfile.TemporaryFile('r') as fh:
            with open_file(fh) as ffh:
                self.assertTrue(fh is ffh)
            # And it doesn't close the file-handle
            self.assertFalse(fh.closed)
    def test_StringIO(self):
        """StringIO (useful e.g. for testing) slips through."""
        f = StringIO("File contents")
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_BytesIO(self):
        """BytesIO (useful e.g. for testing) slips through."""
        f = BytesIO(b"File contents")
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO(self):
        """An already-open h5py.File slips through untouched."""
        # In-core HDF5 file: nothing is written to disk.
        f = h5py.File('test', driver='core', backing_store=False)
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO_open(self):
        """A path to an existing HDF5 file is opened as an h5py.File."""
        name = None
        with tempfile.NamedTemporaryFile(delete=False) as fh:
            name = fh.name
        fh.close()
        # Create a valid (empty) HDF5 file at that path so open_file can
        # sniff the format.
        h5file = h5py.File(name, 'w')
        h5file.close()
        with open_file(name) as fh_inner:
            self.assertTrue(isinstance(fh_inner, h5py.File))
        os.remove(name)
# Test fixture consumed by the per_sample_sequences tests (via
# mock_sequence_iter): (sequence_id, sequence) pairs.  The trailing comment
# on each pair records the expected random value associated with that
# sequence under np.random.seed(123) -- "rank, raw value" -- which is the
# priority that decides the subsampling order.
sequences = [
    ('a_1', 'AATTGGCC-a1'), # 2, 3624216819017203053
    ('a_2', 'AATTGGCC-a2'), # 5, 5278339153051796802
    ('b_1', 'AATTGGCC-b1'), # 4, 4184670734919783522
    ('b_2', 'AATTGGCC-b2'), # 0, 946590342492863505
    ('a_4', 'AATTGGCC-a4'), # 3, 4048487933969823850
    ('a_3', 'AATTGGCC-a3'), # 7, 7804936597957240377
    ('c_1', 'AATTGGCC-c1'), # 8, 8868534167180302049
    ('a_5', 'AATTGGCC-a5'), # 1, 3409506807702804593
    ('c_2', 'AATTGGCC-c2'), # 9, 8871627813779918895
    ('c_3', 'AATTGGCC-c3') # 6, 7233291490207274528
]
SUMMARY_STATS = {
'altitude': [('0.0', 27)],
'anonymized_name': [('SKB1', 1),
('SKB2', 1),
('SKB3', 1),
('SKB4', 1),
('SKB5', 1),
('SKB6', 1),
('SKB7', 1),
('SKB8', 1),
('SKB9', 1),
('SKD1', 1),
('SKD2', 1),
('SKD3', 1),
('SKD4', 1),
('SKD5', 1),
('SKD6', 1),
('SKD7', 1),
('SKD8', 1),
('SKD9', 1),
('SKM1', 1),
('SKM2', 1),
('SKM3', 1),
('SKM4', 1),
('SKM5', 1),
('SKM6', 1),
('SKM7', 1),
('SKM8', 1),
('SKM9', 1)],
'assigned_from_geo': [('n', 27)],
'barcodesequence': [('AACTCCTGTGGA', 1),
('ACCTCAGTCAAG', 1),
('ACGCACATACAA', 1),
('AGCAGGCACGAA', 1),
('AGCGCTCACATC', 1),
('ATATCGCGATGA', 1),
('ATGGCCTGACTA', 1),
('CATACACGCACC', 1),
('CCACCCAGTAAC', 1),
('CCGATGCCTTGA', 1),
('CCTCGATGCAGT', 1),
('CCTCTGAGAGCT', 1),
('CGAGGTTCTGAT', 1),
('CGCCGGTAATCT', 1),
('CGGCCTAAGTTC', 1),
('CGTAGAGCTCTC', 1),
('CGTGCACAATTG', 1),
('GATAGCACTCGT', 1),
('GCGGACTATTCA', 1),
('GTCCGCAAGTTA', 1),
('TAATGGTCGTAG', 1),
('TAGCGCGAACTT', 1),
('TCGACCAAACAC', 1),
('TGAGTGGTCTGT', 1),
('TGCTACAGACGT', 1),
('TGGTTATGGCAC', 1),
('TTGCACCGTCGA', 1)],
'center_name': [('ANL', 27)],
'center_project_name': [('None', 27)],
'collection_timestamp': [('2011-11-11 13:00:00', 27)],
'common_name': [('rhizosphere metagenome', 9),
('root metagenome', 9),
('soil metagenome', 9)],
'country': [('GAZ:United States of America', 27)],
'data_type_id': [('2', 27)],
'depth': [('0.15', 27)],
'description': [('Cannabis Soil Microbiome', 27)],
'description_duplicate': [('Bucu Rhizo', 3),
('Bucu Roots', 3),
('Bucu bulk', 3),
('Burmese Rhizo', 3),
('Burmese bulk', 3),
('Burmese root', 3),
('Diesel Rhizo', 3),
('Diesel Root', 3),
('Diesel bulk', 3)],
'ebi_study_accession': [('None', 27)],
'ebi_submission_accession': [('None', 27)],
'elevation': [('114.0', 27)],
'emp_status': [('EMP', 27)],
'env_biome': [('ENVO:Temperate grasslands, savannas, and shrubland biome',
27)],
'env_feature': [('ENVO:plant-associated habitat', 27)],
'experiment_center': [('ANL', 27)],
'experiment_design_description': [('micro biome of soil and rhizosphere '
'of cannabis plants from CA', 27)],
'experiment_title': [('Cannabis Soil Microbiome', 27)],
'has_extracted_data': [('True', 27)],
'has_physical_specimen': [('True', 27)],
'host_subject_id': [('1001:B1', 1),
('1001:B2', 1),
('1001:B3', 1),
('1001:B4', 1),
('1001:B5', 1),
('1001:B6', 1),
('1001:B7', 1),
('1001:B8', 1),
('1001:B9', 1),
('1001:D1', 1),
('1001:D2', 1),
('1001:D3', 1),
('1001:D4', 1),
('1001:D5', 1),
('1001:D6', 1),
('1001:D7', 1),
('1001:D8', 1),
('1001:D9', 1),
('1001:M1', 1),
('1001:M2', 1),
('1001:M3', 1),
('1001:M4', 1),
('1001:M5', 1),
('1001:M6', 1),
('1001:M7', 1),
('1001:M8', 1),
('1001:M9', 1)],
'host_taxid': [('3483', 27)],
'illumina_technology': [('MiSeq', 27)],
'latitude': [('0.291867635913', 1),
('3.21190859967', 1),
('4.59216095574', 1),
('10.6655599093', 1),
('12.6245524972', 1),
('12.7065957714', 1),
('13.089194595', 1),
('23.1218032799', 1),
('29.1499460692', 1),
('31.7167821863', 1),
('35.2374368957', 1),
('38.2627021402', 1),
('40.8623799474', 1),
('43.9614715197', 1),
('44.9725384282', 1),
('53.5050692395', 1),
('57.571893782', 1),
('60.1102854322', 1),
('63.6505562766', 1),
('68.0991287718', 1),
('68.51099627', 1),
('74.0894932572', 1),
('78.3634273709', 1),
('82.8302905615', 1),
('84.0030227585', 1),
('85.4121476399', 1),
('95.2060749748', 1)],
'library_construction_protocol': [('This analysis was done as in Caporaso '
'et al 2011 Genome research. The PCR '
'primers (F515/R806) were developed '
'against the V4 region of the 16S rRNA '
'(both bacteria and archaea), which we '
'determined would yield optimal '
'community clustering with reads of '
'this length using a procedure '
'similar to that of ref. 15. [For '
'reference, this primer pair amplifies '
'the region 533_786 in the Escherichia '
'coli strain 83972 sequence '
'(greengenes accession no. '
'prokMSA_id:470367).] The reverse PCR '
'primer is barcoded with a 12-base '
'error-correcting Golay code to '
'facilitate multiplexing of up '
'to 1,500 samples per lane, and both '
'PCR primers contain sequencer adapter '
'regions.', 27)],
'linkerprimersequence': [('GTGCCAGCMGCCGCGGTAA', 27)],
'longitude': [
('2.35063674718', 1),
('3.48274264219', 1),
('6.66444220187', 1),
('15.6526750776', 1),
('26.8138925876', 1),
('27.3592668624', 1),
('31.2003474585', 1),
('31.6056761814', 1),
('32.5563076447', 1),
('34.8360987059', 1),
('42.838497795', 1),
('63.5115213108', 1),
('65.3283470202', 1),
('66.1920014699', 1),
('66.8954849864', 1),
('68.5041623253', 1),
('68.5945325743', 1),
('70.784770579', 1),
('74.423907894', 1),
('74.7123248382', 1),
('82.1270418227', 1),
('82.8516734159', 1),
('84.9722975792', 1),
('86.3615778099', 1),
('92.5274472082', 1),
('95.5088566087', 1),
('96.0693176066', 1)],
'pcr_primers': [('FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 27)],
'ph': [('6.8', 9), ('6.82', 10), ('6.94', 8)],
'physical_location': [('ANL', 27)],
'platform': [('Illumina', 27)],
'required_sample_info_status': [('completed', 27)],
'run_center': [('ANL', 27)],
'run_date': [('8/1/12', 27)],
'run_prefix': [('s_G1_L001_sequences', 27)],
'samp_salinity': [('7.1', 9), ('7.15', 9), ('7.44', 9)],
'samp_size': [('.25,g', 27)],
'sample_center': [('ANL', 27)],
'sample_type': [('ENVO:soil', 27)],
'season_environment': [('winter', 27)],
'sequencing_meth': [('Sequencing by synthesis', 27)],
'study_center': [('CCME', 27)],
'target_gene': [('16S rRNA', 27)],
'target_subfragment': [('V4', 27)],
'taxon_id': [('410658', 9), ('939928', 9), ('1118232', 9)],
'temp': [('15.0', 27)],
'texture': [('63.1 sand, 17.7 silt, 19.2 clay', 9),
('64.6 sand, 17.6 silt, 17.8 clay', 9),
('66 sand, 16.3 silt, 17.7 clay', 9)],
'tot_nitro': [('1.3', 9), ('1.41', 9), ('1.51', 9)],
'tot_org_carb': [('3.31', 9), ('4.32', 9), ('5.0', 9)],
'water_content_soil': [('0.101', 9), ('0.164', 9), ('0.178', 9)]}
# Run the unittest entry point when this module is executed as a script.
if __name__ == '__main__':
    main()
|
wasade/qiita
|
qiita_ware/test/test_util.py
|
Python
|
bsd-3-clause
| 21,675
|
[
"scikit-bio"
] |
75c32acefad4ec03dcf3eb2b65abcc0e59e0cc1ddd495511abbd5b3613db3bbc
|
# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
ExtractorError,
float_or_none,
get_element_by_class,
int_or_none,
parse_duration,
parse_iso8601,
try_get,
unescapeHTML,
urlencode_postdata,
urljoin,
)
from ..compat import (
compat_etree_fromstring,
compat_HTTPError,
compat_urlparse,
)
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_ID_REGEX = r'[pb][\da-z]{7}'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?bbc\.co\.uk/
(?:
programmes/(?!articles/)|
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
music/clips[/#]|
radio/player/
)
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
''' % _ID_REGEX
_LOGIN_URL = 'https://account.bbc.com/signin'
_NETRC_MACHINE = 'bbc'
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams with even better quality that pc mediaset but fails
# with geolocation in some cases when it's even not geo restricted at all (e.g.
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
_NAMESPACES = (
_MEDIASELECTION_NS,
_EMP_PLAYLIST_NS,
)
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
'url': 'http://www.bbc.co.uk/music/clips/p022h44b',
'note': 'Audio',
'info_dict': {
'id': 'p022h44j',
'ext': 'flv',
'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances',
'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.",
'duration': 227,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
'info_dict': {
'id': 'p02n76xf',
'ext': 'flv',
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
'info_dict': {
'id': 'b05zmgw1',
'ext': 'flv',
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
'title': 'Royal Academy Summer Exhibition',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
# iptv-all mediaset fails with geolocation however there is no geo restriction
# for this programme at all
'url': 'http://www.bbc.co.uk/programmes/b06rkn85',
'info_dict': {
'id': 'b06rkms3',
'ext': 'flv',
'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1",
'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!",
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Now it\'s really geo-restricted',
}, {
# compact player (https://github.com/rg3/youtube-dl/issues/8147)
'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
'info_dict': {
'id': 'p028bfkj',
'ext': 'flv',
'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
'only_matching': True,
}
]
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading signin page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'username': username,
'password': password,
})
post_url = urljoin(self._LOGIN_URL, self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post url', default=self._LOGIN_URL, group='url'))
response, urlh = self._download_webpage_handle(
post_url, None, 'Logging in', data=urlencode_postdata(login_form),
headers={'Referer': self._LOGIN_URL})
if self._LOGIN_URL in urlh.geturl():
error = clean_html(get_element_by_class('form-message', response))
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
    def _real_initialize(self):
        # Attempt account sign-in up front (no-op without credentials).
        self._login()
    class MediaSelectionError(Exception):
        """Raised when the media selector document reports an error id
        (e.g. 'notukerror', 'geolocation', 'selectionunavailable')."""
        def __init__(self, id):
            # the selector's error identifier, used to decide on fallback
            self.id = id
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
    def _extract_items(self, playlist):
        # All <item> children of an EMP playlist document.
        return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
def _findall_ns(self, element, xpath):
elements = []
for ns in self._NAMESPACES:
elements.extend(element.findall(xpath % ns))
return elements
def _extract_medias(self, media_selection):
error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
if error is None:
media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
return self._findall_ns(media_selection, './{%s}media')
    def _extract_connections(self, media):
        # <connection> children of a <media> element, across both namespaces.
        return self._findall_ns(media, './{%s}connection')
def _get_subtitles(self, media, programme_id):
subtitles = {}
for connection in self._extract_connections(media):
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
subtitles[lang] = [
{
'url': connection.get('href'),
'ext': 'ttml',
},
]
return subtitles
    def _raise_extractor_error(self, media_selection_error):
        # Surface the selector's error id as a user-facing, expected error.
        raise ExtractorError(
            '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
            expected=True)
    def _download_media_selector(self, programme_id):
        """Try each media selector endpoint in order; availability errors
        fall through to the next endpoint, and the last such error is
        re-raised if every endpoint fails."""
        last_exception = None
        for mediaselector_url in self._MEDIASELECTOR_URLS:
            try:
                return self._download_media_selector_url(
                    mediaselector_url % programme_id, programme_id)
            except BBCCoUkIE.MediaSelectionError as e:
                # These ids can be endpoint-specific (see _MEDIASELECTOR_URLS
                # comments), so another mediaset may still succeed.
                if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
                    last_exception = e
                    continue
                # Any other selector error is fatal immediately.
                self._raise_extractor_error(e)
        self._raise_extractor_error(last_exception)
    def _download_media_selector_url(self, url, programme_id=None):
        """Fetch and parse one media selection document.

        HTTP 403/404 responses still carry a parseable media-selection XML
        body (with an <error> element), so those are parsed instead of
        propagating the HTTP error.
        """
        try:
            media_selection = self._download_xml(
                url, programme_id, 'Downloading media selection XML')
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404):
                # The error response body is itself media-selection XML.
                media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
            else:
                raise
        return self._process_media_selector(media_selection, programme_id)
    def _process_media_selector(self, media_selection, programme_id):
        """Turn a media selection document into (formats, subtitles).

        Walks every <media>/<connection> pair, dispatching on the
        connection's supplier/transferFormat/protocol to the matching
        format extractor (ASX, DASH, HLS, HDS, direct HTTP, RTMP).
        Caption media is routed to subtitle extraction instead.
        """
        formats = []
        subtitles = None
        # hrefs already handled, to avoid emitting duplicate formats
        urls = []
        for media in self._extract_medias(media_selection):
            kind = media.get('kind')
            if kind in ('video', 'audio'):
                bitrate = int_or_none(media.get('bitrate'))
                encoding = media.get('encoding')
                service = media.get('service')
                width = int_or_none(media.get('width'))
                height = int_or_none(media.get('height'))
                file_size = int_or_none(media.get('media_file_size'))
                for connection in self._extract_connections(media):
                    href = connection.get('href')
                    if href in urls:
                        continue
                    if href:
                        urls.append(href)
                    conn_kind = connection.get('kind')
                    protocol = connection.get('protocol')
                    supplier = connection.get('supplier')
                    transfer_format = connection.get('transferFormat')
                    format_id = supplier or conn_kind or protocol
                    if service:
                        format_id = '%s_%s' % (service, format_id)
                    # ASX playlist
                    if supplier == 'asx':
                        for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                            formats.append({
                                'url': ref,
                                'format_id': 'ref%s_%s' % (i, format_id),
                            })
                    elif transfer_format == 'dash':
                        formats.extend(self._extract_mpd_formats(
                            href, programme_id, mpd_id=format_id, fatal=False))
                    elif transfer_format == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                            m3u8_id=format_id, fatal=False))
                        # USP-style URLs expose an additional .ism/.m3u8 rendition set
                        if re.search(self._USP_RE, href):
                            usp_formats = self._extract_m3u8_formats(
                                re.sub(self._USP_RE, r'/\1.ism/\1.m3u8', href),
                                programme_id, ext='mp4', entry_protocol='m3u8_native',
                                m3u8_id=format_id, fatal=False)
                            for f in usp_formats:
                                # NOTE(review): USP formats above 720p are skipped —
                                # presumably unreliable; confirm before changing.
                                if f.get('height') and f['height'] > 720:
                                    continue
                                formats.append(f)
                    elif transfer_format == 'hds':
                        formats.extend(self._extract_f4m_formats(
                            href, programme_id, f4m_id=format_id, fatal=False))
                    else:
                        # Plain media connection: build the format dict by hand
                        if not service and not supplier and bitrate:
                            format_id += '-%d' % bitrate
                        fmt = {
                            'format_id': format_id,
                            'filesize': file_size,
                        }
                        if kind == 'video':
                            fmt.update({
                                'width': width,
                                'height': height,
                                'tbr': bitrate,
                                'vcodec': encoding,
                            })
                        else:
                            fmt.update({
                                'abr': bitrate,
                                'acodec': encoding,
                                'vcodec': 'none',
                            })
                        if protocol in ('http', 'https'):
                            # Direct link
                            fmt.update({
                                'url': href,
                            })
                        elif protocol == 'rtmp':
                            application = connection.get('application', 'ondemand')
                            auth_string = connection.get('authString')
                            identifier = connection.get('identifier')
                            server = connection.get('server')
                            fmt.update({
                                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                                'play_path': identifier,
                                'app': '%s?%s' % (application, auth_string),
                                'page_url': 'http://www.bbc.co.uk',
                                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                                'rtmp_live': False,
                                'ext': 'flv',
                            })
                        else:
                            # Unknown protocol — skip this connection
                            continue
                        formats.append(fmt)
            elif kind == 'captions':
                subtitles = self.extract_subtitles(media, programme_id)
        return formats, subtitles
    def _download_playlist(self, playlist_id):
        """Resolve *playlist_id* via the programmes playlist.json endpoint.

        Returns (programme_id, title, description, duration, formats,
        subtitles). Falls back to the legacy playlist endpoint when the
        JSON endpoint responds with HTTP 404.
        """
        try:
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')
            version = playlist.get('defaultAvailableVersion')
            if version:
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
                for item in smp_config['items']:
                    kind = item['kind']
                    if kind not in ('programme', 'radioProgramme'):
                        continue
                    programme_id = item.get('vpid')
                    duration = int_or_none(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                    # Only the first programme/radioProgramme item is used
                    return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                raise
        # fallback to legacy playlist
        return self._process_legacy_playlist(playlist_id)
def _process_legacy_playlist_url(self, url, display_id):
playlist = self._download_legacy_playlist_url(url, display_id)
return self._extract_from_legacy_playlist(playlist, display_id)
def _process_legacy_playlist(self, playlist_id):
return self._process_legacy_playlist_url(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
def _download_legacy_playlist_url(self, url, playlist_id=None):
return self._download_xml(
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
msg = 'Episode %s is not yet available' % playlist_id
elif reason == 'postAvailability':
msg = 'Episode %s is no longer available' % playlist_id
elif reason == 'noMedia':
msg = 'Episode %s is not currently available' % playlist_id
else:
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
raise ExtractorError(msg, expected=True)
for item in self._extract_items(playlist):
kind = item.get('kind')
if kind not in ('programme', 'radioProgramme'):
continue
title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
description = description_el.text if description_el is not None else None
def get_programme_id(item):
def get_from_attributes(item):
for p in('identifier', 'group'):
value = item.get(p)
if value and re.match(r'^[pb][\da-z]{7}$', value):
return value
get_from_attributes(item)
mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
if mediator is not None:
return get_from_attributes(mediator)
programme_id = get_programme_id(item)
duration = int_or_none(item.get('duration'))
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
else:
formats, subtitles = self._process_media_selector(item, playlist_id)
programme_id = playlist_id
return programme_id, title, description, duration, formats, subtitles
    def _real_extract(self, url):
        """Extract a BBC iPlayer programme.

        Tries, in order: the embedded tviplayer settings JSON, a raw
        "vpid" occurrence in the page, and finally the playlist endpoints
        (playlist.json with legacy fallback).
        """
        group_id = self._match_id(url)
        webpage = self._download_webpage(url, group_id, 'Downloading video page')
        programme_id = None
        duration = None
        tviplayer = self._search_regex(
            r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
            webpage, 'player', default=None)
        if tviplayer:
            player = self._parse_json(tviplayer, group_id).get('player', {})
            duration = int_or_none(player.get('duration'))
            programme_id = player.get('vpid')
        if not programme_id:
            programme_id = self._search_regex(
                r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            title = self._og_search_title(webpage, default=None) or self._html_search_regex(
                (r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>',
                 r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title')
            description = self._search_regex(
                (r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
                 r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'),
                webpage, 'description', default=None)
            if not description:
                description = self._html_search_meta('description', webpage)
        else:
            # No vpid on the page: resolve via the playlist endpoints
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
        self._sort_formats(formats)
        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
class BBCIE(BBCCoUkIE):
    """Generic BBC (bbc.com / bbc.co.uk) article and story extractor.

    Handles pages that embed one or more videos via several mechanisms:
    playlist.sxml params, data-playable JSON, Morph payloads, `new SMP()`
    embeds, data-media-meta attributes and vxp playlist data.
    """
    IE_NAME = 'bbc'
    IE_DESC = 'BBC'
    _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
    _MEDIASELECTOR_URLS = [
        # Provides HQ HLS streams but fails with geolocation in some cases when it's
        # even not geo restricted at all
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
        # Provides more formats, namely direct mp4 links, but fails on some videos with
        # notukerror for non UK (?) users (e.g.
        # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
        # Provides fewer formats, but works everywhere for everybody (hopefully)
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
    ]
    _TESTS = [{
        # article with multiple videos embedded with data-playable containing vpids
        'url': 'http://www.bbc.com/news/world-europe-32668511',
        'info_dict': {
            'id': 'world-europe-32668511',
            'title': 'Russia stages massive WW2 parade despite Western boycott',
            'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
        },
        'playlist_count': 2,
    }, {
        # article with multiple videos embedded with data-playable (more videos)
        'url': 'http://www.bbc.com/news/business-28299555',
        'info_dict': {
            'id': 'business-28299555',
            'title': 'Farnborough Airshow: Video highlights',
            'description': 'BBC reports and video highlights at the Farnborough Airshow.',
        },
        'playlist_count': 9,
        'skip': 'Save time',
    }, {
        # article with multiple videos embedded with `new SMP()`
        # broken
        'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
        'info_dict': {
            'id': '3662a707-0af9-3149-963f-47bea720b460',
            'title': 'BUGGER',
        },
        'playlist_count': 18,
    }, {
        # single video embedded with data-playable containing vpid
        'url': 'http://www.bbc.com/news/world-europe-32041533',
        'info_dict': {
            'id': 'p02mprgb',
            'ext': 'mp4',
            'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
            'description': 'md5:2868290467291b37feda7863f7a83f54',
            'duration': 47,
            'timestamp': 1427219242,
            'upload_date': '20150324',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # article with single video embedded with data-playable containing XML playlist
        # with direct video links as progressiveDownloadUrl (for now these are extracted)
        # and playlist with f4m and m3u8 as streamingUrl
        'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
        'info_dict': {
            'id': '150615_telabyad_kentin_cogu',
            'ext': 'mp4',
            'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
            'description': 'md5:33a4805a855c9baf7115fcbde57e7025',
            'timestamp': 1434397334,
            'upload_date': '20150615',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video embedded with data-playable containing XML playlists (regional section)
        'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
        'info_dict': {
            'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
            'ext': 'mp4',
            'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
            'description': 'md5:1525f17448c4ee262b64b8f0c9ce66c8',
            'timestamp': 1434713142,
            'upload_date': '20150619',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video from video playlist embedded with vxp-playlist-data JSON
        'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
        'info_dict': {
            'id': 'p02w6qjc',
            'ext': 'mp4',
            'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
            'duration': 56,
            'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video story with digitalData
        'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
        'info_dict': {
            'id': 'p02q6gc4',
            'ext': 'flv',
            'title': 'Sri Lanka’s spicy secret',
            'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
            'timestamp': 1437674293,
            'upload_date': '20150723',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video story without digitalData
        'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
        'info_dict': {
            'id': 'p018zqqg',
            'ext': 'mp4',
            'title': 'Hyundai Santa Fe Sport: Rock star',
            'description': 'md5:b042a26142c4154a6e472933cf20793d',
            'timestamp': 1415867444,
            'upload_date': '20141113',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video embedded with Morph
        'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975',
        'info_dict': {
            'id': 'p041vhd0',
            'ext': 'mp4',
            'title': "Nigeria v Japan - Men's First Round",
            'description': 'Live coverage of the first round from Group B at the Amazonia Arena.',
            'duration': 7980,
            'uploader': 'BBC Sport',
            'uploader_id': 'bbc_sport',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'Georestricted to UK',
    }, {
        # single video with playlist.sxml URL in playlist param
        'url': 'http://www.bbc.com/sport/0/football/33653409',
        'info_dict': {
            'id': 'p02xycnp',
            'ext': 'mp4',
            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
            'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
            'duration': 140,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # article with multiple videos embedded with playlist.sxml in playlist param
        'url': 'http://www.bbc.com/sport/0/football/34475836',
        'info_dict': {
            'id': '34475836',
            'title': 'Jurgen Klopp: Furious football from a witty and winning coach',
            'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.',
        },
        'playlist_count': 3,
    }, {
        # school report article with single video
        'url': 'http://www.bbc.co.uk/schoolreport/35744779',
        'info_dict': {
            'id': '35744779',
            'title': 'School which breaks down barriers in Jerusalem',
        },
        'playlist_count': 1,
    }, {
        # single video with playlist URL from weather section
        'url': 'http://www.bbc.com/weather/features/33601775',
        'only_matching': True,
    }, {
        # custom redirection to www.bbc.com
        'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
        'only_matching': True,
    }, {
        # single video article embedded with data-media-vpid
        'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        """Defer to the more specific BBC extractors when one of them
        matches the URL."""
        EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
        return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
                else super(BBCIE, cls).suitable(url))

    def _extract_from_media_meta(self, media_meta, video_id):
        """Turn a data-media-meta dict into (formats, subtitles).

        Tries, in order: direct sourceFiles links, an externalId resolved
        through the media selector, and an href pointing at a legacy
        playlist document. Returns ([], []) if none apply.
        """
        # Direct links to media in media metadata (e.g.
        # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
        # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
        source_files = media_meta.get('sourceFiles')
        if source_files:
            return [{
                'url': f['url'],
                'format_id': format_id,
                'ext': f.get('encoding'),
                'tbr': float_or_none(f.get('bitrate'), 1000),
                'filesize': int_or_none(f.get('filesize')),
            } for format_id, f in source_files.items() if f.get('url')], []
        programme_id = media_meta.get('externalId')
        if programme_id:
            return self._download_media_selector(programme_id)
        # Process playlist.sxml as legacy playlist
        href = media_meta.get('href')
        if href:
            playlist = self._download_legacy_playlist_url(href)
            _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
            return formats, subtitles
        return [], []

    def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
        """Extract a single info dict from a playlist.sxml URL."""
        programme_id, title, description, duration, formats, subtitles = \
            self._process_legacy_playlist_url(url, playlist_id)
        self._sort_formats(formats)
        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _real_extract(self, url):
        """Extract video(s) from a BBC article/story page, trying each
        known embedding mechanism in turn (see class docstring)."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        json_ld_info = self._search_json_ld(webpage, playlist_id, default={})
        timestamp = json_ld_info.get('timestamp')
        playlist_title = json_ld_info.get('title')
        if not playlist_title:
            playlist_title = self._og_search_title(
                webpage, default=None) or self._html_search_regex(
                r'<title>(.+?)</title>', webpage, 'playlist title', default=None)
            if playlist_title:
                # Strip the trailing " - BBC ..." site suffix from the title
                playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip()
        playlist_description = json_ld_info.get(
            'description') or self._og_search_description(webpage, default=None)
        if not timestamp:
            timestamp = parse_iso8601(self._search_regex(
                [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
                 r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
                 r'"datePublished":\s*"([^"]+)'],
                webpage, 'date', default=None))
        entries = []
        # article with multiple videos embedded with playlist.sxml (e.g.
        # http://www.bbc.com/sport/0/football/34475836)
        playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
        playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
        if playlists:
            entries = [
                self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
                for playlist_url in playlists]
        # news article with multiple videos embedded with data-playable
        data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
        if data_playables:
            for _, data_playable_json in data_playables:
                data_playable = self._parse_json(
                    unescapeHTML(data_playable_json), playlist_id, fatal=False)
                if not data_playable:
                    continue
                settings = data_playable.get('settings', {})
                if settings:
                    # data-playable with video vpid in settings.playlistObject.items (e.g.
                    # http://www.bbc.com/news/world-us-canada-34473351)
                    playlist_object = settings.get('playlistObject', {})
                    if playlist_object:
                        items = playlist_object.get('items')
                        if items and isinstance(items, list):
                            title = playlist_object['title']
                            description = playlist_object.get('summary')
                            duration = int_or_none(items[0].get('duration'))
                            programme_id = items[0].get('vpid')
                            formats, subtitles = self._download_media_selector(programme_id)
                            self._sort_formats(formats)
                            entries.append({
                                'id': programme_id,
                                'title': title,
                                'description': description,
                                'timestamp': timestamp,
                                'duration': duration,
                                'formats': formats,
                                'subtitles': subtitles,
                            })
                    else:
                        # data-playable without vpid but with a playlist.sxml URLs
                        # in otherSettings.playlist (e.g.
                        # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
                        playlist = data_playable.get('otherSettings', {}).get('playlist', {})
                        if playlist:
                            entry = None
                            for key in ('streaming', 'progressiveDownload'):
                                playlist_url = playlist.get('%sUrl' % key)
                                if not playlist_url:
                                    continue
                                try:
                                    info = self._extract_from_playlist_sxml(
                                        playlist_url, playlist_id, timestamp)
                                    if not entry:
                                        entry = info
                                    else:
                                        entry['title'] = info['title']
                                        entry['formats'].extend(info['formats'])
                                except Exception as e:
                                    # Some playlist URL may fail with 500, at the same time
                                    # the other one may work fine (e.g.
                                    # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
                                    if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
                                        continue
                                    raise
                            if entry:
                                self._sort_formats(entry['formats'])
                                entries.append(entry)
        if entries:
            return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
        # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        programme_id = self._search_regex(
            [r'data-(?:video-player|media)-vpid="(%s)"' % self._ID_REGEX,
             r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
             r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
            webpage, 'vpid', default=None)
        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            self._sort_formats(formats)
            # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
            digital_data = self._parse_json(
                self._search_regex(
                    r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
                programme_id, fatal=False)
            page_info = digital_data.get('page', {}).get('pageInfo', {})
            title = page_info.get('pageName') or self._og_search_title(webpage)
            description = page_info.get('description') or self._og_search_description(webpage)
            timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
            return {
                'id': programme_id,
                'title': title,
                'description': description,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            }
        # Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
        # There are several setPayload calls may be present but the video
        # seems to be always related to the first one
        morph_payload = self._parse_json(
            self._search_regex(
                r'Morph\.setPayload\([^,]+,\s*({.+?})\);',
                webpage, 'morph payload', default='{}'),
            playlist_id, fatal=False)
        if morph_payload:
            components = try_get(morph_payload, lambda x: x['body']['components'], list) or []
            for component in components:
                if not isinstance(component, dict):
                    continue
                lead_media = try_get(component, lambda x: x['props']['leadMedia'], dict)
                if not lead_media:
                    continue
                identifiers = lead_media.get('identifiers')
                if not identifiers or not isinstance(identifiers, dict):
                    continue
                programme_id = identifiers.get('vpid') or identifiers.get('playablePid')
                if not programme_id:
                    continue
                title = lead_media.get('title') or self._og_search_title(webpage)
                formats, subtitles = self._download_media_selector(programme_id)
                self._sort_formats(formats)
                description = lead_media.get('summary')
                uploader = lead_media.get('masterBrand')
                uploader_id = lead_media.get('mid')
                duration = None
                duration_d = lead_media.get('duration')
                if isinstance(duration_d, dict):
                    duration = parse_duration(dict_get(
                        duration_d, ('rawDuration', 'formattedDuration', 'spokenDuration')))
                return {
                    'id': programme_id,
                    'title': title,
                    'description': description,
                    'duration': duration,
                    'uploader': uploader,
                    'uploader_id': uploader_id,
                    'formats': formats,
                    'subtitles': subtitles,
                }

        def extract_all(pattern):
            # All non-empty JSON objects matching *pattern* in the page
            return list(filter(None, map(
                lambda s: self._parse_json(s, playlist_id, fatal=False),
                re.findall(pattern, webpage))))

        # Multiple video article (e.g.
        # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
        EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
        entries = []
        for match in extract_all(r'new\s+SMP\(({.+?})\)'):
            embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
            if embed_url and re.match(EMBED_URL, embed_url):
                entries.append(embed_url)
        entries.extend(re.findall(
            r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
        if entries:
            return self.playlist_result(
                [self.url_result(entry_, 'BBCCoUk') for entry_ in entries],
                playlist_id, playlist_title, playlist_description)
        # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
        medias = extract_all(r"data-media-meta='({[^']+})'")
        if not medias:
            # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
            media_asset = self._search_regex(
                r'mediaAssetPage\.init\(\s*({.+?}), "/',
                webpage, 'media asset', default=None)
            if media_asset:
                media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
                medias = []
                for video in media_asset_page.get('videos', {}).values():
                    medias.extend(video.values())
        if not medias:
            # Multiple video playlist with single `now playing` entry (e.g.
            # http://www.bbc.com/news/video_and_audio/must_see/33767813)
            vxp_playlist = self._parse_json(
                self._search_regex(
                    r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
                    webpage, 'playlist data'),
                playlist_id)
            playlist_medias = []
            for item in vxp_playlist:
                media = item.get('media')
                if not media:
                    continue
                playlist_medias.append(media)
                # Download single video if found media with asset id matching the video id from URL
                if item.get('advert', {}).get('assetId') == playlist_id:
                    medias = [media]
                    break
            # Fallback to the whole playlist
            if not medias:
                medias = playlist_medias
        entries = []
        for num, media_meta in enumerate(medias, start=1):
            formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
            if not formats:
                continue
            self._sort_formats(formats)
            video_id = media_meta.get('externalId')
            if not video_id:
                video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
            title = media_meta.get('caption')
            if not title:
                title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
            duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
            images = []
            for image in media_meta.get('images', {}).values():
                images.extend(image.values())
            if 'image' in media_meta:
                images.append(media_meta['image'])
            thumbnails = [{
                'url': image.get('href'),
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            } for image in images]
            entries.append({
                'id': video_id,
                'title': title,
                'thumbnails': thumbnails,
                'duration': duration,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            })
        return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
class BBCCoUkArticleIE(InfoExtractor):
    """Extractor for bbc.co.uk/programmes/articles pages: yields a playlist
    of the clips embedded in the article."""
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
    IE_NAME = 'bbc.co.uk:article'
    IE_DESC = 'BBC articles'
    _TEST = {
        'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
        'info_dict': {
            'id': '3jNQLTMrPlYGTBn0WV6M2MS',
            'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
            'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
        },
        'playlist_count': 4,
        'add_ie': ['BBCCoUk'],
    }

    def _real_extract(self, url):
        """Collect every Clip resource URL from the article page and wrap
        them into a playlist result."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage).strip()
        entries = [self.url_result(programme_url) for programme_url in re.findall(
            r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
        return self.playlist_result(entries, playlist_id, title, description)
class BBCCoUkPlaylistBaseIE(InfoExtractor):
    """Shared pagination/extraction logic for BBC playlist pages.

    Subclasses must define _VALID_URL, _URL_TEMPLATE, _VIDEO_ID_TEMPLATE
    and implement _extract_title_and_description().
    """
    def _entries(self, webpage, url, playlist_id):
        """Yield url_result entries for every video id found, following
        pagination links unless the request URL pins an explicit ?page=."""
        single_page = 'page' in compat_urlparse.parse_qs(
            compat_urlparse.urlparse(url).query)
        for page_num in itertools.count(2):
            for video_id in re.findall(
                    self._VIDEO_ID_TEMPLATE % BBCCoUkIE._ID_REGEX, webpage):
                yield self.url_result(
                    self._URL_TEMPLATE % video_id, BBCCoUkIE.ie_key())
            if single_page:
                return
            next_page = self._search_regex(
                r'<li[^>]+class=(["\'])pagination_+next\1[^>]*><a[^>]+href=(["\'])(?P<url>(?:(?!\2).)+)\2',
                webpage, 'next page url', default=None, group='url')
            if not next_page:
                break
            webpage = self._download_webpage(
                compat_urlparse.urljoin(url, next_page), playlist_id,
                'Downloading page %d' % page_num, page_num)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title, description = self._extract_title_and_description(webpage)
        return self.playlist_result(
            self._entries(webpage, url, playlist_id),
            playlist_id, title, description)
class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
    """Extractor for iPlayer episode/group playlist pages."""
    IE_NAME = 'bbc.co.uk:iplayer:playlist'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/(?:episodes|group)/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
    _URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
    _VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
    _TESTS = [{
        'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
        'info_dict': {
            'id': 'b05rcz9v',
            'title': 'The Disappearance',
            'description': 'French thriller serial about a missing teenager.',
        },
        'playlist_mincount': 6,
        'skip': 'This programme is not currently available on BBC iPlayer',
    }, {
        # Available for over a year unlike 30 days for most other programmes
        'url': 'http://www.bbc.co.uk/iplayer/group/p02tcc32',
        'info_dict': {
            'id': 'p02tcc32',
            'title': 'Bohemian Icons',
            'description': 'md5:683e901041b2fe9ba596f2ab04c4dbe7',
        },
        'playlist_mincount': 10,
    }]

    def _extract_title_and_description(self, webpage):
        """Pull the playlist title (<h1>) and subtitle description from the
        iPlayer page markup."""
        title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
        description = self._search_regex(
            r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
            webpage, 'description', fatal=False, group='value')
        return title, description
class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
    """Extractor for programme episodes/broadcasts/clips listing pages."""
    IE_NAME = 'bbc.co.uk:playlist'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/(?P<id>%s)/(?:episodes|broadcasts|clips)' % BBCCoUkIE._ID_REGEX
    _URL_TEMPLATE = 'http://www.bbc.co.uk/programmes/%s'
    _VIDEO_ID_TEMPLATE = r'data-pid=["\'](%s)'
    _TESTS = [{
        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
        'info_dict': {
            'id': 'b05rcz9v',
            'title': 'The Disappearance - Clips - BBC Four',
            'description': 'French thriller serial about a missing teenager.',
        },
        'playlist_mincount': 7,
    }, {
        # multipage playlist, explicit page
        'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips?page=1',
        'info_dict': {
            'id': 'b00mfl7n',
            'title': 'Frozen Planet - Clips - BBC One',
            'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
        },
        'playlist_mincount': 24,
    }, {
        # multipage playlist, all pages
        'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips',
        'info_dict': {
            'id': 'b00mfl7n',
            'title': 'Frozen Planet - Clips - BBC One',
            'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
        },
        'playlist_mincount': 142,
    }, {
        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/broadcasts/2016/06',
        'only_matching': True,
    }, {
        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
        'only_matching': True,
    }, {
        'url': 'http://www.bbc.co.uk/programmes/b055jkys/episodes/player',
        'only_matching': True,
    }]

    def _extract_title_and_description(self, webpage):
        """Pull title/description from the page's Open Graph metadata."""
        title = self._og_search_title(webpage, fatal=False)
        description = self._og_search_description(webpage)
        return title, description
|
Dino0631/RedRain-Bot
|
lib/youtube_dl/extractor/bbc.py
|
Python
|
gpl-3.0
| 53,625
|
[
"VisIt"
] |
00b2f88f315610bd6bd3d7102f67e7aad13c42c7e117932c582a90be21b54742
|
LVR = '''10 223_0 0.002695616 1 0.00
10 223_100000 0 2 0.00
10 223_200000 0.007806037 3 0.00
10 223_300000 0.00293115 4 0.01
10 324_0 0.009263053 5 0.01
10 324_100000 0 6 0.02
10 4a_100000 0 7 0.02
10 4a_0 0.015445527 8 0.02
10 445_0 0.002410607 9 0.04
10 749_0 0.003562352 10 0.04
10 48a_800000 0.005094611 11 0.04
10 48a_700000 0.006276164 12 0.05
10 48a_600000 0 13 0.06
10 48a_500000 0 14 0.06
10 48a_400000 0.001319258 15 0.06
10 48a_300000 0.001165716 16 0.06
10 48a_200000 0 17 0.06
10 48a_100000 0.004875112 18 0.06
10 48a_0 0.011606714 19 0.06
10 90_1000000 0.006050412 20 0.07
10 90_900000 0 21 0.08
10 90_800000 0 22 0.08
10 90_700000 0.012317647 23 0.08
10 90_600000 0.00242351 24 0.09
10 90_500000 0.005260552 25 0.10
10 90_400000 0.017070644 26 0.10
10 90_300000 0.007217653 27 0.12
10 90_200000 0.018792559 28 0.12
10 90_100000 0.006135235 29 0.14
10 90_0 0 30 0.15
10 206_400000 0.004870802 31 0.15
10 206_300000 0 32 0.15
10 206_200000 0.01132475 33 0.15
10 206_100000 0.009808547 34 0.17
10 206_0 0.003722065 35 0.18
10 210_300000 0.009808261 36 0.18
10 210_200000 0.00492777 37 0.19
10 210_100000 0 38 0.19
10 210_0 0.009858526 39 0.19
10 13_0 0 40 0.20
10 13_100000 0.005602516 41 0.20
10 13_200000 0 42 0.21
10 13_300000 0.008231515 43 0.21
10 13_400000 0.003944171 44 0.22
10 13_500000 0.011383721 45 0.22
10 13_600000 0.219829 46 0.23
10 13_700000 0 47 0.45
10 13_800000 0.002264955 48 0.45
10 13_900000 0.019489854 49 0.46
10 13_1000000 0.015010154 50 0.47
10 13_1100000 0 51 0.49
10 13_1200000 0.013836755 52 0.49
10 13_1300000 0.004241092 53 0.50
10 13_1400000 0.012835057 54 0.51
10 13_1500000 0.014375324 55 0.52
10 13_1600000 0.019986989 56 0.54
10 13_1700000 0.023221458 57 0.56
10 13_1800000 0.003011143 58 0.58
10 13_1900000 0.006286488 59 0.58
10 13_2000000 0.012559756 60 0.59
10 13_2100000 0.022306371 61 0.60
10 13_2200000 0.010754413 62 0.62
10 13_2300000 0.008302552 63 0.63
10 13_2400000 0.008323509 64 0.64
10 13_2500000 0 65 0.65
10 13_2600000 0.25 66 0.65
10 267_200000 0.067709909 67 0.90
10 40_100000 0.037459599 68 0.97
10 40_600000 0.003051282 69 1.01
10 40_700000 0.001947026 70 1.01
10 40_900000 0 71 1.01
10 40_1100000 0.005823506 72 1.01
10 40_1200000 0.004430124 73 1.02
10 40_1300000 0 74 1.02
10 40_1400000 0.003320033 75 1.02
10 33_1700000 0.006618393 76 1.02
10 33_1600000 0 77 1.03
10 33_1500000 0 78 1.03
10 33_1400000 0.004945932 79 1.03
10 33_1300000 0 80 1.04
10 33_1200000 0 81 1.04
10 33_1100000 0 82 1.04
10 33_700000 0 83 1.04
10 33_600000 0 84 1.04
10 33_400000 0 85 1.04
10 9a_1100000 0 86 1.04
10 9a_1000000 0.002558101 87 1.04
10 9a_800000 0 88 1.04
10 9a_600000 0.00517781 89 1.04
10 9a_400000 0 90 1.04
10 9a_300000 0.005102225 91 1.04
10 156_100000 0.00961609 92 1.05
10 172_200000 0.006282532 93 1.06
10 172_400000 0.008864757 94 1.06
10 50_200000 0 95 1.07
10 50_300000 0.016962187 96 1.07
10 50_800000 0.014688043 97 1.09
10 50_1000000 0.015300298 98 1.10
10 50_1100000 0 99 1.12
10 50_1200000 0.003363562 100 1.12
10 50_1300000 0.018466091 101 1.12
10 50_1400000 0.003639282 102 1.14
10 209_0 0 103 1.15
10 209_100000 0 104 1.15
10 209_300000 0.003746532 105 1.15
10 490_0 0.003777772 106 1.15
10 193a_300000 0 107 1.15
10 193a_200000 0 108 1.15
10 193a_100000 0.005566139 109 1.15
10 193a_0 0.011605608 110 1.16
10 125b_600000 0.004754541 111 1.17
10 125b_300000 0 112 1.17
10 125b_200000 0 113 1.17
10 125b_100000 0 114 1.17
10 125b_0 0.014824577 115 1.17
10 188_500000 0 116 1.19
10 188_400000 0 117 1.19
10 188_300000 0.009853888 118 1.19
10 311_100000 0 119 1.20
10 311_0 0 120 1.20
10 204_0 0.007392043 121 1.20
10 204_100000 0 122 1.21
10 204_200000 0 123 1.21
10 204_300000 0.004738853 124 1.21
10 204_400000 0 125 1.21
10 87_0 0.016765536 126 1.21
10 87_100000 0.013120358 127 1.23
10 87_200000 0.004734061 128 1.24
10 87_300000 0 129 1.25
10 87_400000 0.004145251 130 1.25
10 87_500000 0.011454728 131 1.25
10 87_600000 0.022462746 132 1.26
10 87_700000 0.008965364 133 1.28
10 87_800000 0.002724971 134 1.29
10 87_900000 0.010437933 135 1.30
10 87_1000000 0.006485347 136 1.31
10 159_0 0.007820256 137 1.31
10 159_100000 0.002419955 138 1.32
10 159_200000 0.005813822 139 1.32
10 159_300000 0.020684938 140 1.33
10 159_400000 0.002816386 141 1.35
10 159_500000 142 1.35
11 75_0 0 143 0.00
11 75_100000 0.012475895 144 0.00
11 75_200000 0.022200174 145 0.01
11 75_300000 0.007434717 146 0.03
11 75_400000 0.032980049 147 0.04
11 75_500000 0.011124387 148 0.08
11 75_600000 0.004912581 149 0.09
11 75_700000 0.005664011 150 0.09
11 75_800000 0.017839063 151 0.10
11 75_900000 0.010698399 152 0.11
11 75_1000000 0.004799686 153 0.13
11 75_1100000 0.014927357 154 0.13
11 228_0 0 155 0.15
11 228_100000 0.007989271 156 0.15
11 228_200000 0.017972799 157 0.15
11 228_300000 0.005531813 158 0.17
11 273b_0 0.015115941 159 0.18
11 273b_100000 0.009045348 160 0.19
11 213_0 0.006699337 161 0.20
11 213_100000 0.013610052 162 0.21
11 213_200000 0.019090455 163 0.22
11 213_300000 0.003514213 164 0.24
11 213_400000 0 165 0.24
11 63a_0 0.00728548 166 0.24
11 63a_100000 0 167 0.25
11 63a_200000 0.010694847 168 0.25
11 257b_0 0.00364347 169 0.26
11 257b_100000 0.003674798 170 0.27
11 30_1900000 0.0082505 171 0.27
11 30_1800000 0.00455808 172 0.28
11 30_1700000 0.004562384 173 0.28
11 30_1600000 0.00820042 174 0.29
11 30_1500000 0.003732599 175 0.29
11 30_1400000 0.010715278 176 0.30
11 30_1300000 0.008492672 177 0.31
11 30_1200000 0.002709558 178 0.32
11 30_1100000 0.008244275 179 0.32
11 48b_600000 0.009102747 180 0.33
11 48b_500000 0 181 0.34
11 48b_400000 0.006262054 182 0.34
11 48b_300000 0.006342743 183 0.34
11 48b_200000 0.00497826 184 0.35
11 48b_100000 0.002299748 185 0.36
11 243_0 0.003532469 186 0.36
11 415_0 0.033877455 187 0.36
11 239_200000 0.01132406 188 0.39
11 239_100000 0.019484082 189 0.41
11 167_200000 0.003428267 190 0.43
11 167_100000 0 191 0.43
11 167_0 0 192 0.43
11 131_600000 0.005180429 193 0.43
11 221a_0 0 194 0.43
11 221a_100000 0.005004248 195 0.43
11 100_100000 0 196 0.44
11 100_200000 0 197 0.44
11 100_300000 0 198 0.44
11 100_400000 0.003871282 199 0.44
11 100_500000 0 200 0.44
11 100_600000 0.012252267 201 0.44
11 100_700000 0 202 0.46
11 100_800000 0 203 0.46
11 100_900000 0 204 0.46
11 598_0 0.008257851 205 0.46
11 162_0 0.003395694 206 0.46
11 162_100000 0 207 0.47
11 162_200000 0.014576192 208 0.47
11 162_300000 0.003349408 209 0.48
11 162_400000 0 210 0.48
11 162_500000 0.001401901 211 0.48
11 161_500000 0.001453706 212 0.49
11 161_400000 0 213 0.49
11 161_300000 0.008145203 214 0.49
11 161_200000 0.012410627 215 0.50
11 161_100000 0 216 0.51
11 161_0 0 217 0.51
11 22a_0 0 218 0.51
11 779_0 0 219 0.51
11 22b_0 0.014572191 220 0.51
11 22b_200000 0.015475155 221 0.52
11 22b_400000 0.005001292 222 0.54
11 22b_500000 0.003689973 223 0.54
11 22b_600000 0.014864266 224 0.55
11 22b_700000 0.007142414 225 0.56
11 22b_800000 0.015236968 226 0.57
11 22b_900000 0.016161634 227 0.58
11 22b_1000000 0 228 0.60
11 22b_1100000 0.01286085 229 0.60
11 22b_1200000 0.05317206 230 0.61
11 22b_1300000 0.216622408 231 0.67
11 185_400000 0.025585439 232 0.88
11 185_200000 0.079638826 233 0.91
11 185_100000 0.009855297 234 0.99
11 185_0 0 235 1.00
11 182_400000 0.019476054 236 1.00
11 182_300000 0 237 1.02
11 182_200000 0.00979352 238 1.02
11 39b_200000 0 239 1.03
11 39b_300000 0 240 1.03
11 39b_500000 0.002951744 241 1.03
11 39b_600000 0.00259207 242 1.03
11 39b_700000 0 243 1.03
11 47b_100000 0.00376195 244 1.03
11 47b_300000 0 245 1.04
11 47b_500000 0.003271712 246 1.04
11 47b_900000 0.001763016 247 1.04
11 49_1300000 0.004666057 248 1.04
11 49_1200000 0.003641593 249 1.05
11 49_1100000 0.09651286 250 1.05
11 49_1000000 0.090237597 251 1.15
11 49_700000 252 1.24
12 58b_900000 0 253 0.00
12 58b_800000 0.013669863 254 0.00
12 58b_700000 0 255 0.01
12 58b_600000 0 256 0.01
12 58b_500000 0.012062596 257 0.01
12 58b_400000 0.00976764 258 0.03
12 58b_300000 0.015709889 259 0.04
12 58b_200000 0 260 0.05
12 58b_100000 0.036665773 261 0.05
12 220_300000 0.009974837 262 0.09
12 220_200000 0.00429205 263 0.10
12 220_100000 0 264 0.10
12 220_0 0.002488737 265 0.10
12 336_0 0 266 0.10
12 336_100000 0.004586013 267 0.10
12 132_700000 0.005904451 268 0.11
12 132_600000 0 269 0.12
12 132_500000 0.002860423 270 0.12
12 132_400000 0.013775421 271 0.12
12 132_300000 0.001510497 272 0.13
12 132_200000 0.000911659 273 0.13
12 132_100000 0.003984455 274 0.13
12 132_0 0.014318406 275 0.14
12 224_300000 0 276 0.15
12 224_100000 0.002756486 277 0.15
12 224_0 0.007319349 278 0.16
12 381_100000 0 279 0.16
12 349_0 0.013499314 280 0.16
12 332_0 0.012181888 281 0.18
12 305_100000 0.008115668 282 0.19
12 703_0 0 283 0.20
12 299_300000 0 284 0.20
12 201_300000 0 285 0.20
12 201_200000 0.009888626 286 0.20
12 201_0 0.02042121 287 0.21
12 91_300000 0 288 0.23
12 91_600000 0.015169311 289 0.23
12 91_800000 0.036380237 290 0.24
12 553_0 0.092714125 291 0.28
12 31_1300000 0.106501949 292 0.37
12 31_800000 0.023965916 293 0.48
12 31_700000 0.013375393 294 0.50
12 31_300000 0.004298118 295 0.51
12 31_200000 0.003050697 296 0.52
12 31_100000 0.002796848 297 0.52
12 114a_300000 0 298 0.52
12 160_400000 0.002328048 299 0.52
12 160_300000 0 300 0.53
12 78_1000000 0.008693116 301 0.53
12 450_0 0.006163109 302 0.54
12 241_300000 0.009224924 303 0.54
12 241_200000 0 304 0.55
12 241_100000 0 305 0.55
12 241_0 0.001168348 306 0.55
12 394_0 0 307 0.55
12 143_600000 0 308 0.55
12 143_500000 0 309 0.55
12 143_300000 0.006084598 310 0.55
12 143_200000 0.025096212 311 0.56
12 917_0 0.051110155 312 0.58
12 337_0 0.062902495 313 0.63
12 819_0 0.04991887 314 0.70
12 271_200000 0.013317594 315 0.75
12 309_0 0.005949376 316 0.76
12 250_200000 0.014503919 317 0.77
12 577_0 0 318 0.78
12 142_600000 0.019843877 319 0.78
12 142_0 0.007345431 320 0.80
12 435_0 0.012101691 321 0.81
12 140a_200000 0.004099238 322 0.82
12 140a_100000 0.006556896 323 0.82
12 140a_0 0 324 0.83
12 37_1700000 0.002471981 325 0.83
12 37_1600000 0 326 0.83
12 37_1500000 0 327 0.83
12 37_1400000 0 328 0.83
12 37_1300000 0.008917887 329 0.83
12 37_1200000 0.005915427 330 0.84
12 37_1100000 0.002362477 331 0.85
12 37_1000000 0.004891661 332 0.85
12 37_900000 0.002474527 333 0.86
12 37_800000 0.002364682 334 0.86
12 37_700000 0.003447942 335 0.86
12 37_600000 0 336 0.86
12 37_500000 0.005063918 337 0.86
12 37_400000 0 338 0.87
12 37_300000 0.006262175 339 0.87
12 37_200000 0.002309506 340 0.88
12 37_100000 0.011105549 341 0.88
12 37_0 0.011033432 342 0.89
12 334_100000 0 343 0.90
12 334_0 0.009808123 344 0.90
12 297_200000 0 345 0.91
12 297_100000 0.008757673 346 0.91
12 297_0 0.004615864 347 0.92
12 39a_800000 0.007057734 348 0.92
12 39a_700000 0.004431011 349 0.93
12 39a_600000 0 350 0.93
12 39a_500000 0.014661185 351 0.93
12 39a_400000 0.006384759 352 0.95
12 39a_300000 0.025139164 353 0.96
12 39a_200000 0.008057093 354 0.98
12 39a_100000 0.023706559 355 0.99
12 39a_0 0.003397709 356 1.01
12 17b_0 0.026463427 357 1.02
12 17b_100000 0.004995086 358 1.04
12 17b_200000 0.003400272 359 1.05
12 17b_300000 0.009388585 360 1.05
12 17b_400000 0.013320661 361 1.06
12 17b_500000 0.004780649 362 1.07
12 17b_600000 0 363 1.08
12 17b_700000 0.007277915 364 1.08
12 17b_800000 0.007842189 365 1.09
12 17b_900000 0.008349237 366 1.09
12 17b_1000000 0.013833672 367 1.10
12 17b_1100000 0 368 1.12
12 17b_1200000 0 369 1.12
12 683_0 0 370 1.12
12 44b_0 0.016087936 371 1.12
12 44b_100000 0.014674929 372 1.13
12 44b_200000 0.014947631 373 1.15
12 44b_300000 0.011357096 374 1.16
12 44b_400000 0.010810372 375 1.17
12 44b_500000 0.05019858 376 1.18
12 44b_600000 377 1.23
13 122_0 0.006561166 378 0.00
13 122_100000 0 379 0.01
13 122_200000 0.019778226 380 0.01
13 122_400000 0.006119748 381 0.03
13 122_500000 0.018969512 382 0.03
13 122_600000 0.02514421 383 0.05
13 235_300000 0.003655208 384 0.08
13 235_200000 0.0609897 385 0.08
13 235_0 0.00640169 386 0.14
13 52_0 0 387 0.15
13 52_100000 0.009447277 388 0.15
13 52_200000 0.003592452 389 0.16
13 52_300000 0.012245047 390 0.16
13 52_400000 0 391 0.17
13 52_700000 0.016384402 392 0.17
13 52_800000 0.012297693 393 0.19
13 52_1200000 0.003252032 394 0.20
13 52_1300000 0.006555996 395 0.20
13 52_1400000 0.021119276 396 0.21
13 369_100000 0.026085636 397 0.23
13 369_0 0.00767077 398 0.26
13 139_0 0.002824118 399 0.27
13 139_100000 0 400 0.27
13 139_300000 0.002488536 401 0.27
13 139_400000 0 402 0.27
13 139_600000 0 403 0.27
13 329_0 0 404 0.27
13 129b_400000 0.026363929 405 0.27
13 129b_300000 0 406 0.30
13 129b_200000 0.014392501 407 0.30
13 138_100000 0.020248549 408 0.31
13 34_1700000 0.006905985 409 0.33
13 34_1600000 0 410 0.34
13 34_1500000 0 411 0.34
13 34_1400000 0 412 0.34
13 34_1200000 0 413 0.34
13 34_1000000 0 414 0.34
13 34_600000 0 415 0.34
13 264_0 0 416 0.34
13 236_200000 0 417 0.34
13 186_300000 0.002574358 418 0.34
13 28_1300000 0 419 0.34
13 28_1200000 0 420 0.34
13 28_1100000 0 421 0.34
13 28_500000 0 422 0.34
13 28_400000 0.006249946 423 0.34
13 28_300000 0.004405973 424 0.35
13 28_0 0.028223138 425 0.35
13 225_300000 0.01124038 426 0.38
13 107_100000 0.019733572 427 0.39
13 107_200000 0.016277273 428 0.41
13 107_800000 0.018333766 429 0.43
13 230_200000 0.031622445 430 0.45
13 230_0 0.007499338 431 0.48
13 402_0 0.00718118 432 0.49
13 174_0 0 433 0.49
13 174_100000 0 434 0.49
13 174_200000 0.003940088 435 0.49
13 174_400000 0.014964524 436 0.50
13 174_500000 0 437 0.51
13 344_0 0.014946199 438 0.51
13 108_800000 0.005938766 439 0.53
13 108_700000 0.01549062 440 0.53
13 108_600000 0.005717577 441 0.55
13 108_500000 0 442 0.55
13 108_400000 0.013946089 443 0.55
13 108_300000 0.003502677 444 0.57
13 108_200000 0.013071091 445 0.57
13 108_100000 0.016011562 446 0.58
13 245_300000 0.008725653 447 0.60
13 245_200000 0 448 0.61
13 245_100000 0 449 0.61
13 245_0 0 450 0.61
13 4b_3800000 0 451 0.61
13 4b_3700000 0 452 0.61
13 4b_3600000 0 453 0.61
13 4b_3500000 0 454 0.61
13 4b_3400000 0 455 0.61
13 4b_3300000 0 456 0.61
13 4b_3200000 0 457 0.61
13 4b_3100000 0 458 0.61
13 4b_3000000 0 459 0.61
13 4b_2900000 0 460 0.61
13 4b_2800000 0 461 0.61
13 4b_2700000 0 462 0.61
13 4b_2600000 0 463 0.61
13 4b_2500000 0 464 0.61
13 4b_2400000 0 465 0.61
13 4b_2300000 0 466 0.61
13 4b_2200000 0 467 0.61
13 4b_2100000 0 468 0.61
13 4b_2000000 0 469 0.61
13 4b_1900000 0 470 0.61
13 4b_1800000 0 471 0.61
13 4b_1700000 0 472 0.61
13 4b_1600000 0 473 0.61
13 4b_1500000 0 474 0.61
13 4b_1400000 0 475 0.61
13 4b_1300000 0 476 0.61
13 4b_1200000 0 477 0.61
13 4b_1100000 0 478 0.61
13 4b_1000000 0 479 0.61
13 4b_900000 0 480 0.61
13 4b_800000 0 481 0.61
13 4b_700000 0 482 0.61
13 4b_600000 0 483 0.61
13 4b_500000 0 484 0.61
13 4b_400000 0 485 0.61
13 4b_300000 0 486 0.61
13 4b_200000 0 487 0.61
13 4b_100000 0 488 0.61
13 4b_0 0 489 0.61
13 83b_800000 0 490 0.61
13 83b_700000 0 491 0.61
13 83b_600000 0 492 0.61
13 83b_500000 0 493 0.61
13 83b_400000 0.013027681 494 0.61
13 83b_300000 0.026271757 495 0.62
13 83b_200000 0.032888548 496 0.65
13 83b_100000 0.01728933 497 0.68
13 83b_0 0.008378494 498 0.70
13 115a_300000 0.027663276 499 0.71
13 115a_200000 0.012693832 500 0.73
13 115a_100000 0.030246153 501 0.75
13 115a_0 502 0.78
14 102_0 3.33E-005 503 0.00
14 102_100000 0.009241984 504 0.00
14 102_200000 0.009041945 505 0.01
14 102_300000 0.006405608 506 0.02
14 102_400000 0.011249579 507 0.02
14 102_500000 0.020100348 508 0.04
14 102_600000 0.007108801 509 0.06
14 102_700000 0.007864661 510 0.06
14 102_800000 0.005565408 511 0.07
14 102_900000 0.007248571 512 0.08
14 6c_0 0.006942809 513 0.08
14 6c_100000 0.006948291 514 0.09
14 26a_0 0.007356939 515 0.10
14 26a_100000 0 516 0.11
14 26a_200000 0.002429025 517 0.11
14 26a_300000 0.00910155 518 0.11
14 26a_400000 0.014754207 519 0.12
14 26a_500000 0.012210958 520 0.13
14 26a_600000 0.010191538 521 0.14
14 26a_700000 0.008991645 522 0.15
14 26a_800000 0 523 0.16
14 26a_900000 0.007408521 524 0.16
14 26a_1000000 0 525 0.17
14 26a_1100000 0.008463468 527 0.17
14 26a_1200000 0.006836389 528 0.18
14 26a_1300000 0.009891392 529 0.19
14 26a_1400000 0.002358343 530 0.20
14 26a_1500000 0.007604274 531 0.20
14 278_100000 0 532 0.21
14 278_0 0.012048773 533 0.21
14 148_100000 0 534 0.22
14 148_200000 0 535 0.22
14 148_300000 0.003059604 536 0.22
14 148_400000 0.009245748 537 0.22
14 148_500000 0 538 0.23
14 148_600000 0.006810096 539 0.23
14 198_0 0.001665123 540 0.24
14 198_100000 0.009490945 541 0.24
14 198_200000 0.006950576 542 0.25
14 198_300000 0.018996699 543 0.25
14 101_0 0.010536131 544 0.27
14 101_100000 0 545 0.28
14 101_200000 0.014011616 546 0.28
14 101_400000 0.006901545 547 0.30
14 101_500000 0 548 0.31
14 101_600000 0.000298702 549 0.31
14 101_800000 0.001943378 550 0.31
14 101_900000 0.007570753 551 0.31
14 17a_1100000 0 552 0.31
14 17a_1000000 0.01803178 553 0.31
14 17a_900000 0.003128034 554 0.33
14 17a_800000 0.006819904 555 0.34
14 17a_600000 0.009829275 556 0.34
14 17a_500000 0.011544508 557 0.35
14 17a_400000 0.00653545 558 0.36
14 17a_300000 0 559 0.37
14 17a_200000 0.003346986 560 0.37
14 17a_0 0.001973558 561 0.37
14 453_0 0.012675754 562 0.38
14 247_200000 0 563 0.39
14 247_100000 0 564 0.39
14 247_0 0 565 0.39
14 208_0 0 566 0.39
14 208_100000 0 567 0.39
14 208_300000 0 568 0.39
14 208_400000 0.009688041 569 0.39
14 419_0 0 570 0.40
14 207_300000 0.002506073 571 0.40
14 207_200000 0.002421866 572 0.40
14 207_100000 0 573 0.40
14 207_0 0.013907616 574 0.40
14 164_500000 0.003320436 575 0.42
14 164_400000 0.004932616 576 0.42
14 164_300000 0 577 0.43
14 164_200000 0 578 0.43
14 164_100000 0.003626362 579 0.43
14 164_0 0.004355074 580 0.43
14 123_700000 0 581 0.43
14 123_600000 0.009422422 582 0.43
14 123_500000 0.002438951 583 0.44
14 123_400000 0.002347219 584 0.45
14 123_300000 0 585 0.45
14 123_200000 0.009964279 586 0.45
14 123_100000 0.004919373 587 0.46
14 123_0 0.005142856 588 0.46
14 15_0 0.002674849 589 0.47
14 15_100000 0.00215002 590 0.47
14 15_200000 0 591 0.47
14 15_300000 0 592 0.47
14 15_400000 0.012743024 593 0.47
14 15_500000 0.003869369 594 0.49
14 15_600000 0 595 0.49
14 15_700000 0.007645297 596 0.49
14 15_800000 0.01146754 597 0.50
14 15_2500000 0 598 0.51
14 127_500000 0 599 0.51
14 127_400000 0 600 0.51
14 127_300000 0.002036828 601 0.51
14 127_200000 0 602 0.51
14 127_0 0.009767454 603 0.51
14 168_200000 0.002398672 604 0.52
14 168_300000 0 605 0.52
14 168_400000 0.004922195 606 0.52
14 168_500000 0.002478461 607 0.53
14 211_300000 0.002454068 608 0.53
14 211_200000 0 609 0.53
14 211_100000 0.004950013 610 0.53
14 211_0 0.002526092 611 0.54
14 300_0 0 612 0.54
14 300_100000 0.003815448 613 0.54
14 300_200000 0.01467042 614 0.54
14 126_800000 0 615 0.56
14 126_700000 0.013413957 616 0.56
14 126_600000 0.008315657 617 0.57
14 126_500000 0 618 0.58
14 126_400000 0.006657999 619 0.58
14 126_200000 0.003123336 620 0.59
14 126_100000 0.004959227 621 0.59
14 126_0 0 622 0.59
14 291_100000 0.007387826 623 0.59
14 361_0 0.005024455 624 0.60
14 361_100000 0.004852222 625 0.61
14 121_700000 0 626 0.61
14 121_600000 0.002954493 627 0.61
14 121_500000 0.011937261 628 0.62
14 121_300000 0 629 0.63
14 121_200000 0.002600092 630 0.63
14 121_100000 0.013359473 631 0.63
14 1042_0 0.005288825 632 0.64
14 303_0 0 633 0.65
14 303_100000 0 634 0.65
14 636_0 0 635 0.65
14 195_0 0 636 0.65
14 195_100000 0 637 0.65
14 195_200000 0.012054482 638 0.65
14 195_400000 0 639 0.66
14 323_100000 0 640 0.66
14 219_0 0.003140005 641 0.66
14 219_100000 0 642 0.66
14 219_200000 0.008548214 643 0.66
14 366_0 0 644 0.67
14 128_0 0 645 0.67
14 128_100000 0.009334505 646 0.67
14 128_400000 0 647 0.68
14 128_600000 0.017219991 648 0.68
14 128_700000 0.006378911 649 0.70
14 178_0 0.010778479 650 0.70
14 178_100000 0 651 0.72
14 178_200000 0.00271203 652 0.72
14 178_300000 0 653 0.72
14 178_500000 0 654 0.72
14 382_0 0.007228647 655 0.72
14 140b_400000 0.006118873 656 0.73
14 140b_300000 0 657 0.73
14 140b_200000 0.006761238 658 0.73
14 140b_100000 0.003395603 659 0.74
14 140b_0 0 660 0.74
14 92_0 0 661 0.74
14 92_100000 0 662 0.74
14 92_200000 0.004971678 663 0.74
14 92_300000 0.002294854 664 0.75
14 92_400000 0.004936145 665 0.75
14 92_500000 0.00494939 666 0.75
14 92_600000 0 667 0.76
14 92_700000 0.006524022 668 0.76
14 92_800000 0 669 0.77
14 92_900000 0.00329085 670 0.77
14 92_1000000 0 671 0.77
14 1131_0 0 672 0.77
14 459_0 0 673 0.77
14 290_0 0.009363035 674 0.77
14 290_100000 0.011255544 675 0.78
14 290_200000 0.000348093 676 0.79
14 2_0 0.00918946 677 0.79
14 2_100000 0.01044974 678 0.80
14 2_200000 0.006746883 679 0.81
14 2_300000 0 680 0.82
14 2_400000 0 681 0.82
14 2_500000 0.002369114 682 0.82
14 2_600000 0.005072984 683 0.82
14 2_700000 0.004787739 684 0.82
14 2_800000 0 685 0.83
14 2_900000 0.002462951 686 0.83
14 2_1000000 0.002195392 687 0.83
14 2_1100000 0.009893322 688 0.83
14 2_1200000 0.008782025 689 0.84
14 2_1300000 0.00821175 690 0.85
14 2_1500000 0.011210584 691 0.86
14 2_1600000 0.013726882 692 0.87
14 2_1800000 0 693 0.88
14 2_1900000 0.007424867 694 0.88
14 2_2000000 0.009564631 695 0.89
14 2_2100000 0.035753476 696 0.90
14 2_2400000 0.020439824 697 0.94
14 2_2700000 0.001087403 698 0.96
14 2_2800000 0 699 0.96
14 2_2900000 0.017288289 700 0.96
14 2_3000000 0.009233276 701 0.98
14 2_3100000 0.002988286 702 0.99
14 2_3200000 0.003026748 703 0.99
14 2_3300000 0.020672666 704 0.99
14 2_3400000 0.008358081 705 1.01
14 2_3500000 0 706 1.02
14 2_3600000 0.004891712 707 1.02
14 2_3700000 0 708 1.03
14 2_3800000 0.004335295 709 1.03
14 2_3900000 0.017131625 710 1.03
14 2_4000000 0.007349932 711 1.05
14 2_4100000 0.012089867 712 1.05
14 2_4200000 0.003596815 713 1.07
14 2_4300000 0.022470214 714 1.07
14 2_4400000 0.01941083 715 1.09
14 2_4500000 0 716 1.11
14 58a_0 0.031952351 717 1.11
14 58a_100000 0.009376717 718 1.14
14 58a_200000 0.040007142 719 1.15
14 58a_300000 0 720 1.19
14 58a_400000 0.006742585 721 1.19
14 58a_500000 0 722 1.20
14 396_0 723 1.20
1 79_0 0 724 0.00
1 79_100000 0.014572863 725 0.00
1 79_200000 0.014004296 726 0.01
1 79_300000 0.04338026 727 0.03
1 79_400000 0.016743281 728 0.07
1 79_500000 0.016473575 729 0.09
1 79_600000 0.017509059 730 0.11
1 79_700000 0.006940014 731 0.12
1 79_800000 0.008729414 732 0.13
1 79_900000 0 733 0.14
1 79_1000000 0.00732338 734 0.14
1 79_1100000 0.007366661 735 0.15
1 69_0 0 736 0.15
1 69_100000 0.002438854 737 0.15
1 69_200000 0.004790556 738 0.16
1 69_300000 0.00882264 739 0.16
1 69_500000 0.006245696 740 0.17
1 69_600000 0.009712525 741 0.18
1 69_700000 0 742 0.19
1 69_800000 0.0077558 743 0.19
1 69_900000 0.012351279 744 0.19
1 69_1000000 0.006739608 745 0.21
1 69_1100000 0.00313135 746 0.21
1 69_1200000 0 747 0.22
1 181_0 0.007873906 748 0.22
1 181_100000 0 749 0.22
1 181_200000 0.015787877 750 0.22
1 181_300000 0.010352396 751 0.24
1 181_400000 0.009738485 752 0.25
1 181_500000 0.006861932 753 0.26
1 60_0 0.002090768 754 0.27
1 60_100000 0.01554912 755 0.27
1 60_200000 0.007554221 756 0.28
1 60_300000 0.005988716 757 0.29
1 60_400000 0.014660395 758 0.30
1 60_500000 0 759 0.31
1 60_600000 0.002498869 760 0.31
1 60_700000 0 761 0.31
1 60_800000 0.009810825 762 0.31
1 60_900000 0.012728526 763 0.32
1 60_1000000 0.010919038 764 0.34
1 60_1100000 0.010006036 765 0.35
1 60_1200000 0.008517067 766 0.36
1 60_1300000 0.020240729 767 0.37
1 333_100000 0 768 0.39
1 333_0 0.022293767 769 0.39
1 165_0 0 770 0.41
1 165_100000 0.011882288 771 0.41
1 165_200000 0.014681512 772 0.42
1 165_400000 0.007206913 773 0.44
1 308_100000 0 774 0.44
1 362_100000 0.017638425 775 0.44
1 26b_400000 0 776 0.46
1 26b_300000 0 777 0.46
1 26b_200000 0.024765276 778 0.46
1 240_200000 0.00762071 779 0.48
1 82_1000000 0.0086775 780 0.49
1 82_800000 0.002914339 781 0.50
1 82_700000 0 782 0.50
1 82_600000 0 783 0.50
1 82_500000 0 784 0.50
1 82_400000 0.003348817 785 0.50
1 82_300000 0 786 0.51
1 82_200000 0.00826346 787 0.51
1 82_100000 0.005342898 788 0.52
1 82_0 0.006747116 789 0.52
1 80a_900000 0.00742661 790 0.53
1 80a_800000 0.010881172 791 0.54
1 80a_700000 0.002921668 792 0.55
1 80a_600000 0 793 0.55
1 80a_500000 0.006649031 794 0.55
1 80a_400000 0.003343051 795 0.56
1 80a_300000 0 796 0.56
1 80a_200000 0.005586498 797 0.56
1 80a_100000 0 798 0.56
1 80a_0 0.003813457 799 0.56
1 146_0 0 800 0.57
1 146_100000 0.002915055 801 0.57
1 187_0 0.000801668 802 0.57
1 115b_400000 0.002512411 803 0.57
1 115b_300000 0 804 0.57
1 115b_200000 0 805 0.57
1 115b_100000 0 806 0.57
1 232_100000 0.002505096 807 0.57
1 232_300000 0 808 0.58
1 83a_100000 0.023982819 809 0.58
1 83a_0 810 0.60
2 18_2200000 0 811 0.00
2 18_2100000 0.004598436 812 0.00
2 18_2000000 0.021729394 813 0.00
2 18_1900000 0.015649015 814 0.03
2 18_1800000 0.01056772 815 0.04
2 18_1700000 0.009904026 816 0.05
2 18_1600000 0.007247988 817 0.06
2 18_1500000 0.003175645 818 0.07
2 18_1400000 0.006443815 819 0.07
2 18_1300000 0.009875012 820 0.08
2 18_1200000 0 821 0.09
2 18_1100000 0.003401295 822 0.09
2 18_1000000 0.010216646 823 0.09
2 18_900000 0.010441116 824 0.10
2 18_800000 0.014803148 825 0.11
2 18_700000 0.003677067 826 0.13
2 18_600000 0.007359663 827 0.13
2 18_500000 0.015688908 828 0.14
2 18_400000 0.003425483 829 0.15
2 18_300000 0.013884005 830 0.16
2 18_200000 0.007303409 831 0.17
2 18_100000 0.003119114 832 0.18
2 18_0 0 833 0.18
2 89_0 0.002858698 834 0.18
2 89_100000 0.013939893 835 0.19
2 89_200000 0.00528282 836 0.20
2 89_300000 0.002429163 837 0.20
2 89_400000 0 838 0.21
2 89_500000 0.009781137 839 0.21
2 89_600000 0 840 0.22
2 89_700000 0.007602476 841 0.22
2 89_800000 0.006376355 842 0.22
2 89_900000 0.003246598 843 0.23
2 89_1000000 0.004942387 844 0.23
2 44a_700000 0 845 0.24
2 44a_600000 0.007269122 846 0.24
2 44a_500000 0 847 0.25
2 44a_400000 0.007407714 848 0.25
2 44a_300000 0.007367379 849 0.25
2 44a_200000 0.004828758 850 0.26
2 44a_100000 0 851 0.27
2 44a_0 0.004035509 852 0.27
2 212_0 0.003912778 853 0.27
2 212_100000 0 854 0.27
2 212_200000 0.002449958 855 0.27
2 212_300000 0 856 0.28
2 212_400000 0 857 0.28
2 249_0 0.007746869 858 0.28
2 249_100000 0.005988999 859 0.28
2 249_200000 0.015986907 860 0.29
2 81_0 0 861 0.31
2 81_100000 0 862 0.31
2 81_200000 0 863 0.31
2 81_300000 0.004894689 864 0.31
2 81_400000 3.43E-005 865 0.31
2 81_500000 0.004884262 866 0.31
2 81_700000 0 867 0.32
2 81_800000 0 868 0.32
2 81_900000 0.006643098 869 0.32
2 19_1500000 0 870 0.32
2 19_1300000 0 871 0.32
2 19_1200000 0.001954037 872 0.32
2 19_800000 0.001684641 873 0.32
2 19_600000 0 874 0.33
2 19_400000 0 875 0.33
2 19_300000 0 876 0.33
2 19_200000 0.004604319 877 0.33
2 19_100000 0 878 0.33
2 112_800000 0 879 0.33
2 112_600000 0 880 0.33
2 112_400000 0.019236716 881 0.33
2 112_200000 0.003184506 882 0.35
2 112_100000 0.010190294 883 0.35
2 216_300000 0 884 0.36
2 73_400000 0.011462422 885 0.36
2 73_500000 0.010491603 886 0.37
2 73_600000 0 887 0.39
2 73_800000 0 888 0.39
2 73_1200000 0.005094568 889 0.39
2 65_0 0 890 0.39
2 65_300000 0.002488846 891 0.39
2 65_500000 0 892 0.39
2 65_600000 0 893 0.39
2 65_900000 0 894 0.39
2 65_1000000 0.002389887 895 0.39
2 65_1100000 0.00755793 896 0.40
2 42_300000 0 897 0.40
2 42_400000 0 898 0.40
2 42_600000 0.004422934 899 0.40
2 42_800000 0.007400252 900 0.41
2 42_1000000 0.001649544 901 0.41
2 42_1100000 0.001652992 902 0.42
2 42_1200000 0.002508604 903 0.42
2 42_1400000 0.004945241 904 0.42
2 42_1500000 0 905 0.43
2 42_1600000 0 906 0.43
2 173_500000 0.00717779 907 0.43
2 173_400000 0.000336405 908 0.43
2 173_300000 0.000927364 909 0.43
2 173_200000 0 910 0.43
2 173_100000 0 911 0.43
2 173_0 0.012690152 912 0.43
2 41a_0 0.0105338 913 0.45
2 41a_100000 0 914 0.46
2 41a_200000 0.003566284 915 0.46
2 41a_300000 0.01249709 916 0.46
2 41a_400000 0 917 0.47
2 41a_500000 0.009965896 918 0.47
2 41a_600000 0 919 0.48
2 41a_700000 0.002378434 920 0.48
2 41a_800000 0.009475388 921 0.49
2 41a_900000 0 922 0.49
2 41a_1000000 0 923 0.49
2 41a_1100000 0.007168297 924 0.49
2 41a_1200000 0.018036341 925 0.50
2 41a_1400000 0.002348803 926 0.52
2 41a_1500000 0.013988608 927 0.52
2 429_0 0 928 0.54
2 151_0 0 929 0.54
2 151_100000 0.009502937 930 0.54
2 151_200000 0.011265221 931 0.55
2 151_300000 0 932 0.56
2 151_400000 0.01980097 933 0.56
2 151_500000 0 934 0.58
2 151_600000 0 935 0.58
2 27_0 0.008647035 936 0.58
2 27_100000 0.005365486 937 0.59
2 27_200000 0.014098457 938 0.59
2 27_300000 0.004307988 939 0.61
2 27_400000 0.009975337 940 0.61
2 27_500000 0.008540891 941 0.62
2 27_600000 0.004298951 942 0.63
2 27_700000 0.01564679 943 0.63
2 27_800000 0.00689461 944 0.65
2 27_900000 0.005584263 945 0.65
2 27_1000000 0.007765723 946 0.66
2 27_1100000 0.01639019 947 0.67
2 27_1200000 0.003330355 948 0.68
2 27_1300000 0.007187961 949 0.69
2 27_1400000 0.027221777 950 0.70
2 27_1500000 0.017298982 951 0.72
2 27_1600000 0.02378952 952 0.74
2 27_1700000 0.010951818 953 0.76
2 27_1800000 0.005428008 954 0.77
2 27_1900000 0 955 0.78
2 27_2000000 956 0.78
3 23_0 0.059135703 957 0.00
3 23_300000 0.116197007 958 0.06
3 23_600000 0.023694223 959 0.18
3 23_900000 0.04320968 960 0.20
3 23_1000000 0 961 0.24
3 23_1100000 0.011619443 962 0.24
3 23_1600000 0 963 0.25
3 23_1800000 0 964 0.25
3 23_1900000 0.014433005 965 0.25
3 23_2100000 0 966 0.27
3 72_0 0 967 0.27
3 72_100000 0.01310402 968 0.27
3 72_200000 0.005235308 969 0.28
3 116_0 0.079911743 970 0.29
3 116_100000 0 971 0.37
3 116_300000 0.008597395 972 0.37
3 116_400000 0 973 0.38
3 116_500000 0 974 0.38
3 116_600000 0 975 0.38
3 116_700000 0 976 0.38
3 62_1200000 0 977 0.38
3 246_200000 0.00489727 978 0.38
3 55a_300000 0 979 0.38
3 55a_200000 0 980 0.38
3 55a_100000 0 981 0.38
3 55a_0 0.011099935 982 0.38
3 316_100000 0 983 0.39
3 316_0 0.011151057 984 0.39
3 137_0 0 985 0.40
3 137_100000 0.007358801 986 0.40
3 137_200000 0.002408918 987 0.41
3 137_300000 0.001936515 988 0.41
3 137_400000 0.085621482 989 0.41
3 70b_0 0.024772558 990 0.50
3 389_0 0.052591756 991 0.52
3 35_900000 0.021318383 992 0.58
3 35_500000 0.039824194 993 0.60
3 35_0 0.027981467 994 0.64
3 265_200000 0.020569079 995 0.67
3 265_100000 0.019205218 996 0.69
3 5a_1800000 0 997 0.71
3 5a_1700000 0.004056324 998 0.71
3 5a_1600000 0 999 0.71
3 5a_1500000 0.010108605 1000 0.71
3 5a_1400000 0 1001 0.72
3 5a_1300000 0 1002 0.72
3 5a_1200000 0.008117869 1003 0.72
3 5a_1100000 0 1004 0.73
3 5a_1000000 0 1005 0.73
3 5a_900000 0.00532641 1006 0.73
3 5a_800000 0.006929672 1007 0.73
3 5a_700000 0.009370304 1008 0.74
3 5a_600000 0 1009 0.75
3 5a_500000 0.007327877 1010 0.75
3 5a_400000 0.0026813 1011 0.76
3 5a_300000 0.004180932 1012 0.76
3 5a_200000 0.01400109 1013 0.76
3 106a_0 0.004823031 1014 0.78
3 106a_200000 0.004864968 1015 0.78
3 106a_300000 0 1016 0.79
3 106a_400000 0 1017 0.79
3 106a_500000 0.00262049 1018 0.79
3 106a_600000 0.010841494 1019 0.79
3 106a_700000 0.012179274 1020 0.80
3 258_300000 0.006958273 1021 0.81
3 258_200000 0.006990203 1022 0.82
3 258_100000 0.009403826 1023 0.83
3 258_0 0.003560915 1024 0.84
3 169_500000 0.002550456 1025 0.84
3 169_400000 0 1026 0.84
3 169_300000 0.003127343 1027 0.84
3 169_200000 0.017542852 1028 0.85
3 169_100000 0.003807517 1029 0.86
3 169_0 0 1030 0.87
3 98_1000000 0.003067915 1031 0.87
3 98_900000 0.034197531 1032 0.87
3 98_800000 0.006150011 1033 0.90
3 98_700000 0.021334348 1034 0.91
3 98_600000 0 1035 0.93
3 98_500000 0.019017418 1036 0.93
3 98_400000 0.009328543 1037 0.95
3 98_300000 0.008266186 1038 0.96
3 98_200000 0.009174673 1039 0.97
3 98_100000 0.009261923 1040 0.98
3 98_0 0.011258444 1041 0.99
3 261_0 0.012990073 1042 1.00
3 261_100000 0.008547525 1043 1.01
3 261_200000 0.005642946 1044 1.02
3 273a_0 0.007687322 1045 1.03
3 64_1300000 0.03138704 1046 1.03
3 64_1200000 0.008718854 1047 1.06
3 64_1100000 0.007888362 1048 1.07
3 64_1000000 0.012656763 1049 1.08
3 64_900000 0.004253932 1050 1.09
3 64_800000 0.012664083 1051 1.10
3 64_700000 0.010840494 1052 1.11
3 64_600000 0.016208861 1053 1.12
3 64_500000 0.011546064 1054 1.14
3 64_400000 0.028775285 1055 1.15
3 64_300000 0 1056 1.18
3 64_200000 0.008087788 1057 1.18
3 64_100000 0 1058 1.19
3 64_0 1059 1.19
4 1_4900000 6.24E-005 1060 0.00
4 1_4800000 0 1061 0.00
4 1_4700000 0.005018727 1062 0.00
4 1_4600000 0.002433594 1063 0.01
4 1_4500000 0.004858455 1064 0.01
4 1_4400000 0 1065 0.01
4 1_4300000 0.016180531 1066 0.01
4 1_4200000 0.005133811 1067 0.03
4 1_4000000 0.010666755 1068 0.03
4 1_3900000 0 1069 0.04
4 1_3800000 0.016792236 1070 0.04
4 1_3700000 0.003008449 1071 0.06
4 1_3600000 0.003314806 1072 0.06
4 1_3500000 0.003808377 1073 0.07
4 1_3400000 0.013379994 1074 0.07
4 1_3300000 0.004100054 1075 0.08
4 1_3200000 0.003807063 1076 0.09
4 1_3100000 0.009403073 1077 0.09
4 1_3000000 0.012676432 1078 0.10
4 1_2900000 0.013617913 1079 0.11
4 1_2800000 0.004795977 1080 0.13
4 1_2700000 0.003212285 1081 0.13
4 1_2600000 0.006457804 1082 0.14
4 1_2500000 0.031311425 1083 0.14
4 1_2400000 0.008341264 1084 0.17
4 1_2300000 0.007662483 1085 0.18
4 1_2200000 0.004833449 1086 0.19
4 1_2100000 0 1087 0.19
4 1_2000000 0.002396248 1088 0.19
4 1_1900000 0.016104668 1089 0.20
4 1_1800000 0.007509959 1090 0.21
4 1_1700000 0.016417778 1091 0.22
4 1_1600000 0.005925397 1092 0.24
4 1_1500000 0.02139644 1093 0.24
4 1_1400000 0.004576005 1094 0.26
4 1_1300000 0.017701988 1095 0.27
4 1_1200000 0 1096 0.29
4 1_1100000 0.029718831 1097 0.29
4 1_1000000 0.001663627 1098 0.32
4 1_900000 0.004927658 1099 0.32
4 1_800000 0.004856094 1100 0.32
4 1_700000 0 1101 0.33
4 1_600000 0 1102 0.33
4 1_500000 0.008611552 1103 0.33
4 1_400000 0.008501009 1104 0.34
4 1_300000 0 1105 0.35
4 1_200000 0.004943839 1106 0.35
4 1_100000 0 1107 0.35
4 1_0 0 1108 0.35
4 54_1400000 0.002930697 1109 0.35
4 54_1300000 0.005321747 1110 0.35
4 54_1200000 0.008804252 1111 0.36
4 54_1100000 0.001336663 1112 0.37
4 54_1000000 0.018908261 1113 0.37
4 54_900000 0.000140468 1114 0.39
4 54_800000 0.001827446 1115 0.39
4 54_700000 0.005090003 1116 0.39
4 54_600000 0.004796729 1117 0.39
4 54_500000 0.007453835 1118 0.40
4 54_300000 0 1119 0.41
4 54_200000 0.004837025 1120 0.41
4 54_100000 0 1121 0.41
4 54_0 0 1122 0.41
4 120_800000 0 1123 0.41
4 120_700000 0 1124 0.41
4 120_600000 0 1125 0.41
4 120_400000 0 1126 0.41
4 120_300000 0 1127 0.41
4 120_200000 0 1128 0.41
4 120_100000 0 1129 0.41
4 120_0 0.00664536 1130 0.41
4 150_300000 0.016543233 1131 0.42
4 150_0 0.003294611 1132 0.43
4 57_100000 0 1133 0.44
4 57_300000 0 1134 0.44
4 57_400000 0 1135 0.44
4 57_500000 0 1136 0.44
4 57_600000 0 1137 0.44
4 57_700000 0.003542513 1138 0.44
4 57_1300000 0.011178328 1139 0.44
4 254a_100000 0.008468801 1140 0.45
4 254a_0 0 1141 0.46
4 110_600000 0 1142 0.46
4 110_300000 0.005428155 1143 0.46
4 110_200000 0 1144 0.47
4 110_0 0 1145 0.47
4 154_500000 0 1146 0.47
4 154_400000 0 1147 0.47
4 154_300000 0.006663107 1148 0.47
4 154_200000 0 1149 0.47
4 154_0 0.004461053 1150 0.47
4 177_400000 0 1151 0.48
4 177_300000 0 1152 0.48
4 177_200000 0 1153 0.48
4 177_100000 0.004895782 1154 0.48
4 177_0 0 1155 0.48
4 6a_2300000 0.002523236 1156 0.48
4 6a_2200000 0.002487124 1157 0.49
4 6a_2100000 0 1158 0.49
4 6a_2000000 0 1159 0.49
4 6a_1900000 0.002544539 1160 0.49
4 6a_1800000 0 1161 0.49
4 6a_1700000 0 1162 0.49
4 6a_1600000 0.019850319 1163 0.49
4 6a_1300000 0 1164 0.51
4 6a_1200000 0 1165 0.51
4 6a_1100000 0 1166 0.51
4 6a_1000000 0.009773322 1167 0.51
4 6a_800000 0 1168 0.52
4 6a_700000 0 1169 0.52
4 6a_600000 0 1170 0.52
4 6a_500000 0.019321415 1171 0.52
4 6a_100000 0.010271584 1172 0.54
4 6a_0 0 1173 0.55
4 55b_0 0.025779775 1174 0.55
4 55b_200000 0.00578589 1175 0.58
4 55b_300000 0.003953491 1176 0.58
4 55b_400000 0 1177 0.58
4 55b_500000 0.007362379 1178 0.58
4 55b_1000000 0 1179 0.59
4 346_0 0 1180 0.59
4 346_100000 0 1181 0.59
4 443_0 0 1182 0.59
4 320_100000 0.019636889 1183 0.59
4 320_0 0.007697294 1184 0.61
4 96a_0 0.004395585 1185 0.62
4 96a_100000 0.016133042 1186 0.62
4 96a_200000 0.015883282 1187 0.64
4 96a_300000 0.024397703 1188 0.66
4 96a_500000 0.013217307 1189 0.68
4 96a_600000 0.009608154 1190 0.69
4 96a_700000 0.015407844 1191 0.70
4 96a_800000 0.005584305 1192 0.72
4 96a_900000 0 1193 0.72
4 85_1100000 0.004116531 1194 0.72
4 85_1000000 0.013175248 1195 0.73
4 85_900000 0.007327157 1196 0.74
4 85_700000 0 1197 0.75
4 85_600000 0.027422483 1198 0.75
4 85_200000 0.008714469 1199 0.78
4 85_100000 0 1200 0.79
4 1287_0 0.006984148 1201 0.79
4 196_500000 0.005619589 1202 0.79
4 196_300000 0.028762881 1203 0.80
4 196_0 0 1204 0.83
4 541_0 0.020637467 1205 0.83
4 766_0 0.024123755 1206 0.85
4 44c_200000 0.012645357 1207 0.87
4 384_0 0.003055144 1208 0.88
4 479_0 0 1209 0.89
4 638_0 0.00966599 1210 0.89
4 163_100000 0.038445919 1211 0.90
4 163_400000 0.005981349 1212 0.94
4 163_500000 0.005796199 1213 0.94
4 633_0 0.09513289 1214 0.95
4 682_0 1215 1.04
5 129a_0 0 1216 0.00
5 129a_100000 0.009867706 1217 0.00
5 129a_200000 0.010948734 1218 0.01
5 66_0 0.010786443 1219 0.02
5 66_100000 0.016091175 1220 0.03
5 66_200000 0.012261134 1221 0.05
5 66_300000 0 1222 0.06
5 66_400000 0.010955326 1223 0.06
5 66_500000 0.016337462 1224 0.07
5 66_600000 0 1225 0.09
5 66_700000 0.017410214 1226 0.09
5 66_800000 0 1227 0.10
5 66_900000 0.021907347 1228 0.10
5 66_1000000 0.011361253 1229 0.13
5 66_1100000 0.010473615 1230 0.14
5 66_1200000 0.014773638 1231 0.15
5 32_0 0 1232 0.16
5 32_100000 0.017923196 1233 0.16
5 32_200000 0.01124134 1234 0.18
5 32_300000 0.003890905 1235 0.19
5 32_400000 0.00712779 1236 0.20
5 32_500000 0.009223216 1237 0.20
5 32_600000 0.002310733 1238 0.21
5 32_700000 0.004920895 1239 0.21
5 32_800000 0 1240 0.22
5 32_900000 0.017190513 1241 0.22
5 32_1000000 0 1242 0.24
5 32_1100000 0.010092625 1243 0.24
5 32_1200000 0.003214331 1244 0.25
5 32_1300000 0.010396914 1245 0.25
5 32_1400000 0.015279069 1246 0.26
5 32_1600000 0.005291441 1247 0.28
5 32_1700000 0.01706223 1248 0.28
5 32_1800000 0.006044414 1249 0.30
5 205_0 0.002757521 1250 0.30
5 205_100000 0 1251 0.31
5 205_200000 0.008212777 1252 0.31
5 205_300000 0.005073212 1253 0.32
5 205_400000 0.011263971 1254 0.32
5 1147_0 0 1255 0.33
5 461_0 0.006474622 1256 0.33
5 242_0 0.016716269 1257 0.34
5 242_100000 0 1258 0.35
5 242_200000 0 1259 0.35
5 242_300000 0 1260 0.35
5 104a_0 0.01219969 1261 0.35
5 104a_100000 0.008780287 1262 0.37
5 104a_200000 0.001908295 1263 0.38
5 244_100000 0.011633857 1264 0.38
5 244_0 0.020411918 1265 0.39
5 111a_300000 0.016505906 1266 0.41
5 111a_400000 0.005458001 1267 0.43
5 111a_500000 0 1268 0.43
5 111a_600000 0.005195375 1269 0.43
5 94_0 0 1270 0.44
5 94_100000 0 1271 0.44
5 94_200000 0.015419681 1272 0.44
5 94_400000 0 1273 0.45
5 94_600000 0.011777832 1274 0.45
5 94_800000 0 1275 0.46
5 94_900000 0.011115902 1276 0.46
5 88_600000 0 1277 0.48
5 88_400000 0 1278 0.48
5 88_300000 0 1279 0.48
5 141_100000 0.006617218 1280 0.48
5 141_300000 0.031973791 1281 0.48
5 53_500000 0.041552106 1282 0.51
5 304_100000 0.015033687 1283 0.56
5 326_0 0.024580831 1284 0.57
5 326_100000 0.01662449 1285 0.60
5 327_100000 0 1286 0.61
5 327_0 0 1287 0.61
5 158_0 0 1288 0.61
5 158_100000 0 1289 0.61
5 158_200000 0 1290 0.61
5 158_300000 0 1291 0.61
5 158_400000 0 1292 0.61
5 158_500000 0 1293 0.61
5 158_600000 0 1294 0.61
5 449_0 0 1295 0.61
5 170_0 0 1296 0.61
5 170_100000 0 1297 0.61
5 170_200000 0 1298 0.61
5 170_300000 0 1299 0.61
5 170_400000 0 1300 0.61
5 170_500000 0.001254656 1301 0.61
5 36_0 0.001186542 1302 0.61
5 36_100000 0 1303 0.61
5 36_200000 0 1304 0.61
5 36_300000 0 1305 0.61
5 36_400000 0 1306 0.61
5 36_500000 0 1307 0.61
5 36_600000 0 1308 0.61
5 36_700000 0 1309 0.61
5 36_800000 0 1310 0.61
5 36_900000 0 1311 0.61
5 36_1000000 0 1312 0.61
5 36_1100000 0 1313 0.61
5 36_1200000 0 1314 0.61
5 36_1300000 0 1315 0.61
5 36_1400000 0 1316 0.61
5 36_1500000 0 1317 0.61
5 36_1600000 0 1318 0.61
5 36_1700000 0 1319 0.61
5 266_0 0 1320 0.61
5 266_100000 0 1321 0.61
5 266_200000 0.00175749 1322 0.61
5 149_0 0 1323 0.62
5 149_300000 0.010358864 1324 0.62
5 197_400000 0.010709693 1325 0.63
5 197_300000 0.03302368 1326 0.64
5 197_200000 0 1327 0.67
5 197_0 0.070498501 1328 0.67
5 288_100000 0.084042696 1329 0.74
5 226_0 1330 0.82
6 270_200000 0 1331 0.00
6 270_100000 0 1332 0.00
6 270_0 0.017443454 1333 0.00
6 104b_500000 0.013799308 1334 0.02
6 104b_400000 0.006033355 1335 0.03
6 104b_300000 0.008490192 1336 0.04
6 104b_200000 0.014795461 1337 0.05
6 104b_100000 0.005700391 1338 0.06
6 104b_0 0.002282714 1339 0.07
6 51_1400000 0.00662148 1340 0.07
6 51_1300000 0.001171639 1341 0.08
6 51_1200000 0.005116911 1342 0.08
6 51_1100000 0.013796846 1343 0.08
6 51_1000000 0.002227339 1344 0.10
6 51_900000 0.017248601 1345 0.10
6 51_800000 0.005285783 1346 0.11
6 51_700000 0.011692302 1347 0.12
6 51_600000 0.015208205 1348 0.13
6 51_500000 0.003648189 1349 0.15
6 51_400000 0.013861062 1350 0.15
6 51_300000 0.008062339 1351 0.16
6 51_200000 0.016438674 1352 0.17
6 51_100000 0 1353 0.19
6 51_0 0 1354 0.19
6 8_3400000 0.009855909 1355 0.19
6 8_3300000 0 1356 0.20
6 8_3200000 0 1357 0.20
6 8_3100000 0.012310828 1358 0.20
6 8_3000000 0.018143124 1359 0.21
6 8_2900000 0.010934456 1360 0.23
6 8_2800000 0.005657344 1361 0.24
6 8_2700000 0 1362 0.25
6 8_2600000 0.017226841 1363 0.25
6 8_2500000 0.004793559 1364 0.26
6 8_2400000 0.006213202 1365 0.27
6 8_2300000 0.005933613 1366 0.27
6 8_2200000 0.024855616 1367 0.28
6 8_2000000 0.003323635 1368 0.30
6 8_1900000 0.006482271 1369 0.31
6 8_1800000 0.018062462 1370 0.31
6 8_1700000 0.007476108 1371 0.33
6 8_1600000 0.004127005 1372 0.34
6 8_1500000 0.006539316 1373 0.34
6 8_1400000 0.003164868 1374 0.35
6 8_1300000 0.012425314 1375 0.35
6 8_1200000 0 1376 0.37
6 8_1100000 0.011318585 1377 0.37
6 8_1000000 0.003361954 1378 0.38
6 8_900000 0 1379 0.38
6 8_800000 0.004855973 1380 0.38
6 8_700000 0 1381 0.39
6 8_600000 0.003732246 1382 0.39
6 8_500000 0.01095691 1383 0.39
6 8_400000 0.002505922 1384 0.40
6 8_300000 0.002412405 1385 0.40
6 8_200000 0 1386 0.41
6 8_100000 0 1387 0.41
6 8_0 0.007427917 1388 0.41
6 293_100000 0.003695653 1389 0.41
6 293_0 0.003692459 1390 0.42
6 379a_0 0.002386939 1391 0.42
6 21_0 0 1392 0.42
6 21_100000 0.007389815 1393 0.42
6 21_200000 0 1394 0.43
6 21_300000 0.00491595 1395 0.43
6 21_500000 0 1396 0.44
6 21_600000 0 1397 0.44
6 21_700000 0.003592826 1398 0.44
6 21_1100000 0 1399 0.44
6 21_1200000 0.004308534 1400 0.44
6 21_1600000 0 1401 0.44
6 21_1700000 0.005589226 1402 0.44
6 307_100000 0 1403 0.45
6 136_0 0.002304909 1404 0.45
6 136_300000 0.003423229 1405 0.45
6 136_400000 0.006737962 1406 0.45
6 136_500000 0 1407 0.46
6 136_600000 0 1408 0.46
6 67_100000 0 1409 0.46
6 67_200000 0 1410 0.46
6 67_300000 0 1411 0.46
6 67_500000 0 1412 0.46
6 67_600000 0.008080516 1413 0.46
6 248_100000 0.00551113 1414 0.47
6 43_1000000 0.006292322 1415 0.47
6 43_1100000 0 1416 0.48
6 43_1200000 0 1417 0.48
6 43_1300000 0 1418 0.48
6 43_1400000 0 1419 0.48
6 43_1500000 0 1420 0.48
6 119_100000 0 1421 0.48
6 119_200000 0 1422 0.48
6 119_400000 0 1423 0.48
6 119_500000 0.00375943 1424 0.48
6 119_600000 0.003598975 1425 0.48
6 25_1000000 0 1426 0.49
6 25_900000 0.002438553 1427 0.49
6 25_800000 0 1428 0.49
6 25_600000 0.002827383 1429 0.49
6 25_500000 0.002719256 1430 0.49
6 25_400000 0.00877218 1431 0.50
6 25_300000 0.003624547 1432 0.51
6 25_200000 0.001504569 1433 0.51
6 25_100000 0 1434 0.51
6 25_0 0.006495702 1435 0.51
6 190_400000 0.006200864 1436 0.52
6 190_300000 0.003618507 1437 0.52
6 190_200000 0.02763999 1438 0.53
6 190_100000 0.00242527 1439 0.55
6 190_0 0.006766255 1440 0.56
6 179_200000 0.01565527 1441 0.56
6 179_100000 0.007352779 1442 0.58
6 179_0 0.001061767 1443 0.59
6 262_200000 0.001927336 1444 0.59
6 262_100000 0.004193592 1445 0.59
6 262_0 0.002424991 1446 0.59
6 16_2400000 0 1447 0.60
6 16_2300000 0.004889933 1448 0.60
6 16_2200000 0 1449 0.60
6 16_2100000 0 1450 0.60
6 16_2000000 0.003657916 1451 0.60
6 16_1900000 0.003676082 1452 0.60
6 16_1800000 0 1453 0.61
6 16_1700000 0.005469011 1454 0.61
6 16_1600000 0.011089482 1455 0.61
6 16_1500000 0.00549945 1456 0.62
6 16_1400000 0.00460557 1457 0.63
6 16_1300000 0.013675412 1458 0.63
6 16_1200000 0.017863676 1459 0.65
6 16_1100000 0.014023513 1460 0.67
6 16_1000000 0.009102647 1461 0.68
6 16_900000 0 1462 0.69
6 16_800000 0.004898648 1463 0.69
6 16_700000 0 1464 0.69
6 16_600000 0.008071297 1465 0.69
6 16_500000 0.003528788 1466 0.70
6 16_400000 0.015862124 1467 0.71
6 16_300000 0.010419401 1468 0.72
6 16_200000 0.035837681 1469 0.73
6 16_100000 0 1470 0.77
6 16_0 1471 0.77
7 452_0 0 1472 0.00
7 251_200000 0 1473 0.00
7 251_100000 0.005572984 1474 0.00
7 251_0 0.011356986 1475 0.01
7 335_100000 0.014544252 1476 0.02
7 335_0 0 1477 0.03
7 113_900000 0.048815912 1478 0.03
7 113_600000 0.00493629 1479 0.08
7 113_500000 0.012594154 1480 0.09
7 113_400000 0.014027944 1481 0.10
7 113_300000 0.021226672 1482 0.11
7 113_100000 0.002515593 1483 0.13
7 130_800000 0.011179784 1484 0.14
7 130_700000 0 1485 0.15
7 130_600000 0.007622364 1486 0.15
7 130_500000 0.026959024 1487 0.15
7 130_400000 0.012085101 1488 0.18
7 130_300000 0.013312183 1489 0.19
7 130_100000 0.002671913 1490 0.21
7 130_0 0 1491 0.21
7 416_0 0.006462623 1492 0.21
7 2724_0 0.035501939 1493 0.22
7 255_200000 0.002612742 1494 0.25
7 255_100000 0.016532443 1495 0.25
7 476_0 0 1496 0.27
7 9c_500000 0.007547183 1497 0.27
7 9c_400000 0.006115148 1498 0.28
7 9c_300000 0 1499 0.28
7 9c_200000 0.002457333 1500 0.28
7 9c_100000 0.033462759 1501 0.29
7 114b_0 0.012041471 1502 0.32
7 114b_100000 0.002540978 1503 0.33
7 114b_200000 0.009271519 1504 0.33
7 114b_300000 0.013183318 1505 0.34
7 114b_500000 0.01044913 1506 0.36
7 274_0 0.008814312 1507 0.37
7 274_100000 0 1508 0.38
7 274_200000 0 1509 0.38
7 29_0 0.011937787 1510 0.38
7 29_100000 0.00714701 1511 0.39
7 29_200000 0 1512 0.40
7 29_300000 0.003111196 1513 0.40
7 29_400000 0.004301668 1514 0.40
7 29_500000 0.003425616 1515 0.40
7 29_700000 0.015091851 1516 0.41
7 29_900000 0.010762518 1517 0.42
7 29_1100000 0.009257287 1518 0.43
7 29_1200000 0.007871968 1519 0.44
7 29_1400000 0.007089099 1520 0.45
7 29_1500000 0 1521 0.46
7 29_1700000 0.001597673 1522 0.46
7 29_1800000 0.007259007 1523 0.46
7 166_500000 0.005041222 1524 0.47
7 166_400000 0 1525 0.47
7 166_300000 0 1526 0.47
7 166_200000 0.004350304 1527 0.47
7 166_0 0.011566534 1528 0.47
7 56_1400000 0 1529 0.49
7 56_1000000 0.014689594 1530 0.49
7 56_900000 0.066325362 1531 0.50
7 229_100000 0.003699598 1532 0.57
7 97a_100000 0.008355286 1533 0.57
7 97a_200000 0.014079144 1534 0.58
7 97a_500000 0.013932265 1535 0.59
7 279_0 0 1536 0.61
7 351_0 0.004807008 1537 0.61
7 351_100000 0.0521373 1538 0.61
7 105_400000 0 1539 0.66
7 105_300000 0.015531125 1540 0.66
7 105_0 0 1541 0.68
7 184_0 0.004108992 1542 0.68
7 47a_400000 0.004537508 1543 0.68
7 14_2600000 0.001878771 1544 0.69
7 14_2200000 0 1545 0.69
7 14_2000000 0 1546 0.69
7 14_1900000 0 1547 0.69
7 14_1600000 0.000454753 1548 0.69
7 14_1500000 0.006228533 1549 0.69
7 14_1400000 0.077067064 1550 0.70
7 14_800000 0.157518356 1551 0.77
7 14_400000 1552 0.93
8 109_800000 0.005490345 1553 0.00
8 109_700000 0.016028673 1554 0.01
8 109_600000 0.059242411 1555 0.02
8 109_500000 0.010627504 1556 0.08
8 109_400000 0.018534223 1557 0.09
8 109_300000 0.006635112 1558 0.11
8 109_200000 0.025353622 1559 0.12
8 109_100000 0.011820225 1560 0.14
8 109_0 0 1561 0.15
8 118_200000 0 1562 0.15
8 118_300000 0 1563 0.15
8 118_400000 0 1564 0.15
8 155_0 0 1565 0.15
8 155_100000 0 1566 0.15
8 155_200000 0 1567 0.15
8 155_300000 0 1568 0.15
8 155_400000 0 1569 0.15
8 155_600000 0 1570 0.15
8 76_1100000 0 1571 0.15
8 76_1000000 0 1572 0.15
8 76_900000 0 1573 0.15
8 76_800000 0 1574 0.15
8 76_700000 0 1575 0.15
8 76_600000 0 1576 0.15
8 76_500000 0 1577 0.15
8 76_400000 0 1578 0.15
8 233_300000 0 1579 0.15
8 233_200000 0 1580 0.15
8 233_100000 0 1581 0.15
8 233_0 0 1582 0.15
8 11_2900000 0 1583 0.15
8 11_2800000 0 1584 0.15
8 11_2700000 0 1585 0.15
8 11_2600000 0 1586 0.15
8 11_2500000 0 1587 0.15
8 11_2400000 0 1588 0.15
8 11_2300000 0 1589 0.15
8 11_2200000 0 1590 0.15
8 11_2100000 0 1591 0.15
8 11_2000000 0 1592 0.15
8 11_1900000 0 1593 0.15
8 11_1800000 0 1594 0.15
8 11_1700000 0 1595 0.15
8 11_1600000 0 1596 0.15
8 11_1500000 0 1597 0.15
8 11_1400000 0 1598 0.15
8 11_1300000 0 1599 0.15
8 11_1200000 0 1600 0.15
8 11_1100000 0 1601 0.15
8 11_1000000 0 1602 0.15
8 11_900000 0 1603 0.15
8 11_800000 0 1604 0.15
8 11_700000 0 1605 0.15
8 11_600000 0 1606 0.15
8 11_500000 0 1607 0.15
8 11_400000 0 1608 0.15
8 11_300000 0 1609 0.15
8 11_200000 0 1610 0.15
8 11_100000 0 1611 0.15
8 11_0 0 1612 0.15
8 59_0 0 1613 0.15
8 59_100000 0 1614 0.15
8 59_200000 0 1615 0.15
8 59_400000 0 1616 0.15
8 59_600000 0 1617 0.15
8 59_700000 0.005229293 1618 0.15
8 59_900000 0 1619 0.16
8 59_1400000 0.003207007 1620 0.16
8 217_100000 0.015142649 1621 0.16
8 77_1000000 0.007251019 1622 0.18
8 77_900000 0 1623 0.18
8 77_800000 0 1624 0.18
8 38_1400000 0.013058148 1625 0.18
8 38_1500000 0.013207392 1626 0.20
8 46_100000 0.006694613 1627 0.21
8 46_200000 0 1628 0.22
8 46_300000 0 1629 0.22
8 46_400000 0 1630 0.22
8 46_500000 0 1631 0.22
8 46_600000 0 1632 0.22
8 46_700000 0 1633 0.22
8 46_800000 0 1634 0.22
8 46_900000 0 1635 0.22
8 46_1000000 0 1636 0.22
8 46_1300000 0 1637 0.22
8 46_1400000 0 1638 0.22
8 46_1500000 0 1639 0.22
8 412_0 0.006076753 1640 0.22
8 12b_0 0.004649797 1641 0.22
8 12b_100000 0.00446878 1642 0.23
8 12b_200000 0.005854982 1643 0.23
8 12b_300000 0.001102432 1644 0.24
8 12b_400000 0.005797041 1645 0.24
8 12b_500000 0.005920314 1646 0.25
8 12b_600000 0.008751291 1647 0.25
8 12b_700000 0 1648 0.26
8 12b_800000 0 1649 0.26
8 12b_900000 0.011034964 1650 0.26
8 12b_1000000 0.002908557 1651 0.27
8 12b_1100000 0.023333353 1652 0.27
8 12b_1200000 0.011439338 1653 0.30
8 12b_1300000 0.024927871 1654 0.31
8 12b_1400000 0.004088915 1655 0.33
8 12b_1500000 0.008757839 1656 0.34
8 12b_1600000 0.01962548 1657 0.35
8 12b_1700000 0.002842172 1658 0.37
8 12b_1800000 0.004362739 1659 0.37
8 12b_1900000 0.006192418 1660 0.37
8 12b_2000000 0.016045644 1661 0.38
8 12b_2100000 0.005558599 1662 0.40
8 12b_2200000 0 1663 0.40
8 12b_2300000 0.007323153 1664 0.40
8 12b_2400000 0.011119575 1665 0.41
8 24_2100000 0 1666 0.42
8 24_2000000 0.016315687 1667 0.42
8 24_1900000 0.004911683 1668 0.44
8 24_1800000 0 1669 0.44
8 24_1700000 0.007416462 1670 0.44
8 24_1600000 0 1671 0.45
8 24_1500000 0.002463823 1672 0.45
8 24_1400000 0 1673 0.45
8 24_1300000 0 1674 0.45
8 24_1200000 0.003288691 1675 0.45
8 24_1100000 0.010481199 1676 0.45
8 24_1000000 0 1677 0.46
8 24_900000 0.009963977 1678 0.46
8 24_800000 0 1679 0.47
8 24_700000 0.01320505 1680 0.47
8 24_600000 0 1681 0.49
8 24_500000 0.004995549 1682 0.49
8 24_400000 0.002353616 1683 0.49
8 24_300000 0 1684 0.50
8 24_200000 0 1685 0.50
8 24_100000 0.009864398 1686 0.50
8 24_0 0 1687 0.50
8 45_1600000 0 1688 0.50
8 45_1500000 0.003881633 1689 0.50
8 45_1400000 0.002563841 1690 0.51
8 45_1300000 0.001335099 1691 0.51
8 45_1200000 0.005106589 1692 0.51
8 45_1100000 0 1693 0.52
8 45_1000000 0.012850212 1694 0.52
8 45_900000 0 1695 0.53
8 45_800000 0.010698696 1696 0.53
8 45_700000 0.003488673 1697 0.54
8 45_600000 0 1698 0.54
8 45_500000 0 1699 0.54
8 45_400000 0.021797135 1700 0.54
8 45_300000 0.008725764 1701 0.57
8 45_200000 0 1702 0.58
8 45_100000 0.002822704 1703 0.58
8 45_0 0 1704 0.58
8 285_200000 0.00735682 1705 0.58
8 285_0 0.01320659 1706 0.59
8 3_0 0 1707 0.60
8 3_100000 0 1708 0.60
8 3_200000 0 1709 0.60
8 3_300000 0 1710 0.60
8 3_400000 0.01063088 1711 0.60
8 3_500000 0 1712 0.61
8 3_600000 0 1713 0.61
8 3_700000 0.012483148 1714 0.61
8 3_800000 0.003910875 1715 0.62
8 3_900000 0 1716 0.63
8 3_1000000 0 1717 0.63
8 3_1100000 0.014694283 1718 0.63
8 3_1200000 0 1719 0.64
8 3_1300000 0 1720 0.64
8 3_1400000 0.014181021 1721 0.64
8 3_1500000 0.003009551 1722 0.65
8 3_1600000 0 1723 0.66
8 3_1700000 0.008288751 1724 0.66
8 3_1800000 0 1725 0.67
8 3_1900000 0 1726 0.67
8 3_2000000 0 1727 0.67
8 3_2100000 0.014800756 1728 0.67
8 3_2200000 0 1729 0.68
8 3_2300000 0.002375352 1730 0.68
8 3_2400000 0.004590468 1731 0.68
8 3_2500000 0 1732 0.69
8 3_2600000 0 1733 0.69
8 3_2700000 0.002499698 1734 0.69
8 3_2800000 0.002921713 1735 0.69
8 3_2900000 0.004920807 1736 0.69
8 3_3000000 0 1737 0.70
8 3_3100000 0.002414009 1738 0.70
8 3_3200000 0.004934505 1739 0.70
8 3_3300000 0.006157061 1740 0.71
8 3_3400000 0.004718809 1741 0.71
8 3_3500000 0.005460097 1742 0.72
8 3_3600000 0.006895946 1743 0.72
8 3_3700000 0.004831366 1744 0.73
8 3_3800000 0.005238315 1745 0.73
8 3_3900000 0.013903834 1746 0.74
8 3_4000000 0.010461965 1747 0.75
8 3_4100000 0.015428446 1748 0.76
8 3_4200000 0 1749 0.78
8 3_4300000 1750 0.78
9 68b_1100000 0.018657195 1751 0.00
9 68b_1000000 0.022061316 1752 0.02
9 68b_900000 0 1753 0.04
9 68b_800000 0.0031915 1754 0.04
9 68b_700000 0.014487887 1755 0.04
9 68b_600000 0.027359298 1756 0.06
9 68b_500000 0 1757 0.09
9 68b_400000 0.002216948 1758 0.09
9 68b_300000 0 1759 0.09
9 68b_200000 0.007568889 1760 0.09
9 68b_100000 0 1761 0.10
9 68b_0 0.014837082 1762 0.10
9 124_700000 0.005142594 1763 0.11
9 124_600000 0.032165449 1764 0.12
9 124_500000 0.01169245 1765 0.15
9 124_400000 0.006641002 1766 0.16
9 124_300000 0.004207099 1767 0.17
9 124_100000 0.007968018 1768 0.17
9 124_0 0.003298881 1769 0.18
9 20_2300000 0.003807529 1770 0.18
9 20_2200000 0 1771 0.19
9 20_2100000 0.0074269 1772 0.19
9 20_2000000 0 1773 0.19
9 20_1900000 0.004914661 1774 0.19
9 20_1800000 0 1775 0.20
9 20_1600000 0 1776 0.20
9 20_1400000 0 1777 0.20
9 20_1200000 0.00253029 1778 0.20
9 20_1100000 0 1779 0.20
9 20_1000000 0 1780 0.20
9 20_900000 0.017305979 1781 0.20
9 20_800000 0 1782 0.22
9 20_700000 0.002457361 1783 0.22
9 20_600000 0 1784 0.22
9 20_500000 0.011948698 1785 0.22
9 20_400000 0.00551183 1786 0.23
9 20_300000 0.011897881 1787 0.24
9 20_0 0.007277655 1788 0.25
9 330_100000 0.012780866 1789 0.26
9 152_300000 0.026950717 1790 0.27
9 152_100000 0 1791 0.30
9 152_0 0 1792 0.30
9 117a_100000 0 1793 0.30
9 147_500000 0 1794 0.30
9 352_100000 0.006635582 1795 0.30
9 352_0 0 1796 0.30
9 86_300000 0.049468484 1797 0.30
9 86_200000 0.040130741 1798 0.35
9 86_100000 0.005218978 1799 0.39
9 12a_0 0.003683199 1800 0.40
9 12a_100000 0.028643857 1801 0.40
9 97c_100000 0.10040556 1802 0.43
9 97c_200000 0.067018353 1803 0.53
9 103b_600000 0.048875506 1804 0.60
9 103b_400000 0.095454513 1805 0.65
9 103b_300000 0 1806 0.74
9 103b_200000 0 1807 0.74
9 84_1000000 0.002059077 1808 0.74
9 84_900000 0 1809 0.74
9 84_600000 0 1810 0.74
9 84_500000 0.003273452 1811 0.74
9 84_400000 0 1812 0.75
9 84_300000 0 1813 0.75
9 84_100000 0.004095175 1814 0.75
9 84_0 0 1815 0.75
9 238_0 0.004857662 1816 0.75
9 238_200000 0 1817 0.76
9 157_300000 0 1818 0.76
9 157_400000 0.021033662 1819 0.76
9 157_500000 0.008409027 1820 0.78
9 63b_900000 0.011280427 1821 0.79
9 63b_700000 0 1822 0.80
9 63b_600000 0.0093887 1823 0.80
9 63b_500000 0.002679134 1824 0.81
9 63b_400000 0.003220844 1825 0.81
9 63b_300000 0.002544933 1826 0.81
9 63b_200000 0 1827 0.81
9 63b_100000 0 1828 0.81
9 63b_0 0.002451058 1829 0.81
9 257a_100000 0.002411386 1830 0.82
9 257a_0 0.004859079 1831 0.82
9 7_3800000 0 1832 0.82
9 7_3700000 0.01007224 1833 0.82
9 7_3600000 0 1834 0.83
9 7_3400000 0 1835 0.83
9 7_3300000 0.009738506 1836 0.83
9 7_3200000 0.009667919 1837 0.84
9 7_3100000 0 1838 0.85
9 7_3000000 0.007354875 1839 0.85
9 7_2900000 0.004901093 1840 0.86
9 7_2800000 0 1841 0.87
9 7_2700000 0.004918727 1842 0.87
9 7_2600000 0.00295532 1843 0.87
9 7_2500000 0.009348417 1844 0.87
9 7_2400000 0.006155534 1845 0.88
9 7_2300000 0.005968603 1846 0.89
9 7_2200000 0.008423998 1847 0.90
9 7_2100000 0 1848 0.90
9 7_2000000 0.011309679 1849 0.90
9 7_1900000 0 1850 0.92
9 7_1800000 0.00324207 1851 0.92
9 7_1700000 0.007420857 1852 0.92
9 7_1600000 0.044207077 1853 0.93
9 7_1400000 0.002203971 1854 0.97
9 7_1300000 0.008262537 1855 0.97
9 7_1200000 0.00423221 1856 0.98
9 7_1100000 0 1857 0.98
9 7_1000000 0.009754551 1858 0.98
9 7_900000 0.016576712 1859 0.99
9 7_800000 0.011082789 1860 1.01
9 7_700000 0.010281502 1861 1.02
9 7_600000 0.014238614 1862 1.03
9 7_500000 0.033578539 1863 1.05
9 7_400000 0.00844906 1864 1.08
9 7_300000 0.019354952 1865 1.09
9 7_200000 0.017797991 1866 1.11
9 7_100000 0 1867 1.13
9 7_0 1868 1.13'''.split('\n')
IMF3 = '''10 223_100000 0 1 0.00
10 223_200000 0.020924873 2 0.00
10 223_300000 0 3 0.02
10 324_0 0.012173265 4 0.02
10 324_100000 0.017720923 5 0.03
10 4a_100000 0 6 0.05
10 749_0 0.003536227 7 0.05
10 48a_800000 0 8 0.05
10 48a_700000 0 9 0.05
10 48a_600000 0.016295425 10 0.05
10 48a_500000 0.020310035 11 0.07
10 48a_400000 0 12 0.09
10 48a_300000 0 13 0.09
10 48a_200000 0 14 0.09
10 48a_100000 0.002159812 15 0.09
10 48a_0 0.00508414 16 0.09
10 90_1000000 0 17 0.10
10 90_900000 0 18 0.10
10 90_800000 0.017787651 19 0.10
10 90_700000 0.042375963 20 0.12
10 90_600000 0 21 0.16
10 90_500000 0.00415369 22 0.16
10 90_400000 0.010671267 23 0.16
10 90_300000 0.005248119 24 0.17
10 90_200000 0.022332654 25 0.18
10 90_100000 0.025407143 26 0.20
10 90_0 0 27 0.23
10 206_400000 0 28 0.23
10 206_300000 0.057818585 29 0.23
10 206_200000 0 30 0.28
10 206_100000 0.016055521 31 0.28
10 206_0 0 32 0.30
10 210_100000 0 33 0.30
10 210_200000 0 34 0.30
10 210_300000 0 35 0.30
10 13_100000 0.053420278 36 0.30
10 13_300000 0.008611486 37 0.35
10 13_500000 0 38 0.36
10 13_600000 0.003170315 39 0.36
10 13_700000 0 40 0.37
10 13_800000 0.008264193 41 0.37
10 13_900000 0 42 0.37
10 13_1000000 0.032698355 43 0.37
10 13_1100000 0.023000589 44 0.41
10 13_1200000 0.009531721 45 0.43
10 13_1400000 0.009100484 46 0.44
10 13_1500000 0.032058734 47 0.45
10 13_1800000 0 48 0.48
10 13_1900000 0.005879455 49 0.48
10 13_2000000 0.042154612 50 0.49
10 13_2100000 0.003323724 51 0.53
10 13_2200000 0.0028409 52 0.53
10 13_2300000 0 53 0.53
10 13_2400000 0.00344792 54 0.53
10 13_2500000 0 55 0.54
10 13_2600000 0.25 56 0.54
10 172_500000 0.122850235 57 0.79
10 172_300000 0.03282125 58 0.91
10 172_200000 0.007720893 59 0.94
10 172_0 0.089531197 60 0.95
10 40_100000 0.003894323 61 1.04
10 40_200000 0 62 1.04
10 40_500000 0 63 1.04
10 40_600000 0.004053505 64 1.04
10 40_900000 0.027499396 65 1.05
10 40_1200000 0 66 1.08
10 40_1300000 0 67 1.08
10 40_1400000 0.013445883 68 1.08
10 33_400000 0 69 1.09
10 33_600000 0 70 1.09
10 33_700000 0 71 1.09
10 33_1000000 0 72 1.09
10 33_1200000 0 73 1.09
10 9a_1100000 0 74 1.09
10 9a_1000000 0 75 1.09
10 9a_800000 0 76 1.09
10 9a_700000 0.006560258 77 1.09
10 9a_600000 0.011666343 78 1.10
10 9a_500000 0.003884181 79 1.11
10 9a_400000 0.009968502 80 1.11
10 9a_300000 0 81 1.12
10 9a_200000 0.003632167 82 1.12
10 156_100000 0 83 1.13
10 156_500000 0.004722376 84 1.13
10 50_0 0 85 1.13
10 50_100000 0.005401497 86 1.13
10 50_200000 0.021387167 87 1.14
10 50_300000 0 88 1.16
10 50_400000 0 89 1.16
10 50_500000 0 90 1.16
10 50_800000 0.022861094 91 1.16
10 50_1200000 0 92 1.18
10 50_1300000 0.010673609 93 1.18
10 50_1400000 0 94 1.19
10 209_300000 0.00934116 95 1.19
10 209_100000 0 96 1.20
10 209_0 0.008390192 97 1.20
10 490_0 0 98 1.21
10 193a_300000 0 99 1.21
10 193a_200000 0 100 1.21
10 193a_100000 0.008356463 101 1.21
10 193a_0 0 102 1.22
10 125b_600000 0 103 1.22
10 125b_500000 0 104 1.22
10 125b_400000 0.009759166 105 1.22
10 125b_300000 0 106 1.23
10 125b_200000 0.009595492 107 1.23
10 125b_100000 0 108 1.24
10 125b_0 0 109 1.24
10 188_500000 0.037651929 110 1.24
10 188_400000 0.007290581 111 1.27
10 188_300000 0 112 1.28
10 188_200000 0.003426334 113 1.28
10 311_0 0 114 1.28
10 204_0 0.008599231 115 1.28
10 204_100000 0.005825985 116 1.29
10 204_200000 0.002442358 117 1.30
10 204_300000 0.023953369 118 1.30
10 204_400000 0 119 1.32
10 87_0 0.004468119 120 1.32
10 87_100000 0.044177298 121 1.33
10 87_200000 0.010252702 122 1.37
10 87_300000 0.002550662 123 1.38
10 87_400000 0.003624761 124 1.39
10 87_500000 0.023480694 125 1.39
10 87_600000 0.003284104 126 1.41
10 87_700000 0 127 1.42
10 87_800000 0 128 1.42
10 87_900000 0.003583831 129 1.42
10 87_1000000 0 130 1.42
10 159_0 0.003274393 131 1.42
10 159_100000 0.007101358 132 1.42
10 159_200000 0 133 1.43
10 159_300000 0 134 1.43
10 159_400000 0 135 1.43
10 159_500000 136 1.43
11 75_0 0 137 0.00
11 75_100000 0 138 0.00
11 75_200000 0.039059377 139 0.00
11 75_300000 0.009650554 140 0.04
11 75_400000 0 141 0.05
11 75_500000 0.008703283 142 0.05
11 75_600000 0.021322528 143 0.06
11 75_700000 0.012939132 144 0.08
11 75_800000 0.015799759 145 0.09
11 75_900000 0.007494006 146 0.11
11 75_1000000 0.008773985 147 0.11
11 75_1100000 0.038889991 148 0.12
11 228_0 0.018836105 149 0.16
11 228_100000 0.013074528 150 0.18
11 228_200000 0.002773953 151 0.19
11 228_300000 0 152 0.20
11 273b_0 0.024469466 153 0.20
11 273b_100000 0 154 0.22
11 213_0 0 155 0.22
11 213_100000 0.006354686 156 0.22
11 213_200000 0 157 0.23
11 213_300000 0.004349377 158 0.23
11 213_400000 0.044369257 159 0.23
11 63a_0 0.007413258 160 0.28
11 63a_100000 0.021821893 161 0.28
11 63a_200000 0.003741362 162 0.31
11 257b_100000 0 163 0.31
11 257b_0 0.03021103 164 0.31
11 30_1900000 0 165 0.34
11 30_1800000 0 166 0.34
11 30_1700000 0.009192108 167 0.34
11 30_1600000 0 168 0.35
11 30_1500000 0.013425292 169 0.35
11 30_1400000 0.004150274 170 0.36
11 30_1300000 0 171 0.37
11 30_1200000 0.029084231 172 0.37
11 30_1100000 0.019292453 173 0.40
11 48b_600000 0 174 0.42
11 48b_500000 0.048700604 175 0.42
11 48b_400000 0.003148245 176 0.46
11 48b_300000 0.044815635 177 0.47
11 48b_100000 0.033403146 178 0.51
11 182_0 0.011806448 179 0.55
11 182_200000 0 180 0.56
11 182_300000 0.000796357 181 0.56
11 182_400000 0.026685106 182 0.56
11 185_200000 0 183 0.58
11 185_100000 0.004700829 184 0.58
11 185_0 0.004641543 185 0.59
11 39b_200000 0.004494729 186 0.59
11 39b_300000 0.006085184 187 0.60
11 39b_500000 0 188 0.60
11 39b_600000 0.007319889 189 0.60
11 47b_100000 0.02306196 190 0.61
11 47b_200000 0 191 0.63
11 47b_300000 0.02489136 192 0.63
11 47b_900000 0 193 0.66
11 161_0 0.002731847 194 0.66
11 161_100000 0 195 0.66
11 161_200000 0 196 0.66
11 161_300000 0 197 0.66
11 161_400000 0 198 0.66
11 161_500000 0 199 0.66
11 131_700000 0 200 0.66
11 131_600000 0 201 0.66
11 49_200000 0 202 0.66
11 49_300000 0 203 0.66
11 49_500000 0 204 0.66
11 49_600000 0 205 0.66
11 49_1000000 0.002811045 206 0.66
11 49_1100000 0 207 0.67
11 49_1200000 0.002816604 208 0.67
11 49_1300000 0 209 0.67
11 167_200000 0 210 0.67
11 167_100000 0 211 0.67
11 598_0 0 212 0.67
11 22a_0 0 213 0.67
11 239_100000 0 214 0.67
11 239_0 0 215 0.67
11 100_800000 0 216 0.67
11 100_700000 0 217 0.67
11 100_600000 0 218 0.67
11 100_500000 0 219 0.67
11 100_400000 0 220 0.67
11 100_300000 0 221 0.67
11 100_100000 0.002288913 222 0.67
11 100_0 0.00419512 223 0.67
11 6b_1300000 0 224 0.67
11 6b_1200000 0.00412357 225 0.67
11 6b_1000000 0.019631061 226 0.68
11 6b_900000 0.080807147 227 0.70
11 6b_800000 0.089953572 228 0.78
11 6b_0 0.124096185 229 0.87
11 145_0 0.043315583 230 0.99
11 779_0 0 231 1.04
11 162_400000 0 232 1.04
11 162_300000 0 233 1.04
11 162_200000 0 234 1.04
11 162_100000 0.010600757 235 1.04
11 162_0 0 236 1.05
11 221a_0 0.006738661 237 1.05
11 221a_100000 0 238 1.05
11 22b_0 0.079528147 239 1.05
11 22b_200000 0.010647021 240 1.13
11 22b_300000 0.003004047 241 1.14
11 22b_400000 0.00558309 242 1.15
11 22b_500000 0 243 1.15
11 22b_600000 0.002452696 244 1.15
11 22b_700000 0.062585857 245 1.16
11 22b_800000 0.009011901 246 1.22
11 22b_900000 0.00626378 247 1.23
11 22b_1000000 0 248 1.23
11 22b_1100000 0 249 1.23
11 22b_1200000 250 1.23
12 363_0 0.023358097 251 0.00
12 58b_700000 0.006907336 252 0.02
12 58b_600000 0.013626249 253 0.03
12 58b_500000 0.033521327 254 0.04
12 58b_400000 0 255 0.08
12 58b_300000 0.003978177 256 0.08
12 58b_200000 0.017503985 257 0.08
12 220_400000 0.032071429 258 0.10
12 220_100000 0.00361701 259 0.13
12 220_0 0.018585505 260 0.13
12 336_100000 0.074442195 261 0.15
12 336_0 0 262 0.23
12 132_700000 0.004501426 263 0.23
12 132_600000 0 264 0.23
12 132_500000 0.021949675 265 0.23
12 132_400000 0 266 0.25
12 132_300000 0.003125651 267 0.25
12 132_200000 0 268 0.26
12 132_100000 0 269 0.26
12 132_0 0.002249039 270 0.26
12 224_300000 0 271 0.26
12 224_100000 0.057327144 272 0.26
12 224_0 0.006618399 273 0.32
12 785_0 0 274 0.32
12 380_100000 0.010141596 275 0.32
12 332_0 0.033419781 276 0.33
12 349_0 0.075259766 277 0.37
12 287_0 0.053143944 278 0.44
12 201_0 0.014226839 279 0.50
12 201_200000 0.015851743 280 0.51
12 201_300000 0.049069912 281 0.53
12 305_100000 0 282 0.57
12 91_200000 0.023193473 283 0.57
12 91_300000 0 284 0.60
12 91_500000 0 285 0.60
12 91_600000 0 286 0.60
12 91_800000 0.004346006 287 0.60
12 31_1400000 0.004400857 288 0.60
12 31_1200000 0 289 0.61
12 31_800000 0 290 0.61
12 31_700000 0 291 0.61
12 31_600000 0 292 0.61
12 31_500000 0 293 0.61
12 31_200000 0.019180845 294 0.61
12 241_300000 0.013257795 295 0.63
12 241_200000 0.004091418 296 0.64
12 241_100000 0 297 0.64
12 78_1000000 0.003928263 298 0.64
12 114a_200000 0 299 0.65
12 160_300000 0 300 0.65
12 160_200000 0.027912889 301 0.65
12 160_100000 0.044150137 302 0.67
12 577_0 0.010093983 303 0.72
12 497_0 0.170743595 304 0.73
12 37_1600000 0 305 0.90
12 37_1500000 0 306 0.90
12 37_1400000 0.019811269 307 0.90
12 37_1300000 0.012101702 308 0.92
12 37_1200000 0.032409736 309 0.93
12 37_1100000 0.03597627 310 0.96
12 37_1000000 0.017531988 311 1.00
12 37_900000 0.031151788 312 1.02
12 37_800000 0.006593155 313 1.05
12 37_700000 0 314 1.06
12 37_600000 0.005551033 315 1.06
12 37_500000 0.002969761 316 1.06
12 37_400000 0 317 1.06
12 37_300000 0.036579577 318 1.06
12 37_200000 0.032988285 319 1.10
12 37_100000 0.006105484 320 1.13
12 37_0 0.006210569 321 1.14
12 905_0 0.003269819 322 1.15
12 334_0 0.003611949 323 1.15
12 334_100000 0.00370013 324 1.15
12 297_200000 0 325 1.16
12 297_100000 0.007638309 326 1.16
12 297_0 0.019291842 327 1.16
12 39a_800000 0.019899557 328 1.18
12 39a_700000 0 329 1.20
12 39a_600000 0.036065416 330 1.20
12 39a_400000 0.003753697 331 1.24
12 39a_300000 0.005642526 332 1.24
12 39a_200000 0.006049738 333 1.25
12 39a_100000 0.011851757 334 1.25
12 39a_0 0.002508276 335 1.27
12 17b_0 0 336 1.27
12 17b_100000 0.012860847 337 1.27
12 17b_200000 0.008094308 338 1.28
12 17b_300000 0 339 1.29
12 17b_400000 0.021254686 340 1.29
12 17b_500000 0.019493292 341 1.31
12 17b_600000 0.002631564 342 1.33
12 17b_700000 0.002897668 343 1.33
12 17b_800000 0.094016474 344 1.34
12 17b_900000 0.00206658 345 1.43
12 17b_1000000 0.003212803 346 1.43
12 17b_1100000 0.008726671 347 1.44
12 17b_1200000 0.004780564 348 1.44
12 44b_0 0.004497997 349 1.45
12 44b_100000 0.0032831 350 1.45
12 44b_200000 0.012481804 351 1.46
12 44b_300000 0 352 1.47
12 44b_400000 0 353 1.47
12 44b_500000 0.03330604 354 1.47
12 683_0 355 1.50
13 122_0 0 356 0.00
13 122_100000 0.023987281 357 0.00
13 122_200000 0 358 0.02
13 122_300000 0 359 0.02
13 122_400000 0.010384493 360 0.02
13 122_500000 0.005290885 361 0.03
13 122_600000 0.104667562 362 0.04
13 235_300000 0.032465588 363 0.14
13 235_200000 0 364 0.18
13 235_0 0.012304489 365 0.18
13 52_0 0.031722243 366 0.19
13 52_100000 0 367 0.22
13 52_200000 0.004705459 368 0.22
13 52_300000 0.015956935 369 0.23
13 52_400000 0 370 0.24
13 52_500000 0.030495839 371 0.24
13 52_800000 0.003867253 372 0.27
13 52_1000000 0 373 0.28
13 52_1100000 0.00485438 374 0.28
13 52_1200000 0 375 0.28
13 52_1300000 0 376 0.28
13 52_1400000 0.025400007 377 0.28
13 390_0 0.072563035 378 0.31
13 129b_200000 0 379 0.38
13 129b_300000 0.02461875 380 0.38
13 129b_400000 0.050561091 381 0.40
13 329_100000 0.017048567 382 0.45
13 329_0 0.012193155 383 0.47
13 139_600000 0 384 0.48
13 139_400000 0 385 0.48
13 139_300000 0.018773369 386 0.48
13 139_100000 0 387 0.50
13 139_0 0.110392925 388 0.50
13 34_1000000 0 389 0.61
13 34_1200000 0 390 0.61
13 34_1300000 0 391 0.61
13 34_1400000 0 392 0.61
13 34_1500000 0.003868157 393 0.61
13 34_1600000 0.052184771 394 0.62
13 138_200000 0.024564269 395 0.67
13 225_300000 0.026259732 396 0.69
13 225_200000 0.004407763 397 0.72
13 107_100000 0 398 0.72
13 107_200000 0 399 0.72
13 107_300000 0 400 0.72
13 107_400000 0.006670889 401 0.72
13 107_500000 0.025779536 402 0.73
13 107_800000 0 403 0.76
13 402_0 0.002326566 404 0.76
13 230_100000 0.000228207 405 0.76
13 230_200000 0.002025981 406 0.76
13 230_300000 0.005169447 407 0.76
13 174_100000 0 408 0.77
13 174_200000 0 409 0.77
13 174_400000 0.020997558 410 0.77
13 344_0 0.005433276 411 0.79
13 108_900000 0.004724913 412 0.79
13 108_800000 0.003929682 413 0.80
13 108_700000 0.005303497 414 0.80
13 108_600000 0.015640849 415 0.81
13 108_500000 0.055468537 416 0.82
13 108_400000 0.017525948 417 0.88
13 108_300000 0.006124365 418 0.89
13 108_200000 0.010705788 419 0.90
13 245_300000 0 420 0.91
13 245_200000 0.044517507 421 0.91
13 245_100000 0.037078072 422 0.96
13 4b_3800000 0.017017798 423 0.99
13 4b_3700000 0 424 1.01
13 4b_3600000 0.003910372 425 1.01
13 4b_3500000 0.020119986 426 1.01
13 4b_3400000 0.053641944 427 1.03
13 4b_3300000 0 428 1.09
13 4b_3100000 0 429 1.09
13 4b_3000000 0.006981263 430 1.09
13 4b_2900000 0.073164579 431 1.09
13 4b_2800000 0.006309513 432 1.17
13 4b_2700000 0 433 1.17
13 4b_2600000 0.00551761 434 1.17
13 4b_2500000 0 435 1.18
13 4b_2400000 0 436 1.18
13 4b_2300000 0.004243659 437 1.18
13 4b_2200000 0.00894899 438 1.18
13 4b_2100000 0.006592286 439 1.19
13 4b_2000000 0.002768579 440 1.20
13 4b_1900000 0.049312169 441 1.20
13 4b_1800000 0.004699424 442 1.25
13 4b_1700000 0 443 1.26
13 4b_1600000 0.012720136 444 1.26
13 4b_1500000 0.041338891 445 1.27
13 4b_1400000 0.008293402 446 1.31
13 4b_1300000 0.020502778 447 1.32
13 4b_1200000 0.015187241 448 1.34
13 4b_1100000 0.012341158 449 1.35
13 4b_1000000 0.027274704 450 1.37
13 4b_900000 0.007271857 451 1.39
13 4b_800000 0.002839539 452 1.40
13 4b_600000 0 453 1.40
13 4b_500000 0.035716366 454 1.40
13 4b_400000 0.02662962 455 1.44
13 4b_300000 0.029373225 456 1.47
13 4b_200000 0.009604436 457 1.50
13 4b_100000 0.020698176 458 1.51
13 4b_0 0 459 1.53
13 83b_800000 0 460 1.53
13 83b_700000 0.009048568 461 1.53
13 83b_600000 0.00609112 462 1.54
13 83b_500000 0.003436726 463 1.54
13 83b_400000 0.00472593 464 1.54
13 83b_300000 0 465 1.55
13 83b_200000 0.007304636 466 1.55
13 83b_0 0.00278397 467 1.56
13 115a_300000 0 468 1.56
13 115a_200000 0.00289715 469 1.56
13 115a_100000 0 470 1.56
13 115a_0 471 1.56
14 102_0 0.005733521 472 0.00
14 102_100000 0 473 0.01
14 102_200000 0 474 0.01
14 102_300000 0 475 0.01
14 102_400000 0.00402354 476 0.01
14 102_500000 0.012107676 477 0.01
14 102_600000 0.014039635 478 0.02
14 102_800000 0 479 0.04
14 102_900000 0.006636173 480 0.04
14 6c_0 0.016665354 481 0.04
14 6c_100000 0 482 0.06
14 26a_0 0.00996436 483 0.06
14 26a_100000 0.01796039 484 0.07
14 26a_200000 0 485 0.09
14 26a_300000 0.013917143 486 0.09
14 26a_400000 0 487 0.10
14 26a_500000 0.003912587 488 0.10
14 26a_600000 0.005311603 489 0.10
14 26a_700000 0.00340967 490 0.11
14 26a_800000 0.008954822 491 0.11
14 26a_900000 0.044193958 492 0.12
14 26a_1000000 0.021537534 493 0.17
14 26a_1100000 0.00609575 494 0.19
14 26a_1200000 0.004731162 495 0.19
14 26a_1300000 0.011714314 496 0.20
14 26a_1400000 0.021927818 497 0.21
14 26a_1500000 0.003125125 498 0.23
14 278_0 0 499 0.24
14 278_100000 0.022819702 500 0.24
14 148_200000 0.014411922 501 0.26
14 148_300000 0.016179654 502 0.27
14 148_400000 0.00587718 503 0.29
14 148_500000 0 504 0.30
14 148_600000 0.005607944 505 0.30
14 198_0 0 506 0.30
14 198_100000 0 507 0.30
14 198_200000 0.077558245 508 0.30
14 101_200000 0 509 0.38
14 101_100000 0.008133208 510 0.38
14 101_0 0.25 511 0.39
14 123_400000 0.006216516 512 0.64
14 123_300000 0.019461883 513 0.64
14 123_200000 0 514 0.66
14 123_100000 0.003371498 515 0.66
14 123_0 0.013110534 516 0.67
14 15_300000 0.041279844 517 0.68
14 15_1100000 0.018164317 518 0.72
14 15_2500000 0 519 0.74
14 127_300000 0 520 0.74
14 127_200000 0.016425355 521 0.74
14 127_0 0.057845417 522 0.75
14 211_100000 0.053613669 523 0.81
14 361_100000 0.16035582 524 0.87
14 195_400000 0.051580909 525 1.03
14 323_0 0.017882688 527 1.08
14 323_100000 0.041955479 528 1.10
14 128_300000 0 529 1.14
14 128_400000 0.002943915 530 1.14
14 128_600000 0.01479946 531 1.14
14 178_0 0.004756551 532 1.16
14 178_100000 0.003170396 533 1.16
14 178_300000 0 534 1.16
14 178_400000 0.015494317 535 1.16
14 140b_400000 0.011227552 536 1.18
14 140b_100000 0.006200046 537 1.19
14 92_0 0.009120025 538 1.20
14 92_100000 0.003442665 539 1.21
14 92_200000 0 540 1.21
14 92_300000 0.009116163 541 1.21
14 92_400000 0.018719045 542 1.22
14 92_500000 0 543 1.24
14 92_600000 0.024215198 544 1.24
14 92_700000 0.02443871 545 1.26
14 92_800000 0 546 1.29
14 92_900000 0.004088663 547 1.29
14 92_1000000 0.007874928 548 1.29
14 1131_0 0.008036438 549 1.30
14 290_0 0 550 1.31
14 290_100000 0 551 1.31
14 290_200000 0.010318575 552 1.31
14 2_0 0 553 1.32
14 2_100000 0.016078676 554 1.32
14 2_200000 0.063132208 555 1.33
14 2_300000 0 556 1.39
14 2_400000 0 557 1.39
14 2_600000 0.005796377 558 1.39
14 2_700000 0 559 1.40
14 2_800000 0.005426948 560 1.40
14 2_900000 0.019608401 561 1.41
14 2_1000000 0.038974648 562 1.43
14 2_1100000 0.002615819 563 1.46
14 2_1200000 0.026722073 564 1.47
14 2_1300000 0.181928293 565 1.49
14 2_2800000 0.00031036 566 1.68
14 2_3000000 0.03713601 567 1.68
14 2_3200000 0 568 1.71
14 2_3500000 0.02781336 569 1.71
14 2_4000000 0.025402873 570 1.74
14 2_4100000 0.003807225 571 1.77
14 2_4200000 0.00636014 572 1.77
14 2_4300000 0.006041501 573 1.78
14 2_4400000 0.023034067 574 1.78
14 2_4500000 0.016378913 575 1.81
14 58a_0 0 576 1.82
14 58a_100000 0.00449779 577 1.82
14 58a_200000 0.009785237 578 1.83
14 58a_300000 0 579 1.84
14 58a_400000 0.006088577 580 1.84
14 396_0 581 1.84
1 853_0 0.071482486 582 0.00
1 79_0 0 583 0.07
1 79_100000 0.004508496 584 0.07
1 79_200000 0.030911267 585 0.08
1 79_300000 0.028525552 586 0.11
1 79_400000 0.031033936 587 0.14
1 79_600000 0 588 0.17
1 79_700000 0 589 0.17
1 79_800000 0.013021107 590 0.17
1 79_900000 0.007064129 591 0.18
1 79_1000000 0.021330968 592 0.19
1 69_0 0.009276198 593 0.21
1 69_100000 0.011338286 594 0.22
1 69_200000 0 595 0.23
1 69_300000 0.011730574 596 0.23
1 69_500000 0 597 0.24
1 69_600000 0.026886799 598 0.24
1 69_800000 0 599 0.27
1 69_900000 0.018183113 600 0.27
1 69_1100000 0 601 0.29
1 69_1200000 0.027637807 602 0.29
1 181_0 0.019479707 603 0.31
1 181_100000 0.031293989 604 0.33
1 181_200000 0.015431142 605 0.36
1 181_300000 0.010159246 606 0.38
1 559_0 0.011006571 607 0.39
1 60_0 0 608 0.40
1 60_100000 0.038761901 609 0.40
1 60_200000 0 610 0.44
1 60_300000 0.00321247 611 0.44
1 60_400000 0.00293033 612 0.44
1 60_500000 0.002955842 613 0.45
1 60_600000 0 614 0.45
1 60_700000 0.005287463 615 0.45
1 60_800000 0.048927686 616 0.45
1 60_900000 0 617 0.50
1 60_1000000 0.034959382 618 0.50
1 60_1100000 0.003829562 619 0.54
1 60_1200000 0 620 0.54
1 60_1300000 0.049175621 621 0.54
1 60_1400000 0.001921092 622 0.59
1 333_0 0 623 0.59
1 333_100000 0.00277684 624 0.59
1 165_0 0 625 0.60
1 165_100000 0.027710606 626 0.60
1 165_200000 0.012989493 627 0.62
1 165_300000 0.020866832 628 0.64
1 362_100000 0.042949034 629 0.66
1 308_100000 0.016139647 630 0.70
1 26b_400000 0.003956818 631 0.72
1 26b_300000 0 632 0.72
1 26b_200000 0.007521211 633 0.72
1 26b_100000 4.11E-005 634 0.73
1 1184_0 0.003854865 635 0.73
1 240_100000 0 636 0.73
1 314_0 0.004863706 637 0.73
1 314_100000 0.016366903 638 0.74
1 82_1000000 0 639 0.75
1 82_800000 0 640 0.75
1 82_700000 0 641 0.75
1 82_600000 0.00558596 642 0.75
1 82_300000 0.015420939 643 0.76
1 82_200000 0.018073243 644 0.77
1 82_100000 0.004443197 645 0.79
1 82_0 0.00578715 646 0.80
1 115b_0 0.006030446 647 0.80
1 115b_100000 0 648 0.81
1 115b_200000 0 649 0.81
1 115b_300000 0 650 0.81
1 232_100000 0 651 0.81
1 232_300000 0 652 0.81
1 83a_100000 0 653 0.81
1 187_0 0 654 0.81
1 146_0 0 655 0.81
1 563_0 0 656 0.81
1 74_400000 0.002318879 657 0.81
1 74_1100000 0.002341044 658 0.81
1 80a_0 0.007372249 659 0.81
1 80a_300000 0 660 0.82
1 80a_400000 0 661 0.82
1 80a_500000 0.00365688 662 0.82
1 80a_600000 0.022048952 663 0.82
1 80a_700000 0.012499747 664 0.85
1 80a_800000 665 0.86
2 18_2100000 0 666 0.00
2 18_2000000 0 667 0.00
2 18_1900000 0 668 0.00
2 18_1800000 0.025707027 669 0.00
2 18_1600000 0.004086783 670 0.03
2 18_1500000 0 671 0.03
2 18_1400000 0.009238622 672 0.03
2 18_1300000 0.037685633 673 0.04
2 18_1200000 0.058444735 674 0.08
2 18_1100000 0 675 0.14
2 18_1000000 0.010501117 676 0.14
2 18_900000 0 677 0.15
2 18_800000 0.003287833 678 0.15
2 18_600000 0.016261324 679 0.15
2 18_500000 0.038008384 680 0.17
2 18_200000 0.026623276 681 0.20
2 18_100000 0.011188068 682 0.23
2 18_0 0 683 0.24
2 89_0 0.002835382 684 0.24
2 89_100000 0.009299373 685 0.24
2 89_200000 0.019544271 686 0.25
2 89_400000 0.016065092 687 0.27
2 89_500000 0.007497752 688 0.29
2 89_600000 0.018423131 689 0.30
2 89_700000 0.004426888 690 0.31
2 89_800000 0.017964313 691 0.32
2 89_900000 0.022157879 692 0.34
2 89_1000000 0.006576512 693 0.36
2 44a_700000 0 694 0.37
2 44a_600000 0.00279775 695 0.37
2 44a_500000 0.002760195 696 0.37
2 44a_400000 0.022453213 697 0.37
2 44a_0 0.004717146 698 0.39
2 212_0 0.005222741 699 0.40
2 212_100000 0.013744885 700 0.40
2 212_200000 0.009114538 701 0.42
2 212_300000 0.007384611 702 0.43
2 212_400000 0.009909301 703 0.43
2 249_0 0.013141912 704 0.44
2 249_100000 0.008787201 705 0.46
2 249_200000 0.005872505 706 0.47
2 81_100000 0.006638697 707 0.47
2 81_200000 0 708 0.48
2 81_300000 0.013990945 709 0.48
2 81_400000 0.04435441 710 0.49
2 81_900000 0.093935028 711 0.54
2 19_0 0.043576163 712 0.63
2 19_200000 0.011451947 713 0.67
2 19_300000 0 714 0.69
2 19_400000 0.002707212 715 0.69
2 19_600000 0 716 0.69
2 19_700000 0 717 0.69
2 19_800000 0 718 0.69
2 19_1000000 0 719 0.69
2 19_1100000 0 720 0.69
2 19_1200000 0 721 0.69
2 19_1300000 0.005574053 722 0.69
2 112_100000 0 723 0.69
2 112_200000 0 724 0.69
2 112_800000 0 725 0.69
2 216_300000 0 726 0.69
2 216_200000 0 727 0.69
2 216_0 0.018242143 728 0.69
2 73_1200000 0 729 0.71
2 73_1100000 0 730 0.71
2 73_800000 0 731 0.71
2 73_700000 0 732 0.71
2 65_0 0 733 0.71
2 65_300000 0 734 0.71
2 65_400000 0.001917697 735 0.71
2 65_500000 0 736 0.71
2 65_600000 0.007937676 737 0.71
2 65_700000 0.017074703 738 0.72
2 65_1000000 0.033136504 739 0.74
2 42_100000 0 740 0.77
2 42_200000 0 741 0.77
2 42_300000 0.003037972 742 0.77
2 42_400000 5.03E-005 743 0.78
2 42_600000 0 744 0.78
2 42_700000 0.039501679 745 0.78
2 42_1000000 0.010254242 746 0.81
2 42_1500000 0 747 0.83
2 42_1600000 0 748 0.83
2 173_500000 0 749 0.83
2 173_400000 0 750 0.83
2 173_300000 0.012009831 751 0.83
2 173_200000 0.031267489 752 0.84
2 173_100000 6.65E-005 753 0.87
2 173_0 0.023361984 754 0.87
2 41a_0 0.001575723 755 0.89
2 41a_100000 0.023093476 756 0.89
2 41a_200000 0 757 0.92
2 41a_300000 0 758 0.92
2 41a_400000 0.002544028 759 0.92
2 41a_500000 0.001247535 760 0.92
2 41a_600000 0 761 0.92
2 41a_700000 0 762 0.92
2 41a_800000 0.00579245 763 0.92
2 41a_1100000 0 764 0.93
2 41a_1200000 0.027465878 765 0.93
2 41a_1300000 0.012893585 766 0.95
2 41a_1400000 0.010586128 767 0.97
2 41a_1500000 0 768 0.98
2 151_0 0 769 0.98
2 151_100000 0 770 0.98
2 151_200000 0 771 0.98
2 151_300000 0.002336063 772 0.98
2 151_400000 0 773 0.98
2 151_500000 0.039612141 774 0.98
2 429_0 0.079752804 775 1.02
2 27_0 0.001844282 776 1.10
2 27_100000 0.048763913 777 1.10
2 27_200000 0.101752454 778 1.15
2 27_1000000 0 779 1.25
2 27_1100000 0.012306775 780 1.25
2 27_1200000 0.011759496 781 1.26
2 27_1300000 0.025521609 782 1.28
2 27_1400000 0.002487179 783 1.30
2 27_1500000 0 784 1.30
2 27_1600000 0 785 1.30
2 27_1700000 0 786 1.30
2 27_1800000 0.003088523 787 1.30
2 27_1900000 0.01114926 788 1.31
2 27_2000000 789 1.32
3 316_100000 0.028304594 790 0.00
3 316_0 0 791 0.03
3 137_400000 0 792 0.03
3 137_300000 0.008130243 793 0.03
3 137_200000 0 794 0.04
3 55a_200000 0.040381866 795 0.04
3 116_800000 0.004024395 796 0.08
3 116_700000 0 797 0.08
3 116_600000 0 798 0.08
3 116_400000 0 799 0.08
3 116_300000 0.034426601 800 0.08
3 116_100000 0.014167844 801 0.12
3 116_0 0.035555125 802 0.13
3 62_1300000 0.037752943 803 0.16
3 62_900000 0 804 0.20
3 62_600000 0 805 0.20
3 35_0 0.007633423 806 0.20
3 35_400000 0.00246404 807 0.21
3 23_1600000 0 808 0.21
3 23_1200000 0.019868057 809 0.21
3 23_900000 0 810 0.23
3 23_800000 0 811 0.23
3 23_700000 0 812 0.23
3 23_600000 0 813 0.23
3 23_300000 0 814 0.23
3 23_200000 0.007898275 815 0.23
3 265_0 0.008491516 816 0.24
3 265_100000 0.016781138 817 0.25
3 265_200000 0.012494253 818 0.27
3 70b_600000 0.026381735 819 0.28
3 5a_1700000 0.053237862 820 0.30
3 5a_1600000 0 821 0.36
3 5a_1500000 0.021789752 822 0.36
3 5a_1300000 0.070299936 823 0.38
3 5a_1200000 0.023777527 824 0.45
3 5a_1100000 0.056882039 825 0.47
3 5a_1000000 0.06700235 826 0.53
3 5a_900000 0.017688091 827 0.60
3 5a_700000 0.025371049 828 0.62
3 5a_600000 0.013161003 829 0.64
3 5a_500000 0 830 0.65
3 5a_400000 0.058460794 831 0.65
3 5a_300000 0.002744818 832 0.71
3 5a_200000 0.016282879 833 0.72
3 5a_0 0.041045722 834 0.73
3 106a_0 0.068454836 835 0.77
3 106a_200000 0.026416175 836 0.84
3 106a_300000 0.037872269 837 0.87
3 106a_400000 0.02196133 838 0.91
3 106a_500000 0.011422214 839 0.93
3 106a_700000 0 840 0.94
3 258_300000 0.003047539 841 0.94
3 258_200000 0.013044001 842 0.94
3 258_100000 0 843 0.95
3 258_0 0.019708206 844 0.95
3 169_500000 0.035009053 845 0.97
3 169_400000 0.012066934 846 1.01
3 169_300000 0.010147781 847 1.02
3 169_200000 0.003388658 848 1.03
3 169_100000 0.049893344 849 1.04
3 169_0 0.012864822 850 1.08
3 98_1000000 0.006360732 851 1.10
3 98_900000 0.003169249 852 1.10
3 98_800000 0.01126206 853 1.11
3 98_700000 0.025091049 854 1.12
3 98_500000 0.005308878 855 1.14
3 98_400000 0.003875002 856 1.15
3 98_300000 0 857 1.15
3 98_200000 0.024807007 858 1.15
3 98_100000 0 859 1.18
3 98_0 0 860 1.18
3 261_0 0.01182839 861 1.18
3 261_100000 0.018561553 862 1.19
3 261_200000 0.003314974 863 1.21
3 273a_0 0 864 1.21
3 64_1300000 0.012689254 865 1.21
3 64_1200000 0.029669354 866 1.22
3 64_1100000 0.002382125 867 1.25
3 64_1000000 0.005898219 868 1.26
3 64_900000 0.018991879 869 1.26
3 64_800000 0.004888684 870 1.28
3 64_700000 0.002809169 871 1.29
3 64_600000 0.02799968 872 1.29
3 64_500000 0.035091132 873 1.32
3 64_400000 0.003914272 874 1.35
3 64_300000 0.017109309 875 1.36
3 64_200000 0 876 1.37
3 64_100000 0 877 1.37
3 64_0 878 1.37
4 1_4900000 0.101907112 879 0.00
4 1_4800000 0.00020192 880 0.10
4 1_4600000 0 881 0.10
4 1_4500000 0.00582006 882 0.10
4 1_4400000 0.013300917 883 0.11
4 1_4300000 0.041085868 884 0.12
4 1_4200000 0 885 0.16
4 1_4100000 0 886 0.16
4 1_4000000 0.047778868 887 0.16
4 1_3800000 0.002755532 888 0.21
4 1_3700000 0 889 0.21
4 1_3600000 0.00311657 890 0.21
4 1_3500000 0.025263758 891 0.22
4 1_3300000 0.014201175 892 0.24
4 1_3200000 0 893 0.26
4 1_3100000 0.005160042 894 0.26
4 1_3000000 0.044579383 895 0.26
4 1_2900000 0.003097471 896 0.31
4 1_2800000 0.009223568 897 0.31
4 1_2700000 0.026902959 898 0.32
4 1_2600000 0 899 0.34
4 1_2500000 0.008624358 900 0.34
4 1_2400000 0.013869916 901 0.35
4 1_2300000 0.001890564 902 0.37
4 1_2200000 0.005941516 903 0.37
4 1_2100000 0 904 0.37
4 1_1900000 0.004546795 905 0.37
4 1_1800000 0.030228914 906 0.38
4 1_1700000 0.003186581 907 0.41
4 1_1600000 0.002971069 908 0.41
4 1_1500000 0 909 0.42
4 1_1400000 0.0055237 910 0.42
4 1_1300000 0 911 0.42
4 1_1200000 0 912 0.42
4 1_1100000 0.010507046 913 0.42
4 1_1000000 0.003101747 914 0.43
4 1_900000 0.011108073 915 0.43
4 1_800000 0.068136738 916 0.45
4 1_600000 0.021733489 917 0.51
4 1_400000 0.008149437 918 0.54
4 1_200000 0.003397094 919 0.54
4 1_100000 0 920 0.55
4 1_0 0.02074373 921 0.55
4 54_1400000 0.002742923 922 0.57
4 54_1300000 0.024585451 923 0.57
4 54_1200000 0 924 0.60
4 54_1100000 0.003195834 925 0.60
4 54_1000000 0 926 0.60
4 54_900000 0.008666488 927 0.60
4 54_800000 0 928 0.61
4 54_700000 0 929 0.61
4 54_600000 0 930 0.61
4 54_500000 0 931 0.61
4 54_400000 0 932 0.61
4 54_200000 0.01128161 933 0.61
4 54_100000 0.008664261 934 0.62
4 54_0 0.004237673 935 0.63
4 120_0 0.017615634 936 0.63
4 120_100000 0 937 0.65
4 120_200000 0 938 0.65
4 120_300000 0 939 0.65
4 120_400000 0.035290347 940 0.65
4 120_600000 0 941 0.68
4 120_700000 0 942 0.68
4 120_800000 0.004593377 943 0.68
4 150_0 0 944 0.69
4 150_100000 0 945 0.69
4 150_200000 0.00907023 946 0.69
4 150_300000 0.041015649 947 0.70
4 57_600000 0 948 0.74
4 57_500000 0 949 0.74
4 57_400000 0.015594176 950 0.74
4 57_300000 0 951 0.75
4 57_100000 0.006755394 952 0.75
4 254a_100000 0.019127282 953 0.76
4 254a_0 0 954 0.78
4 154_500000 0.014826302 955 0.78
4 154_400000 0.002777422 956 0.80
4 154_200000 0.003165502 957 0.80
4 154_100000 0 958 0.80
4 154_0 0 959 0.80
4 110_0 0.008306254 960 0.80
4 110_100000 0 961 0.81
4 110_200000 0.005372204 962 0.81
4 110_400000 0.00500952 963 0.81
4 177_0 0 964 0.82
4 177_100000 0.00591693 965 0.82
4 177_200000 0 966 0.83
4 177_300000 0.004032519 967 0.83
4 177_400000 0.006607949 968 0.83
4 6a_2300000 0 969 0.84
4 6a_2200000 0.011092429 970 0.84
4 6a_2100000 0.008223799 971 0.85
4 6a_2000000 0.004302878 972 0.86
4 6a_1900000 0 973 0.86
4 6a_1800000 0 974 0.86
4 6a_1700000 0.005689606 975 0.86
4 6a_1400000 0 976 0.87
4 6a_1300000 0 977 0.87
4 6a_1200000 0.014550598 978 0.87
4 6a_1000000 0.0049489 979 0.88
4 6a_900000 0 980 0.89
4 6a_800000 0 981 0.89
4 6a_700000 0 982 0.89
4 6a_600000 0.035512829 983 0.89
4 6a_500000 0.013095227 984 0.92
4 6a_300000 0.020156737 985 0.93
4 6a_200000 0.028838091 986 0.95
4 70c_100000 0 987 0.98
4 55b_100000 0 988 0.98
4 55b_200000 0.003297844 989 0.98
4 55b_300000 0.005130263 990 0.99
4 55b_400000 0.006963862 991 0.99
4 55b_500000 0.019003475 992 1.00
4 346_0 0 993 1.02
4 346_100000 0.002611045 994 1.02
4 320_0 0 995 1.02
4 320_100000 0 996 1.02
4 96a_0 0 997 1.02
4 96a_100000 0 998 1.02
4 96a_200000 0.019151816 999 1.02
4 96a_400000 0.020566235 1000 1.04
4 96a_500000 0 1001 1.06
4 96a_600000 0.008369771 1002 1.06
4 96a_700000 0.025487711 1003 1.07
4 96a_800000 0.075821864 1004 1.09
4 85_1100000 0 1005 1.17
4 85_1000000 0.005885823 1006 1.17
4 85_900000 0.044693264 1007 1.18
4 1287_0 0.022453652 1008 1.22
4 196_500000 0.107719626 1009 1.24
4 196_0 0.14354875 1010 1.35
4 638_0 1011 1.49
5 129a_0 0 1012 0.00
5 129a_100000 0 1013 0.00
5 129a_200000 0.004350622 1014 0.00
5 66_0 0.002225986 1015 0.00
5 66_100000 0.016603535 1016 0.01
5 66_200000 0 1017 0.02
5 66_300000 0.03910368 1018 0.02
5 66_400000 0.0392125 1019 0.06
5 66_500000 0 1020 0.10
5 66_600000 0 1021 0.10
5 66_700000 0.044976614 1022 0.10
5 66_800000 0.007111149 1023 0.15
5 66_900000 0.007224943 1024 0.15
5 66_1000000 0 1025 0.16
5 66_1100000 0 1026 0.16
5 66_1200000 0.023112581 1027 0.16
5 32_0 0.009246652 1028 0.18
5 32_100000 0.004510125 1029 0.19
5 32_200000 0.023133393 1030 0.20
5 32_300000 0.003009006 1031 0.22
5 32_400000 0.003416462 1032 0.22
5 32_500000 0.017854312 1033 0.23
5 32_600000 0.013746542 1034 0.25
5 32_800000 0 1035 0.26
5 32_900000 0.007811633 1036 0.26
5 32_1000000 0.021798373 1037 0.27
5 32_1100000 0.018427905 1038 0.29
5 32_1200000 0.026531269 1039 0.31
5 32_1300000 0.028119305 1040 0.33
5 32_1700000 0.043667032 1041 0.36
5 32_1800000 0.010755147 1042 0.41
5 205_0 0.059014976 1043 0.42
5 205_100000 0 1044 0.47
5 205_200000 0.019495165 1045 0.47
5 205_300000 0.078595739 1046 0.49
5 242_300000 0.01426445 1047 0.57
5 242_0 0 1048 0.59
5 461_0 0.021712354 1049 0.59
5 104a_0 0.007463582 1050 0.61
5 104a_100000 0.016653136 1051 0.62
5 104a_200000 0.084534676 1052 0.63
5 244_100000 0.075274328 1053 0.72
5 88_600000 0.117381565 1054 0.79
5 358_0 0.045265634 1055 0.91
5 368_0 0.069473723 1056 0.96
5 326_0 0.005591218 1057 1.03
5 153_500000 0 1058 1.03
5 153_400000 0.007465633 1059 1.03
5 149_200000 0.029220754 1060 1.04
5 516_0 0.006982452 1061 1.07
5 111a_300000 0.035269895 1062 1.07
5 111a_500000 0 1063 1.11
5 111a_600000 0 1064 1.11
5 226_0 0.159488503 1065 1.11
5 226_100000 0.095262853 1066 1.27
5 226_300000 0.031509906 1067 1.36
5 252_300000 0.144595245 1068 1.40
5 197_300000 0.065855848 1069 1.54
5 197_400000 0.004561847 1070 1.61
5 327_0 0 1071 1.61
5 327_100000 0.007358591 1072 1.61
5 170_500000 0.006892212 1073 1.62
5 170_400000 0 1074 1.63
5 170_300000 0.005304264 1075 1.63
5 170_200000 0.030473647 1076 1.63
5 170_100000 0.002035245 1077 1.66
5 170_0 0 1078 1.66
5 158_500000 0.009186097 1079 1.66
5 158_400000 0 1080 1.67
5 158_300000 0.00915008 1081 1.67
5 158_200000 0 1082 1.68
5 158_100000 0 1083 1.68
5 158_0 0 1084 1.68
5 449_0 0 1085 1.68
5 36_0 0.043236219 1086 1.68
5 36_100000 0.01254228 1087 1.72
5 36_200000 0.000870865 1088 1.74
5 36_300000 0 1089 1.74
5 36_400000 0 1090 1.74
5 36_500000 0.014270026 1091 1.74
5 36_600000 0.013604206 1092 1.75
5 36_700000 0.020574581 1093 1.77
5 36_800000 0.030246363 1094 1.79
5 36_900000 0.036758824 1095 1.82
5 36_1100000 0 1096 1.85
5 36_1200000 0.0028183 1097 1.85
5 36_1300000 0 1098 1.86
5 36_1400000 0.002782579 1099 1.86
5 36_1500000 0 1100 1.86
5 36_1600000 0.039559958 1101 1.86
5 36_1700000 0 1102 1.90
5 266_0 0.027148647 1103 1.90
5 266_100000 0.021163248 1104 1.93
5 266_200000 1105 1.95
6 270_200000 0.010668433 1106 0.00
6 270_100000 0.00604853 1107 0.01
6 270_0 0 1108 0.02
6 104b_500000 0 1109 0.02
6 104b_400000 0.002849745 1110 0.02
6 104b_300000 0.009768815 1111 0.02
6 104b_200000 0.006154238 1112 0.03
6 104b_100000 0.0370803 1113 0.04
6 104b_0 0.009056064 1114 0.07
6 51_1400000 0.040343883 1115 0.08
6 51_1300000 0.005571304 1116 0.12
6 51_1200000 0.027516684 1117 0.13
6 51_1100000 0.004526318 1118 0.16
6 51_1000000 0 1119 0.16
6 51_900000 0.007440557 1120 0.16
6 51_800000 0.005517601 1121 0.17
6 51_700000 0 1122 0.17
6 51_600000 0.011055622 1123 0.17
6 51_500000 0.04111463 1124 0.18
6 51_100000 0.018451815 1125 0.22
6 8_3400000 0.022195981 1126 0.24
6 8_3200000 0.003093902 1127 0.27
6 8_3100000 0.006249167 1128 0.27
6 8_3000000 0.010698794 1129 0.27
6 8_2900000 0.004302865 1130 0.29
6 8_2800000 0.023123741 1131 0.29
6 8_2700000 0.003171019 1132 0.31
6 8_2600000 0.036270843 1133 0.32
6 8_2500000 0.015259806 1134 0.35
6 8_2400000 0.003678305 1135 0.37
6 8_2300000 0 1136 0.37
6 8_2200000 0.003593868 1137 0.37
6 8_2000000 0.019520403 1138 0.37
6 8_1900000 0 1139 0.39
6 8_1800000 0 1140 0.39
6 8_1700000 0.04262524 1141 0.39
6 8_1600000 0.020803722 1142 0.44
6 8_1500000 0.056751517 1143 0.46
6 8_1400000 0.001867364 1144 0.51
6 8_1300000 0.017221188 1145 0.52
6 8_1200000 0.001938157 1146 0.53
6 8_1100000 0.056322765 1147 0.54
6 8_1000000 0.013422473 1148 0.59
6 8_900000 0.041002438 1149 0.61
6 8_800000 0.006207585 1150 0.65
6 8_700000 0.003362346 1151 0.65
6 8_600000 0.00878778 1152 0.66
6 8_500000 0 1153 0.66
6 8_400000 0.042580813 1154 0.66
6 8_300000 0.003142665 1155 0.71
6 8_200000 0.007314658 1156 0.71
6 8_100000 0.056506477 1157 0.72
6 293_100000 0.004560962 1158 0.77
6 293_0 0.024248816 1159 0.78
6 379a_0 0 1160 0.80
6 21_0 0.007311452 1161 0.80
6 21_100000 0.020409655 1162 0.81
6 21_200000 0.014480908 1163 0.83
6 21_300000 0.009294005 1164 0.85
6 21_400000 0.017881142 1165 0.85
6 21_600000 0.052229853 1166 0.87
6 21_700000 0.033572713 1167 0.92
6 21_800000 0.029884815 1168 0.96
6 21_1100000 0.029570459 1169 0.99
6 21_1300000 0.01664111 1170 1.02
6 21_1400000 0.025214435 1171 1.03
6 21_1600000 0 1172 1.06
6 21_1700000 0 1173 1.06
6 307_100000 0 1174 1.06
6 136_500000 0 1175 1.06
6 136_400000 0 1176 1.06
6 136_300000 0 1177 1.06
6 136_200000 0 1178 1.06
6 136_0 0.002843819 1179 1.06
6 67_0 0.00275828 1180 1.06
6 67_300000 0 1181 1.07
6 67_400000 0 1182 1.07
6 67_500000 0 1183 1.07
6 67_600000 0.01641274 1184 1.07
6 67_1000000 0 1185 1.08
6 119_100000 0.016404149 1186 1.08
6 119_200000 0 1187 1.10
6 119_300000 0 1188 1.10
6 119_400000 0 1189 1.10
6 119_500000 0 1190 1.10
6 119_600000 0 1191 1.10
6 43_1000000 0 1192 1.10
6 43_1200000 0.007660505 1193 1.10
6 43_1300000 0 1194 1.11
6 43_1400000 0.004154 1195 1.11
6 43_1500000 0.006232545 1196 1.11
6 43_1600000 0.003938401 1197 1.12
6 248_200000 0 1198 1.12
6 248_100000 0 1199 1.12
6 248_0 0.034454855 1200 1.12
6 25_1500000 0.005293162 1201 1.15
6 25_1400000 0.003585192 1202 1.16
6 25_1200000 0 1203 1.16
6 25_1000000 0.018755121 1204 1.16
6 25_800000 0.003758126 1205 1.18
6 25_600000 0 1206 1.19
6 25_500000 0 1207 1.19
6 25_400000 0 1208 1.19
6 25_300000 0.00640051 1209 1.19
6 25_200000 0 1210 1.19
6 25_100000 0.015347357 1211 1.19
6 190_100000 0.012994504 1212 1.21
6 190_200000 0.017383359 1213 1.22
6 190_400000 0 1214 1.24
6 179_300000 0.033718297 1215 1.24
6 179_100000 0.009839058 1216 1.27
6 179_0 0.048182659 1217 1.28
6 262_200000 0 1218 1.33
6 262_0 0.009364928 1219 1.33
6 16_2500000 0.004936421 1220 1.34
6 16_2400000 0.00762006 1221 1.34
6 16_2300000 0.005499059 1222 1.35
6 16_2200000 0.009156493 1223 1.36
6 16_2100000 0.014874391 1224 1.37
6 16_2000000 0.003042834 1225 1.38
6 16_1900000 0.020253826 1226 1.38
6 16_1800000 0 1227 1.40
6 16_1700000 0.008344897 1228 1.40
6 16_1600000 0 1229 1.41
6 16_1500000 0.003372304 1230 1.41
6 16_1400000 0 1231 1.42
6 16_1300000 0.026805941 1232 1.42
6 16_1200000 0 1233 1.44
6 16_1100000 0.017763971 1234 1.44
6 16_1000000 0.021650079 1235 1.46
6 16_900000 0.007840163 1236 1.48
6 16_800000 0.062761908 1237 1.49
6 16_700000 0.002743808 1238 1.55
6 16_600000 0.056393842 1239 1.56
6 16_500000 0.008038535 1240 1.61
6 16_400000 0.00254588 1241 1.62
6 16_300000 0.010276907 1242 1.62
6 16_200000 0 1243 1.63
6 16_100000 1244 1.63
7 2724_0 0.077167005 1245 0.00
7 335_100000 0.067606335 1246 0.08
7 113_600000 0 1247 0.14
7 113_400000 0.009709455 1248 0.14
7 113_300000 0.002953643 1249 0.15
7 113_100000 0 1250 0.16
7 130_800000 0.015838249 1251 0.16
7 130_700000 0 1252 0.17
7 130_600000 0.007951781 1253 0.17
7 130_500000 0.081639359 1254 0.18
7 130_300000 0.209345879 1255 0.26
7 416_0 0.035520872 1256 0.47
7 255_300000 0.114730813 1257 0.51
7 476_0 0 1258 0.62
7 9c_500000 0.026435143 1259 0.62
7 9c_400000 0.018415441 1260 0.65
7 9c_300000 0 1261 0.67
7 9c_200000 0.003260979 1262 0.67
7 9c_100000 0.012771337 1263 0.67
7 114b_0 0.018620941 1264 0.68
7 114b_100000 0 1265 0.70
7 114b_200000 0.028302271 1266 0.70
7 114b_400000 0.012779208 1267 0.73
7 114b_500000 0.012673845 1268 0.74
7 274_100000 0 1269 0.76
7 29_0 0 1270 0.76
7 29_100000 0.021819793 1271 0.76
7 29_200000 0.003492369 1272 0.78
7 29_300000 0 1273 0.78
7 29_400000 0 1274 0.78
7 29_500000 0.006486127 1275 0.78
7 29_600000 0.017999878 1276 0.79
7 29_700000 0 1277 0.81
7 29_800000 0 1278 0.81
7 29_900000 0.021848032 1279 0.81
7 29_1000000 0.054409963 1280 0.83
7 29_1100000 0.007068296 1281 0.88
7 29_1300000 0.008676771 1282 0.89
7 29_1400000 0.011076172 1283 0.90
7 29_1700000 0 1284 0.91
7 166_200000 0.00554132 1285 0.91
7 166_300000 0 1286 0.91
7 166_400000 0.019709143 1287 0.91
7 166_500000 0.06270858 1288 0.93
7 56_1000000 0.074890874 1289 1.00
7 56_1200000 0.148573223 1290 1.07
7 351_0 0.003391816 1291 1.22
7 351_100000 0.018276582 1292 1.22
7 229_100000 0.046085077 1293 1.24
7 14_300000 0.018157991 1294 1.29
7 14_400000 0.005490334 1295 1.31
7 14_500000 0.003324156 1296 1.31
7 14_600000 0.049970858 1297 1.31
7 14_1600000 0 1298 1.36
7 14_1800000 0.006583103 1299 1.36
7 14_1900000 0.011733059 1300 1.37
7 14_2000000 0 1301 1.38
7 14_2600000 0 1302 1.38
7 184_100000 0.019259912 1303 1.38
7 47a_400000 0.035120326 1304 1.40
7 70a_0 1305 1.44
8 109_800000 0 1306 0.00
8 109_700000 0 1307 0.00
8 109_600000 0.005559372 1308 0.00
8 109_500000 0 1309 0.01
8 109_400000 0 1310 0.01
8 109_300000 0.022390881 1311 0.01
8 109_200000 0.011038109 1312 0.03
8 109_100000 0.047306796 1313 0.04
8 233_300000 0.017582971 1314 0.09
8 233_200000 0.096176741 1315 0.10
8 233_100000 0.049563057 1316 0.20
8 233_0 0.002771391 1317 0.25
8 11_0 0 1318 0.25
8 11_100000 0.024861897 1319 0.25
8 11_200000 0.006699572 1320 0.28
8 11_300000 0.024930219 1321 0.28
8 11_400000 0.003329168 1322 0.31
8 11_500000 0.025340472 1323 0.31
8 11_600000 0.036142435 1324 0.34
8 11_700000 0.022028881 1325 0.37
8 11_800000 0.019089579 1326 0.40
8 11_900000 0.021285341 1327 0.41
8 11_1000000 0.003160352 1328 0.44
8 11_1100000 0.0330095 1329 0.44
8 11_1200000 0.003334353 1330 0.47
8 11_1300000 0.002715838 1331 0.48
8 11_1400000 0.001994319 1332 0.48
8 11_1500000 0.031984936 1333 0.48
8 11_1600000 0 1334 0.51
8 11_1700000 0.002806123 1335 0.51
8 11_1800000 0 1336 0.52
8 11_1900000 0 1337 0.52
8 11_2000000 0.008534893 1338 0.52
8 11_2100000 0.002612737 1339 0.52
8 11_2400000 0 1340 0.53
8 11_2500000 0 1341 0.53
8 11_2600000 0.002266746 1342 0.53
8 11_2700000 0.030641197 1343 0.53
8 11_2800000 0.09742027 1344 0.56
8 11_2900000 0.003301584 1345 0.66
8 155_0 0 1346 0.66
8 155_100000 0.031039172 1347 0.66
8 155_200000 0.014298426 1348 0.69
8 155_300000 0.00267261 1349 0.71
8 155_400000 0.024439657 1350 0.71
8 76_300000 0.057825167 1351 0.73
8 76_500000 0 1352 0.79
8 76_600000 0.002576872 1353 0.79
8 76_700000 0.006215305 1354 0.79
8 76_800000 6.06E-005 1355 0.80
8 76_900000 0.003262212 1356 0.80
8 59_0 0 1357 0.80
8 59_100000 0.040650289 1358 0.80
8 59_200000 0 1359 0.84
8 59_300000 0.005652688 1360 0.84
8 59_400000 0 1361 0.85
8 59_600000 0.003566669 1362 0.85
8 59_900000 0.008273017 1363 0.85
8 59_1000000 0 1364 0.86
8 59_1100000 0 1365 0.86
8 59_1200000 0 1366 0.86
8 59_1400000 0.006523203 1367 0.86
8 217_300000 0.040746339 1368 0.87
8 217_100000 0 1369 0.91
8 118_700000 0.002940508 1370 0.91
8 118_600000 0 1371 0.91
8 118_500000 0 1372 0.91
8 118_400000 0 1373 0.91
8 118_300000 0.001120276 1374 0.91
8 118_200000 0 1375 0.91
8 118_100000 0 1376 0.91
8 118_0 0 1377 0.91
8 77_800000 0 1378 0.91
8 77_900000 0 1379 0.91
8 77_1000000 0 1380 0.91
8 38_1400000 0.027153119 1381 0.91
8 412_0 0.019924111 1382 0.94
8 46_1300000 0.007893316 1383 0.96
8 46_800000 0.002451096 1384 0.97
8 46_700000 0.003544492 1385 0.97
8 46_400000 0.06446836 1386 0.97
8 46_300000 0 1387 1.04
8 46_200000 0.018028664 1388 1.04
8 46_100000 0.003989968 1389 1.06
8 46_0 0.002954815 1390 1.06
8 12b_100000 0 1391 1.06
8 12b_200000 0.00126143 1392 1.06
8 12b_300000 0.035098128 1393 1.06
8 12b_400000 0 1394 1.10
8 12b_500000 0.007351233 1395 1.10
8 12b_600000 0 1396 1.11
8 12b_700000 0.011091116 1397 1.11
8 12b_800000 0.004225132 1398 1.12
8 12b_900000 0.011694689 1399 1.12
8 12b_1000000 0.050068664 1400 1.13
8 12b_1100000 0.033467478 1401 1.18
8 12b_1200000 0 1402 1.22
8 12b_1300000 0 1403 1.22
8 12b_1400000 0.011278369 1404 1.22
8 12b_1600000 0 1405 1.23
8 12b_1700000 0.011599833 1406 1.23
8 12b_1800000 0 1407 1.24
8 12b_1900000 0.004187282 1408 1.24
8 12b_2000000 0.013122586 1409 1.24
8 12b_2100000 0.002395782 1410 1.26
8 12b_2200000 0 1411 1.26
8 12b_2300000 0.00411469 1412 1.26
8 12b_2400000 0 1413 1.26
8 24_2100000 0.00474101 1414 1.26
8 24_1900000 0 1415 1.27
8 24_1800000 0.009237939 1416 1.27
8 24_1700000 0 1417 1.28
8 24_1600000 0 1418 1.28
8 24_1500000 0.018814936 1419 1.28
8 24_1400000 0 1420 1.30
8 24_1300000 0.01096139 1421 1.30
8 24_1200000 0 1422 1.31
8 24_1100000 0.005614695 1423 1.31
8 24_1000000 0 1424 1.31
8 24_900000 0.003459742 1425 1.31
8 24_800000 0.012033195 1426 1.32
8 24_600000 0.017205979 1427 1.33
8 24_500000 0.004160576 1428 1.35
8 24_400000 0.030122179 1429 1.35
8 24_300000 0.005230692 1430 1.38
8 24_100000 0 1431 1.38
8 24_0 0.005667574 1432 1.38
8 45_1600000 0.003479239 1433 1.39
8 45_1500000 0.013326544 1434 1.39
8 45_1400000 0 1435 1.41
8 45_1300000 0 1436 1.41
8 45_1200000 0 1437 1.41
8 45_1000000 0.011191504 1438 1.41
8 45_900000 0 1439 1.42
8 45_800000 0 1440 1.42
8 45_700000 0 1441 1.42
8 45_400000 0.0034342 1442 1.42
8 45_300000 0 1443 1.42
8 45_200000 0.01746974 1444 1.42
8 45_0 0.035128559 1445 1.44
8 285_100000 0 1446 1.47
8 285_0 0.070097202 1447 1.47
8 3_300000 0.083872698 1448 1.54
8 3_600000 0.039317861 1449 1.63
8 3_900000 0.027392115 1450 1.67
8 3_1100000 0.03727417 1451 1.70
8 3_1200000 0 1452 1.73
8 3_1400000 0.011683398 1453 1.73
8 3_1600000 0 1454 1.74
8 3_1700000 0.008455766 1455 1.74
8 3_1800000 0.004447948 1456 1.75
8 3_1900000 0.010795565 1457 1.76
8 3_2000000 0 1458 1.77
8 3_2100000 0.010346495 1459 1.77
8 3_2200000 0.012754707 1460 1.78
8 3_2300000 0.016122263 1461 1.79
8 3_2400000 0.004677813 1462 1.81
8 3_2500000 0.002764421 1463 1.81
8 3_2600000 0.010452609 1464 1.81
8 3_2700000 0 1465 1.82
8 3_2800000 0.007784579 1466 1.82
8 3_2900000 0.036077624 1467 1.83
8 3_3100000 0 1468 1.87
8 3_3200000 0 1469 1.87
8 3_3300000 0 1470 1.87
8 3_3400000 0.00279892 1471 1.87
8 3_3500000 0 1472 1.87
8 3_3600000 0 1473 1.87
8 3_3700000 0.002836348 1474 1.87
8 3_3800000 0.00324627 1475 1.87
8 3_3900000 0 1476 1.88
8 3_4000000 0.054471381 1477 1.88
8 3_4100000 0 1478 1.93
8 3_4200000 0.013497329 1479 1.93
8 3_4300000 1480 1.95
9 68b_1100000 0 1481 0.00
9 68b_900000 0.008617481 1482 0.00
9 68b_800000 0 1483 0.01
9 68b_700000 0 1484 0.01
9 68b_600000 0.017863587 1485 0.01
9 68b_500000 0.010477981 1486 0.03
9 68b_400000 0.005477024 1487 0.04
9 68b_300000 0.002441651 1488 0.04
9 68b_200000 0.004204881 1489 0.04
9 68b_100000 0.015106772 1490 0.05
9 124_700000 0.072805495 1491 0.06
9 124_600000 0 1492 0.14
9 124_500000 0.007816841 1493 0.14
9 124_400000 0 1494 0.14
9 124_300000 0 1495 0.14
9 124_100000 0 1496 0.14
9 124_0 0 1497 0.14
9 20_2300000 0 1498 0.14
9 20_2200000 0.004647854 1499 0.14
9 20_2100000 0.008908503 1500 0.15
9 20_2000000 0.022626381 1501 0.16
9 20_1800000 0.035917353 1502 0.18
9 20_1600000 0.003078006 1503 0.22
9 20_1500000 0 1504 0.22
9 20_1400000 0 1505 0.22
9 20_1200000 0 1506 0.22
9 20_1100000 0.007294854 1507 0.22
9 20_1000000 0 1508 0.23
9 20_900000 0.106057816 1509 0.23
9 20_700000 0.003766877 1510 0.33
9 20_600000 0.013966764 1511 0.34
9 20_500000 0.015347068 1512 0.35
9 20_400000 0 1513 0.37
9 352_0 0 1514 0.37
9 86_300000 0 1515 0.37
9 86_100000 0 1516 0.37
9 12a_300000 0.002480486 1517 0.37
9 12a_0 0 1518 0.37
9 283_0 0.002728757 1519 0.37
9 283_100000 0.023699758 1520 0.37
9 147_500000 0 1521 0.40
9 147_600000 0 1522 0.40
9 152_100000 0 1523 0.40
9 341_0 0 1524 0.40
9 103b_0 0.001053381 1525 0.40
9 103b_100000 0.003368773 1526 0.40
9 103b_300000 0.034615006 1527 0.40
9 103b_600000 0.008736579 1528 0.43
9 84_1000000 0.008936674 1529 0.44
9 84_700000 0.016139778 1530 0.45
9 84_500000 0 1531 0.47
9 84_400000 0.051162538 1532 0.47
9 84_300000 0.024778488 1533 0.52
9 97c_0 0.021312865 1534 0.54
9 97c_100000 0.017707457 1535 0.57
9 97c_200000 0 1536 0.58
9 157_300000 0.023825504 1537 0.58
9 157_400000 0 1538 0.61
9 157_500000 0.007081283 1539 0.61
9 238_0 0.005237259 1540 0.61
9 238_200000 0.011377145 1541 0.62
9 63b_900000 0.013587594 1542 0.63
9 63b_800000 0 1543 0.64
9 63b_700000 0 1544 0.64
9 63b_600000 0 1545 0.64
9 63b_500000 0 1546 0.64
9 63b_400000 0 1547 0.64
9 63b_300000 0.003448272 1548 0.64
9 63b_200000 0 1549 0.65
9 63b_100000 0.042606587 1550 0.65
9 63b_0 0.014091552 1551 0.69
9 257a_0 0 1552 0.70
9 7_3800000 0 1553 0.70
9 7_3700000 0.004046443 1554 0.70
9 7_3400000 0.065466242 1555 0.71
9 7_3300000 0.047058871 1556 0.77
9 7_3200000 0.013012878 1557 0.82
9 7_3100000 0.009400646 1558 0.83
9 7_3000000 0.008456938 1559 0.84
9 7_2900000 0 1560 0.85
9 7_2800000 0.00255502 1561 0.85
9 7_2700000 0.008307652 1562 0.85
9 7_2600000 0 1563 0.86
9 7_2500000 0.002097829 1564 0.86
9 7_2400000 0.060967303 1565 0.86
9 7_2200000 0 1566 0.93
9 7_2000000 0.01597552 1567 0.93
9 7_1900000 0.003339898 1568 0.94
9 7_1800000 0.008566715 1569 0.95
9 7_1600000 0.006447284 1570 0.95
9 7_1400000 0.006814296 1571 0.96
9 7_1300000 0 1572 0.97
9 7_1200000 0 1573 0.97
9 7_1100000 0 1574 0.97
9 7_1000000 0 1575 0.97
9 7_900000 0.010155041 1576 0.97
9 7_800000 0.006415131 1577 0.98
9 7_700000 0 1578 0.98
9 7_600000 0.041684352 1579 0.98
9 7_500000 0.004267436 1580 1.03
9 7_400000 0.006868818 1581 1.03
9 7_300000 0.008609102 1582 1.04
9 7_200000 0 1583 1.04
9 7_100000 0 1584 1.04
9 7_0 1585 1.04'''.split('\n')
SF = '''10 13_2600000 0 1 0.00
10 13_2500000 0.017659466 2 0.00
10 13_2400000 0 3 0.02
10 13_2300000 0.012655147 4 0.02
10 13_2200000 0 5 0.03
10 13_2100000 0 6 0.03
10 13_2000000 0.042215403 7 0.03
10 13_1900000 0 8 0.07
10 13_1800000 0.088594458 9 0.07
10 13_1700000 0.015733438 10 0.16
10 13_1600000 0.031020215 11 0.18
10 13_1500000 0.030677948 12 0.21
10 13_1400000 0.01167283 13 0.24
10 13_1300000 0.008764206 14 0.25
10 13_1200000 0.024238202 15 0.26
10 13_1100000 0 16 0.28
10 13_1000000 0.019575741 17 0.28
10 13_900000 0 18 0.30
10 13_800000 0.006889548 19 0.30
10 13_700000 0 20 0.31
10 13_600000 0 21 0.31
10 13_500000 0 22 0.31
10 13_400000 0 23 0.31
10 13_300000 0 24 0.31
10 13_200000 0 25 0.31
10 13_100000 0 26 0.31
10 13_0 0 27 0.31
10 749_0 0 28 0.31
10 206_400000 0 29 0.31
10 206_300000 0 30 0.31
10 206_200000 0 31 0.31
10 206_100000 0 32 0.31
10 206_0 0 33 0.31
10 4a_100000 0 34 0.31
10 4a_0 0 35 0.31
10 48a_800000 0 36 0.31
10 48a_700000 0 37 0.31
10 48a_600000 0 38 0.31
10 48a_400000 0 39 0.31
10 48a_300000 0 40 0.31
10 48a_200000 0 41 0.31
10 48a_100000 0 42 0.31
10 48a_0 0 43 0.31
10 210_0 0 44 0.31
10 210_100000 0 45 0.31
10 210_200000 0 46 0.31
10 210_300000 0 47 0.31
10 90_100000 0 48 0.31
10 90_200000 0 49 0.31
10 90_300000 0 50 0.31
10 90_400000 0 51 0.31
10 90_500000 0 52 0.31
10 90_600000 0 53 0.31
10 90_700000 0 54 0.31
10 90_800000 0 55 0.31
10 90_900000 0 56 0.31
10 90_1000000 0 57 0.31
10 324_100000 0 58 0.31
10 324_0 0 59 0.31
10 445_0 0 60 0.31
10 223_300000 0 61 0.31
10 223_200000 0 62 0.31
10 223_100000 0 63 0.31
10 223_0 0 64 0.31
10 193b_0 0 65 0.31
10 40_100000 0 66 0.31
10 40_500000 0 67 0.31
10 40_600000 0 68 0.31
10 40_700000 0.013458842 69 0.31
10 40_900000 0.020111335 70 0.32
10 40_1100000 0 71 0.34
10 40_1200000 0 72 0.34
10 40_1300000 0 73 0.34
10 40_1400000 0.002829093 74 0.34
10 267_200000 0.001449472 75 0.35
10 33_1700000 0.001502083 76 0.35
10 33_1600000 0 77 0.35
10 33_1500000 0 78 0.35
10 33_1400000 0.001549766 79 0.35
10 33_1300000 0.004080746 80 0.35
10 33_1200000 0 81 0.35
10 33_1100000 0.030367632 82 0.35
10 33_1000000 0.011279534 83 0.39
10 33_700000 0 84 0.40
10 33_600000 0 85 0.40
10 33_400000 0 86 0.40
10 9a_1100000 0 87 0.40
10 9a_1000000 0 88 0.40
10 9a_800000 0 89 0.40
10 9a_700000 0.01011728 90 0.40
10 9a_600000 0.010699292 91 0.41
10 9a_400000 0 92 0.42
10 9a_300000 0 93 0.42
10 9a_200000 0 94 0.42
10 156_500000 0.008340939 95 0.42
10 156_100000 0 96 0.43
10 172_0 0.006382887 97 0.43
10 172_200000 0 98 0.43
10 172_300000 0.007948458 99 0.43
10 50_0 0 100 0.44
10 50_100000 0 101 0.44
10 50_200000 0.006843627 102 0.44
10 50_300000 0.004818614 103 0.45
10 50_800000 0.002551058 104 0.45
10 50_900000 0 105 0.45
10 50_1100000 0.01404038 106 0.45
10 50_1200000 0.003795913 107 0.47
10 50_1300000 0.007865277 108 0.47
10 50_1400000 0 109 0.48
10 209_0 0 110 0.48
10 209_100000 0.011470044 111 0.48
10 209_300000 0 112 0.49
10 490_0 0 113 0.49
10 193a_300000 0.012855735 114 0.49
10 193a_200000 0.026271358 115 0.50
10 193a_100000 0 116 0.53
10 193a_0 0 117 0.53
10 125b_600000 0.006718379 118 0.53
10 125b_500000 0.006958995 119 0.54
10 125b_400000 0.020384125 120 0.54
10 125b_300000 0 121 0.56
10 125b_200000 0 122 0.56
10 125b_100000 0 123 0.56
10 125b_0 0 124 0.56
10 188_500000 0 125 0.56
10 188_400000 0.008059463 126 0.56
10 188_300000 0.022799104 127 0.57
10 188_200000 0 128 0.60
10 311_100000 0.024069335 129 0.60
10 311_0 0 130 0.62
10 204_0 0.011527177 131 0.62
10 204_100000 0.005740671 132 0.63
10 204_200000 0 133 0.64
10 204_300000 0.016637474 134 0.64
10 204_400000 0 135 0.65
10 87_0 0.006722284 136 0.65
10 87_100000 0.01529468 137 0.66
10 87_200000 0 138 0.68
10 87_300000 0 139 0.68
10 87_400000 0 140 0.68
10 87_500000 0.027288726 141 0.68
10 87_600000 0.017727546 142 0.70
10 87_700000 0.007778345 143 0.72
10 87_800000 0.013493717 144 0.73
10 87_900000 0 145 0.74
10 87_1000000 0.014931623 146 0.74
10 159_0 0.019343303 147 0.76
10 159_100000 0.031502931 148 0.78
10 159_200000 0.029904045 149 0.81
10 159_300000 0.011035988 150 0.84
10 159_400000 0.01193608 151 0.85
10 159_500000 152 0.86
11 75_0 0 153 0.00
11 75_100000 0 154 0.00
11 75_200000 0.038150519 155 0.00
11 75_300000 0.008868998 156 0.04
11 75_400000 0.015927922 157 0.05
11 75_500000 0 158 0.06
11 75_600000 0.018628662 159 0.06
11 75_700000 0.009786622 160 0.08
11 75_800000 0.025909969 161 0.09
11 75_900000 0.016579826 162 0.12
11 75_1000000 0.007948392 163 0.13
11 75_1100000 0.006259437 164 0.14
11 228_0 0.021468569 165 0.15
11 228_100000 0.01744936 166 0.17
11 228_200000 0 167 0.19
11 228_300000 0.006499147 168 0.19
11 273b_0 0.021448011 169 0.19
11 273b_100000 0.008321626 170 0.21
11 213_0 0.008398049 171 0.22
11 213_100000 0 172 0.23
11 213_200000 0.002351967 173 0.23
11 213_300000 0.008739306 174 0.23
11 213_400000 0 175 0.24
11 63a_0 0 176 0.24
11 63a_100000 0.016569617 177 0.24
11 63a_200000 0 178 0.26
11 257b_0 0.016903621 179 0.26
11 257b_100000 0 180 0.28
11 30_1900000 0.006809541 181 0.28
11 30_1800000 0.025592949 182 0.28
11 30_1700000 0.007908659 183 0.31
11 30_1600000 0 184 0.32
11 30_1500000 0.01293421 185 0.32
11 30_1400000 0 186 0.33
11 30_1300000 0 187 0.33
11 30_1200000 0.01435717 188 0.33
11 30_1100000 0.035393194 189 0.34
11 48b_600000 0 190 0.38
11 48b_500000 0.008437873 191 0.38
11 48b_400000 0.006973659 192 0.39
11 48b_300000 0 193 0.39
11 48b_200000 0.007032977 194 0.39
11 48b_100000 0 195 0.40
11 243_0 0.024330329 196 0.40
11 243_100000 0 197 0.43
11 243_200000 0 198 0.43
11 243_300000 0 199 0.43
11 182_400000 0 200 0.43
11 182_300000 0 201 0.43
11 182_200000 0 202 0.43
11 182_100000 0 203 0.43
11 182_0 0 204 0.43
11 415_0 0.037053739 205 0.43
11 185_0 0 206 0.46
11 185_100000 0 207 0.46
11 185_400000 0 208 0.46
11 39b_200000 0 209 0.46
11 39b_300000 0.008807442 210 0.46
11 39b_500000 0 211 0.47
11 39b_600000 0 212 0.47
11 39b_700000 0.015617587 213 0.47
11 6b_1200000 0 214 0.49
11 6b_1000000 0 215 0.49
11 47b_300000 0 216 0.49
11 47b_500000 0 217 0.49
11 47b_900000 0.016921957 218 0.49
11 49_100000 0 219 0.50
11 49_200000 0 220 0.50
11 49_300000 0 221 0.50
11 49_700000 0 222 0.50
11 49_1000000 0 223 0.50
11 49_1100000 0 224 0.50
11 49_1200000 0 225 0.50
11 49_1300000 0.003202565 226 0.50
11 239_100000 0 227 0.51
11 221a_100000 0.009850178 228 0.51
11 221a_0 0.006872602 229 0.52
11 100_0 0 230 0.52
11 100_100000 0.025983769 231 0.52
11 100_200000 0 232 0.55
11 100_300000 0 233 0.55
11 100_400000 0 234 0.55
11 100_500000 0.006613656 235 0.55
11 100_600000 0 236 0.56
11 100_700000 0 237 0.56
11 100_800000 0 238 0.56
11 100_900000 0 239 0.56
11 598_0 0.033715321 240 0.56
11 167_0 0 241 0.59
11 167_100000 0 242 0.59
11 167_200000 0.034459741 243 0.59
11 162_0 0.012246354 244 0.63
11 162_100000 0 245 0.64
11 162_200000 0 246 0.64
11 162_300000 0 247 0.64
11 162_400000 0 248 0.64
11 162_500000 0 249 0.64
11 161_500000 0 250 0.64
11 161_400000 0.020540264 251 0.64
11 161_300000 0.010499661 252 0.66
11 161_200000 0.010129008 253 0.67
11 161_100000 0 254 0.68
11 161_0 0 255 0.68
11 22a_0 0.023300803 256 0.68
11 22b_0 0.020054127 257 0.70
11 22b_100000 0.025595583 258 0.72
11 22b_200000 0.007134406 259 0.75
11 22b_300000 0.02112039 260 0.75
11 22b_400000 0 261 0.78
11 22b_500000 0.008102547 262 0.78
11 22b_600000 0.019157098 263 0.78
11 22b_700000 0.011854936 264 0.80
11 22b_800000 0.011067086 265 0.81
11 22b_900000 0.02220312 266 0.83
11 22b_1000000 0.027283481 267 0.85
11 22b_1100000 0.00238374 268 0.88
11 22b_1200000 269 0.88
12 58b_900000 0.014035171 270 0.00
12 58b_800000 0.000870665 271 0.01
12 58b_700000 0 272 0.01
12 58b_600000 0.023597201 273 0.01
12 58b_500000 0.008763686 274 0.04
12 58b_400000 0.008849392 275 0.05
12 58b_300000 0 276 0.06
12 58b_200000 0.010071692 277 0.06
12 58b_100000 0.013140862 278 0.07
12 220_400000 0 279 0.08
12 220_300000 0.010868148 280 0.08
12 220_200000 0.011445071 281 0.09
12 220_100000 0.016256319 282 0.10
12 336_100000 0.011029979 283 0.12
12 336_0 0.020932285 284 0.13
12 132_700000 0 285 0.15
12 132_600000 0.015633229 286 0.15
12 132_500000 0 287 0.17
12 132_400000 0.01453184 288 0.17
12 132_300000 0.010972617 289 0.18
12 132_200000 0.015109312 290 0.19
12 132_100000 0 291 0.21
12 132_0 0.017383657 292 0.21
12 349_0 0 293 0.22
12 332_0 0 294 0.22
12 332_100000 0.006400404 295 0.22
12 224_0 0.000926478 296 0.23
12 224_100000 0.007806511 297 0.23
12 224_300000 0 298 0.24
12 785_0 0.024401052 299 0.24
12 381_100000 0.008626971 300 0.26
12 703_0 0 301 0.27
12 299_300000 0.007350618 302 0.27
12 287_0 0 303 0.28
12 287_100000 0.013319221 304 0.28
12 201_0 0.013486715 305 0.29
12 201_100000 0 306 0.31
12 201_200000 0.012841493 307 0.31
12 201_300000 0.030394488 308 0.32
12 91_200000 0.031419892 309 0.35
12 91_300000 0 310 0.38
12 91_400000 0 311 0.38
12 91_600000 0 312 0.38
12 91_800000 0 313 0.38
12 31_1600000 0.003037315 314 0.38
12 31_1400000 0.014285098 315 0.38
12 31_1300000 0 316 0.40
12 31_1200000 0 317 0.40
12 31_800000 0 318 0.40
12 31_700000 0.012563775 319 0.40
12 31_600000 0.005310784 320 0.41
12 31_300000 0 321 0.42
12 31_200000 0 322 0.42
12 31_100000 0 323 0.42
12 114a_200000 0 324 0.42
12 114a_300000 0.002724758 325 0.42
12 241_0 0.009353737 326 0.42
12 241_200000 0 327 0.43
12 241_300000 0 328 0.43
12 160_400000 0 329 0.43
12 160_300000 0.010276809 330 0.43
12 160_200000 0.022191759 331 0.44
12 160_100000 0.06431455 332 0.46
12 143_300000 0 333 0.52
12 143_200000 0.001452377 334 0.52
12 143_100000 0.081697135 335 0.53
12 250_200000 0 336 0.61
12 271_100000 0.022274376 337 0.61
12 271_200000 0.008709098 338 0.63
12 819_0 0 339 0.64
12 309_0 0 340 0.64
12 142_0 0 341 0.64
12 142_600000 0.017456482 342 0.64
12 435_0 0 343 0.66
12 140a_200000 0.013417229 344 0.66
12 140a_100000 0.01377339 345 0.67
12 140a_0 0.010802526 346 0.68
12 37_1700000 0 347 0.69
12 37_1600000 0.011169317 348 0.69
12 37_1500000 0 349 0.71
12 37_1400000 0.00612988 350 0.71
12 37_1300000 0 351 0.71
12 37_1200000 0.005540463 352 0.71
12 37_1100000 0 353 0.72
12 37_1000000 0.017230632 354 0.72
12 37_900000 0 355 0.73
12 37_800000 0.011512072 356 0.73
12 37_700000 0 357 0.75
12 37_600000 0 358 0.75
12 37_500000 0.027669018 359 0.75
12 37_400000 0 360 0.77
12 37_300000 0.020496951 361 0.77
12 37_200000 0 362 0.79
12 37_100000 0.024818375 363 0.79
12 37_0 0.011319023 364 0.82
12 334_0 0.012274342 365 0.83
12 334_100000 0 366 0.84
12 297_100000 0 367 0.84
12 297_0 0.007204838 368 0.84
12 39a_800000 0 369 0.85
12 39a_700000 0.014910983 370 0.85
12 39a_600000 0 371 0.86
12 39a_500000 0 372 0.86
12 39a_400000 0 373 0.86
12 39a_300000 0.0212155 374 0.86
12 39a_200000 0 375 0.89
12 39a_100000 0.014699032 376 0.89
12 39a_0 0.031037298 377 0.90
12 17b_0 0.001410292 378 0.93
12 17b_100000 0.016914813 379 0.93
12 17b_200000 0 380 0.95
12 17b_300000 0.005639868 381 0.95
12 17b_400000 0 382 0.96
12 17b_500000 0.005780437 383 0.96
12 17b_600000 0.028287834 384 0.96
12 17b_700000 0.026955985 385 0.99
12 17b_800000 0 386 1.02
12 17b_900000 0.027013864 387 1.02
12 17b_1000000 0.006766459 388 1.04
12 17b_1100000 0 389 1.05
12 17b_1200000 0.009946739 390 1.05
12 44b_0 0.015324362 391 1.06
12 44b_100000 0.014947009 392 1.08
12 44b_200000 0.01008157 393 1.09
12 44b_300000 0 394 1.10
12 44b_400000 0.00771155 395 1.10
12 44b_500000 0.090004493 396 1.11
12 683_0 397 1.20
13 122_0 0 398 0.00
13 122_100000 0 399 0.00
13 122_200000 0.018837255 400 0.00
13 122_300000 0 401 0.02
13 122_400000 0 402 0.02
13 122_500000 0.009693861 403 0.02
13 122_600000 0.021075616 404 0.03
13 235_300000 0 405 0.05
13 235_200000 0.027092059 406 0.05
13 52_0 0.008605573 407 0.08
13 52_100000 0 408 0.09
13 52_200000 0 409 0.09
13 52_300000 0 410 0.09
13 52_500000 0.011202825 411 0.09
13 52_700000 0 412 0.10
13 52_800000 0 413 0.10
13 52_1300000 0 414 0.10
13 52_1400000 0 415 0.10
13 329_0 0.015153718 416 0.10
13 129b_200000 0.003972185 417 0.11
13 129b_300000 0 418 0.12
13 129b_400000 0.0260999 419 0.12
13 28_1600000 0 420 0.14
13 28_1400000 0.013337838 421 0.14
13 28_1300000 0 422 0.16
13 28_1100000 0.021041251 423 0.16
13 28_500000 0 424 0.18
13 186_300000 0.012917558 425 0.18
13 186_200000 0 426 0.19
13 139_600000 0 427 0.19
13 139_400000 0 428 0.19
13 139_300000 0.007127745 429 0.19
13 139_100000 0.006629694 430 0.20
13 139_0 0 431 0.20
13 34_600000 0 432 0.20
13 34_900000 0 433 0.20
13 34_1000000 0 434 0.20
13 34_1200000 0.034437644 435 0.20
13 34_1400000 0 436 0.24
13 34_1500000 0.011307362 437 0.24
13 34_1600000 0 438 0.25
13 34_1700000 0.019164982 439 0.25
13 236_100000 0 440 0.27
13 236_200000 0 441 0.27
13 264_0 0.020345572 442 0.27
13 225_200000 0.009960918 443 0.29
13 225_300000 0 444 0.30
13 107_100000 0 445 0.30
13 107_200000 0 446 0.30
13 107_300000 0 447 0.30
13 107_800000 0 448 0.30
13 138_200000 0.015175271 449 0.30
13 138_100000 0 450 0.31
13 230_0 0.015245249 451 0.31
13 230_200000 0 452 0.33
13 230_300000 0 453 0.33
13 174_0 0.007805769 454 0.33
13 174_100000 0 455 0.34
13 174_200000 0.018482195 456 0.34
13 174_400000 0 457 0.35
13 174_500000 0 458 0.35
13 344_0 0.009399104 459 0.35
13 108_800000 0.019180629 460 0.36
13 108_700000 0.016942395 461 0.38
13 108_600000 0 462 0.40
13 108_500000 0 463 0.40
13 108_400000 0 464 0.40
13 108_300000 0.026004183 465 0.40
13 108_200000 0 466 0.43
13 108_100000 0.029392922 467 0.43
13 245_300000 0.011826221 468 0.46
13 245_200000 0.03866471 469 0.47
13 245_100000 0.007072019 470 0.51
13 245_0 0 471 0.51
13 4b_3800000 0.016063859 472 0.51
13 4b_3700000 0.02035975 473 0.53
13 4b_3600000 0.013735405 474 0.55
13 4b_3500000 0.00741463 475 0.56
13 4b_3400000 0.005742509 476 0.57
13 4b_3300000 0.023026924 477 0.58
13 4b_3200000 0 478 0.60
13 4b_3100000 0.022650433 479 0.60
13 4b_3000000 0 480 0.62
13 4b_2900000 0 481 0.62
13 4b_2800000 0 482 0.62
13 4b_2700000 0.023149412 483 0.62
13 4b_2600000 0 484 0.65
13 4b_2500000 0.014299731 485 0.65
13 4b_2400000 0.014730189 486 0.66
13 4b_2300000 0.026708814 487 0.67
13 4b_2200000 0.009353885 488 0.70
13 4b_2100000 0 489 0.71
13 4b_2000000 0.029631933 490 0.71
13 4b_1900000 0.00986188 491 0.74
13 4b_1800000 0.011299754 492 0.75
13 4b_1700000 0 493 0.76
13 4b_1600000 0.008188256 494 0.76
13 4b_1500000 0.010623534 495 0.77
13 4b_1400000 0 496 0.78
13 4b_1300000 0.029137426 497 0.78
13 4b_1200000 0 498 0.81
13 4b_1100000 0.008139888 499 0.81
13 4b_1000000 0.020628213 500 0.82
13 4b_900000 0.008823001 501 0.84
13 4b_800000 0.009391673 502 0.85
13 4b_700000 0.01837459 503 0.86
13 4b_600000 0.00750236 504 0.87
13 4b_500000 0.02535383 505 0.88
13 4b_400000 0.013567218 506 0.91
13 4b_300000 0 507 0.92
13 4b_200000 0.023185442 508 0.92
13 4b_100000 0.035341743 509 0.94
13 4b_0 0 510 0.98
13 83b_700000 0.033116926 511 0.98
13 83b_600000 0.024967388 512 1.01
13 83b_500000 0 513 1.04
13 83b_400000 0.041251542 514 1.04
13 83b_300000 0 515 1.08
13 83b_200000 0 516 1.08
13 83b_100000 0.006270711 517 1.08
13 83b_0 0.008770689 518 1.09
13 115a_300000 0.011382731 519 1.09
13 115a_200000 0.008753774 520 1.11
13 115a_100000 0.019879781 521 1.11
13 115a_0 522 1.13
14 102_0 0.038233317 523 0.00
14 102_100000 0.024920953 524 0.04
14 102_200000 0.019059268 525 0.06
14 102_300000 0 527 0.08
14 102_400000 0.00821647 528 0.08
14 102_500000 0.024338495 529 0.09
14 102_600000 0.0119918 530 0.11
14 102_700000 0.010720696 531 0.13
14 102_800000 0 532 0.14
14 102_900000 0.017419253 533 0.14
14 6c_100000 0.004949194 534 0.15
14 6c_0 0.01964062 535 0.16
14 26a_0 0.000596793 536 0.18
14 26a_100000 0.019027673 537 0.18
14 26a_200000 0.018462545 538 0.20
14 26a_300000 0.012246016 539 0.22
14 26a_400000 0.018760826 540 0.23
14 26a_500000 0.016428977 541 0.25
14 26a_600000 0.013465882 542 0.27
14 26a_700000 0.015998324 543 0.28
14 26a_800000 0 544 0.29
14 26a_900000 0 545 0.29
14 26a_1000000 0.016674124 546 0.29
14 26a_1100000 0 547 0.31
14 26a_1200000 0.017471064 548 0.31
14 26a_1300000 0.027745606 549 0.33
14 26a_1400000 0 550 0.36
14 26a_1500000 0.021226891 551 0.36
14 278_0 0.017993181 552 0.38
14 278_100000 0.008467611 553 0.40
14 148_100000 0.007022532 554 0.40
14 148_200000 0.011258417 555 0.41
14 148_300000 0 556 0.42
14 148_400000 0.02833719 557 0.42
14 148_500000 0.008866793 558 0.45
14 148_600000 0 559 0.46
14 198_0 0.009787427 560 0.46
14 198_100000 0.029600831 561 0.47
14 198_300000 0 562 0.50
14 101_0 0.005339324 563 0.50
14 101_100000 0 564 0.50
14 101_200000 0 565 0.50
14 101_400000 0 566 0.50
14 101_500000 0 567 0.50
14 101_600000 0 568 0.50
14 101_700000 0.011455665 569 0.50
14 101_800000 0 570 0.52
14 101_900000 0.011338687 571 0.52
14 17a_1000000 0.009190302 572 0.53
14 17a_900000 0.001873551 573 0.54
14 17a_800000 0.010343841 574 0.54
14 17a_500000 0 575 0.55
14 17a_400000 0 576 0.55
14 17a_300000 0.003969653 577 0.55
14 17a_200000 0.009050439 578 0.55
14 17a_0 0.024478474 579 0.56
14 208_0 0.007236225 580 0.59
14 208_100000 0.017588745 581 0.59
14 208_300000 0.007247505 582 0.61
14 208_400000 0 583 0.62
14 247_200000 0 584 0.62
14 247_100000 0 585 0.62
14 247_0 0 586 0.62
14 419_0 0 587 0.62
14 207_300000 0.005542408 588 0.62
14 207_200000 0 589 0.62
14 207_100000 0 590 0.62
14 207_0 0 591 0.62
14 164_500000 0 592 0.62
14 164_400000 0.006506816 593 0.62
14 164_300000 0 594 0.63
14 164_200000 0 595 0.63
14 164_100000 0.015938575 596 0.63
14 164_0 0.030238107 597 0.65
14 123_800000 0.021083088 598 0.68
14 123_700000 0 599 0.70
14 123_600000 0.005678844 600 0.70
14 123_500000 0.011674437 601 0.70
14 123_400000 0 602 0.71
14 123_300000 0 603 0.71
14 123_200000 0.006598302 604 0.71
14 123_100000 0.013565394 605 0.72
14 123_0 0 606 0.73
14 15_0 0.015825363 607 0.73
14 15_100000 0.020050237 608 0.75
14 15_200000 0 609 0.77
14 15_300000 0.010837262 610 0.77
14 15_400000 0 611 0.78
14 15_500000 0.024231262 612 0.78
14 15_600000 0.009795037 613 0.81
14 15_700000 0.010040735 614 0.82
14 15_800000 0 615 0.83
14 15_1100000 0 616 0.83
14 15_2500000 0.021929791 617 0.83
14 300_100000 0 618 0.85
14 300_0 0 619 0.85
14 211_0 0 620 0.85
14 211_100000 0 621 0.85
14 211_200000 0 622 0.85
14 211_300000 0.003036056 623 0.85
14 127_500000 0.003026508 624 0.85
14 127_400000 0 625 0.85
14 127_300000 0 626 0.85
14 127_200000 0 627 0.85
14 127_100000 0 628 0.85
14 168_200000 0 629 0.85
14 168_300000 0 630 0.85
14 168_400000 0.009682224 631 0.85
14 168_500000 0 632 0.86
14 126_700000 0.022051607 633 0.86
14 126_600000 0 634 0.89
14 126_500000 0 635 0.89
14 126_400000 0 636 0.89
14 126_200000 0.011287354 637 0.89
14 126_100000 0 638 0.90
14 126_0 0.01117501 639 0.90
14 291_100000 0 640 0.91
14 361_100000 0 641 0.91
14 361_0 0 642 0.91
14 121_700000 0.011261715 643 0.91
14 121_600000 0 644 0.92
14 121_500000 0.011230824 645 0.92
14 121_300000 0 646 0.93
14 121_200000 0 647 0.93
14 121_100000 0 648 0.93
14 636_0 0 649 0.93
14 303_0 0 650 0.93
14 303_100000 0.017600185 651 0.93
14 1042_0 0 652 0.95
14 195_0 0 653 0.95
14 195_100000 0.012150282 654 0.95
14 195_200000 0.038258617 655 0.96
14 219_0 0 656 1.00
14 219_100000 0.016136826 657 1.00
14 366_0 0 658 1.01
14 323_0 0.008648936 659 1.01
14 128_100000 0.015095282 660 1.02
14 128_300000 0 661 1.04
14 128_400000 0.018470438 662 1.04
14 128_600000 0.007049792 663 1.06
14 128_700000 0.007203884 664 1.06
14 382_0 0 665 1.07
14 178_0 0 666 1.07
14 178_100000 0 667 1.07
14 178_200000 0.006575845 668 1.07
14 178_300000 0 669 1.08
14 178_400000 0.015743065 670 1.08
14 178_500000 0.010835249 671 1.09
14 140b_0 0.015597645 672 1.10
14 140b_100000 0 673 1.12
14 140b_200000 0.010440762 674 1.12
14 140b_300000 0 675 1.13
14 140b_400000 0.005568432 676 1.13
14 92_0 0.00557058 677 1.14
14 92_100000 0 678 1.14
14 92_200000 0 679 1.14
14 92_300000 0.016515892 680 1.14
14 92_400000 0 681 1.16
14 92_500000 0 682 1.16
14 92_600000 0.00553171 683 1.16
14 92_700000 0.011493724 684 1.16
14 92_800000 0 685 1.17
14 92_900000 0 686 1.17
14 92_1000000 0 687 1.17
14 459_0 0 688 1.17
14 290_0 0.018836486 689 1.17
14 290_100000 0.018788418 690 1.19
14 2_0 0 691 1.21
14 2_100000 0 692 1.21
14 2_200000 0.012316845 693 1.21
14 2_300000 0.00567215 694 1.22
14 2_400000 0.008600902 695 1.23
14 2_500000 0.008141832 696 1.24
14 2_600000 0 697 1.25
14 2_700000 0 698 1.25
14 2_800000 0 699 1.25
14 2_900000 0.011122584 700 1.25
14 2_1000000 0.016527307 701 1.26
14 2_1100000 0 702 1.27
14 2_1200000 0 703 1.27
14 2_1300000 0.028319991 704 1.27
14 2_1500000 0 705 1.30
14 2_1600000 0.01932088 706 1.30
14 2_1800000 0 707 1.32
14 2_1900000 0.016127512 708 1.32
14 2_2000000 0.022995998 709 1.34
14 2_2100000 0.06515753 710 1.36
14 2_2400000 0.023930988 711 1.43
14 2_2700000 0.006863338 712 1.45
14 2_2800000 0 713 1.46
14 2_2900000 0.060712754 714 1.46
14 2_3000000 0.015688901 715 1.52
14 2_3100000 0.01817352 716 1.53
14 2_3200000 0.010763504 717 1.55
14 2_3300000 0.006714792 718 1.56
14 2_3400000 0.006619871 719 1.57
14 2_3500000 0.008659911 720 1.58
14 2_3600000 0 721 1.58
14 2_3700000 0.005441541 722 1.58
14 2_3800000 0 723 1.59
14 2_3900000 0 724 1.59
14 2_4000000 0.011486787 725 1.59
14 2_4100000 0.03050215 726 1.60
14 2_4200000 0 727 1.63
14 2_4300000 0.039429163 728 1.63
14 2_4400000 0.008626127 729 1.67
14 2_4500000 0.005829312 730 1.68
14 58a_0 0 731 1.69
14 58a_100000 0.022215644 732 1.69
14 58a_200000 0.045377236 733 1.71
14 58a_300000 0 734 1.75
14 58a_400000 0 735 1.75
14 58a_500000 0 736 1.75
14 396_0 737 1.75
1 79_0 0 738 0.00
1 79_100000 0.029635766 739 0.00
1 79_200000 0 740 0.03
1 79_300000 0.026470717 741 0.03
1 79_400000 0.069939423 742 0.06
1 79_600000 0.041059607 743 0.13
1 79_700000 0 744 0.17
1 79_800000 0.012300322 745 0.17
1 79_900000 0.017020479 746 0.18
1 79_1000000 0 747 0.20
1 79_1100000 0.005594975 748 0.20
1 69_0 0.005475702 749 0.20
1 69_100000 0.005542747 750 0.21
1 69_200000 0.005535396 751 0.21
1 69_300000 0 752 0.22
1 69_400000 0.007310296 753 0.22
1 69_500000 0.021070264 754 0.23
1 69_600000 0 755 0.25
1 69_700000 0 756 0.25
1 69_800000 0.007784751 757 0.25
1 69_900000 0.008479205 758 0.25
1 69_1000000 0.008399799 759 0.26
1 69_1100000 0 760 0.27
1 69_1200000 0.006131431 761 0.27
1 181_0 0.011545982 762 0.28
1 181_100000 0 763 0.29
1 181_200000 0.031056444 764 0.29
1 181_300000 0.013084697 765 0.32
1 181_400000 0.014153766 766 0.33
1 60_0 0.021871162 767 0.35
1 60_100000 0.018518933 768 0.37
1 60_200000 0 769 0.39
1 60_300000 0.021761343 770 0.39
1 60_400000 0.005537909 771 0.41
1 60_500000 0.00555682 772 0.42
1 60_600000 0.008126132 773 0.42
1 60_700000 0.011445238 774 0.43
1 60_800000 0.016886102 775 0.44
1 60_900000 0.010904455 776 0.46
1 60_1000000 0.015945345 777 0.47
1 60_1100000 0.005294221 778 0.48
1 60_1200000 0.011290406 779 0.49
1 60_1300000 0 780 0.50
1 60_1400000 0 781 0.50
1 333_0 0 782 0.50
1 333_100000 0 783 0.50
1 165_0 0 784 0.50
1 165_100000 0 785 0.50
1 165_200000 0.020778759 786 0.50
1 165_300000 0 787 0.52
1 165_400000 0.024616545 788 0.52
1 308_100000 0.015666635 789 0.55
1 314_100000 0.033303734 790 0.56
1 314_0 0 791 0.60
1 26b_400000 0 792 0.60
1 26b_200000 0 793 0.60
1 26b_100000 0.031710008 794 0.60
1 240_200000 0.017838524 795 0.63
1 240_100000 0 796 0.64
1 82_1000000 0.015865154 797 0.64
1 82_900000 0 798 0.66
1 82_800000 0.003422531 799 0.66
1 82_700000 0 800 0.66
1 82_600000 0 801 0.66
1 82_500000 0 802 0.66
1 82_400000 0.007281069 803 0.66
1 82_300000 0.012812414 804 0.67
1 82_200000 0.004880347 805 0.68
1 82_100000 0 806 0.69
1 427_0 0 807 0.69
1 254b_100000 0 808 0.69
1 80a_800000 0 809 0.69
1 80a_700000 0 810 0.69
1 80a_600000 0 811 0.69
1 80a_500000 0.020270682 812 0.69
1 80a_400000 0.008340028 813 0.71
1 80a_300000 0 814 0.72
1 80a_200000 0.010316931 815 0.72
1 80a_100000 0.030271367 816 0.73
1 146_0 0.024160482 817 0.76
1 146_200000 0.040002149 818 0.78
1 83a_100000 0 819 0.82
1 74_1100000 0 820 0.82
1 232_100000 0 821 0.82
1 232_300000 0 822 0.82
1 115b_400000 823 0.82
2 18_2200000 0 824 0.00
2 18_2100000 0.003316805 825 0.00
2 18_2000000 0.009801931 826 0.00
2 18_1900000 0.022103486 827 0.01
2 18_1800000 0.03169995 828 0.04
2 18_1700000 0.006196689 829 0.07
2 18_1600000 0.03290237 830 0.07
2 18_1500000 0 831 0.11
2 18_1400000 0.013970441 832 0.11
2 18_1300000 0.014680296 833 0.12
2 18_1200000 0 834 0.13
2 18_1100000 0.027299833 835 0.13
2 18_1000000 0.027872704 836 0.16
2 18_900000 0.013722122 837 0.19
2 18_800000 0.019704154 838 0.20
2 18_700000 0 839 0.22
2 18_600000 0.008363445 840 0.22
2 18_500000 0.009568395 841 0.23
2 18_400000 0.018829007 842 0.24
2 18_300000 0 843 0.26
2 18_200000 0.0324912 844 0.26
2 18_100000 0.021677412 845 0.29
2 18_0 0 846 0.31
2 697_0 0 847 0.31
2 89_0 0 848 0.31
2 89_100000 0.006206134 849 0.31
2 89_200000 0 850 0.32
2 89_300000 0.069572724 851 0.32
2 89_400000 0.070967132 852 0.39
2 89_500000 0.031986908 853 0.46
2 89_600000 0 854 0.49
2 89_700000 0.031532033 855 0.49
2 89_800000 0 856 0.52
2 89_900000 0 857 0.52
2 89_1000000 0.007645803 858 0.52
2 44a_700000 0.005409866 859 0.53
2 44a_600000 0.007683257 860 0.54
2 44a_500000 0.015132905 861 0.55
2 44a_400000 0 862 0.56
2 44a_300000 0 863 0.56
2 44a_200000 0.005313501 864 0.56
2 44a_0 0 865 0.57
2 212_0 0 866 0.57
2 212_100000 0 867 0.57
2 212_200000 0.010861453 868 0.57
2 212_300000 0 869 0.58
2 212_400000 0 870 0.58
2 249_0 0.017299084 871 0.58
2 249_100000 0 872 0.59
2 249_200000 0 873 0.59
2 81_0 0 874 0.59
2 81_100000 0 875 0.59
2 81_200000 0.011271549 876 0.59
2 81_300000 0 877 0.61
2 81_400000 0 878 0.61
2 81_500000 0 879 0.61
2 81_700000 0 880 0.61
2 81_800000 0 881 0.61
2 81_900000 0 882 0.61
2 216_300000 0.011222012 883 0.61
2 112_800000 0 884 0.62
2 112_400000 0 885 0.62
2 112_200000 0 886 0.62
2 112_100000 0 887 0.62
2 19_200000 0 888 0.62
2 19_300000 0.011352724 889 0.62
2 19_400000 0 890 0.63
2 19_600000 0 891 0.63
2 19_800000 0 892 0.63
2 19_1000000 0 893 0.63
2 19_1100000 0 894 0.63
2 19_1200000 0 895 0.63
2 19_1300000 0 896 0.63
2 19_1500000 0 897 0.63
2 73_1100000 0.005537946 898 0.63
2 73_800000 0 899 0.63
2 73_700000 0 900 0.63
2 65_0 0 901 0.63
2 65_300000 0.013459178 902 0.63
2 65_500000 0.018572174 903 0.65
2 65_600000 0.011352193 904 0.67
2 65_1000000 0 905 0.68
2 65_1100000 0 906 0.68
2 65_1200000 0.005785115 907 0.68
2 42_100000 0 908 0.68
2 42_300000 0.010176683 909 0.68
2 42_400000 0.010409415 910 0.69
2 42_800000 0 911 0.70
2 42_1000000 0.006012102 912 0.70
2 42_1200000 0 913 0.71
2 42_1400000 0 914 0.71
2 42_1500000 0.013817312 915 0.71
2 42_1600000 0 916 0.72
2 173_400000 0 917 0.72
2 173_300000 0 918 0.72
2 173_200000 0 919 0.72
2 173_100000 0.01284302 920 0.72
2 41a_0 0.012643991 921 0.74
2 41a_200000 0 922 0.75
2 41a_300000 0 923 0.75
2 41a_400000 0.019120539 924 0.75
2 41a_500000 0 925 0.77
2 41a_600000 0 926 0.77
2 41a_700000 0.013336687 927 0.77
2 41a_800000 0 928 0.78
2 41a_900000 0 929 0.78
2 41a_1000000 0 930 0.78
2 41a_1100000 0.016630622 931 0.78
2 41a_1200000 0.025520659 932 0.80
2 41a_1400000 0.023709632 933 0.82
2 41a_1500000 0.023028785 934 0.85
2 429_0 0 935 0.87
2 151_0 0 936 0.87
2 151_100000 0 937 0.87
2 151_200000 0.007581923 938 0.87
2 151_300000 0 939 0.88
2 151_400000 0 940 0.88
2 151_500000 0.014166822 941 0.88
2 151_600000 0 942 0.89
2 27_0 0 943 0.89
2 27_100000 0 944 0.89
2 27_200000 0 945 0.89
2 27_300000 0.016413919 946 0.89
2 27_400000 0.016662344 947 0.91
2 27_500000 0 948 0.92
2 27_600000 0.015901826 949 0.92
2 27_700000 0 950 0.94
2 27_800000 0.021496711 951 0.94
2 27_900000 0.026109726 952 0.96
2 27_1000000 0.004586392 953 0.99
2 27_1100000 0.007874583 954 0.99
2 27_1200000 0.012657935 955 1.00
2 27_1300000 0 956 1.01
2 27_1400000 0.029761011 957 1.01
2 27_1500000 0.02900734 958 1.04
2 27_1600000 0 959 1.07
2 27_1700000 0.011042725 960 1.07
2 27_1800000 0.018429685 961 1.08
2 27_1900000 0 962 1.10
2 27_2000000 963 1.10
3 137_400000 0 964 0.00
3 137_300000 0.037723922 965 0.00
3 137_200000 0.005824139 966 0.04
3 137_100000 0.00571334 967 0.04
3 137_0 0.029578931 968 0.05
3 316_0 0 969 0.08
3 316_100000 0.005962059 970 0.08
3 55a_300000 0 971 0.08
3 55a_200000 0.018990618 972 0.08
3 55a_100000 0 973 0.10
3 55a_0 0 974 0.10
3 116_700000 0.004519113 975 0.10
3 116_600000 0.012711652 976 0.11
3 116_500000 0 977 0.12
3 116_400000 0 978 0.12
3 116_300000 0.006150798 979 0.12
3 116_100000 0 980 0.13
3 116_0 0 981 0.13
3 246_200000 0 982 0.13
3 62_1200000 0.008117871 983 0.13
3 62_900000 0.00961296 984 0.14
3 62_800000 0 985 0.14
3 62_600000 0.012988053 986 0.14
3 62_500000 0.013413016 987 0.16
3 389_0 0 988 0.17
3 70b_0 0.007928094 989 0.17
3 72_300000 0 990 0.18
3 72_200000 0.003847418 991 0.18
3 72_100000 0.009053903 992 0.18
3 72_0 0 993 0.19
3 35_0 0 994 0.19
3 23_1900000 0.000444541 995 0.19
3 23_1600000 0 996 0.19
3 23_1100000 0.022033624 997 0.19
3 23_1000000 0 998 0.21
3 23_900000 0 999 0.21
3 23_800000 0 1000 0.21
3 23_700000 0.0061472 1001 0.21
3 23_600000 0.009129728 1002 0.22
3 23_0 0 1003 0.23
3 265_100000 0 1004 0.23
3 265_0 0.01302335 1005 0.23
3 5a_1800000 0 1006 0.24
3 5a_1700000 0 1007 0.24
3 5a_1600000 0.013383173 1008 0.24
3 5a_1500000 0.005881866 1009 0.26
3 5a_1400000 0 1010 0.26
3 5a_1300000 0.023007229 1011 0.26
3 5a_1100000 0 1012 0.29
3 5a_1000000 0 1013 0.29
3 5a_900000 0 1014 0.29
3 5a_800000 0 1015 0.29
3 5a_700000 0 1016 0.29
3 5a_600000 0 1017 0.29
3 5a_500000 0.010948789 1018 0.29
3 5a_400000 0.01714605 1019 0.30
3 5a_300000 0 1020 0.31
3 5a_200000 0 1021 0.31
3 106a_0 0.051214912 1022 0.31
3 106a_100000 0 1023 0.36
3 106a_200000 0 1024 0.36
3 106a_300000 0.033646345 1025 0.36
3 106a_400000 0 1026 0.40
3 106a_500000 0 1027 0.40
3 106a_600000 0.005410567 1028 0.40
3 106a_700000 0.018986952 1029 0.40
3 258_200000 0 1030 0.42
3 258_100000 0.030458797 1031 0.42
3 258_0 0 1032 0.45
3 169_500000 0 1033 0.45
3 169_400000 0.003820454 1034 0.45
3 169_300000 0.008405738 1035 0.46
3 169_200000 0 1036 0.47
3 169_100000 0.01578049 1037 0.47
3 169_0 0 1038 0.48
3 98_1000000 0.008615035 1039 0.48
3 98_900000 0.006545236 1040 0.49
3 98_800000 0.027369894 1041 0.50
3 98_700000 0.016731305 1042 0.52
3 98_600000 0 1043 0.54
3 98_500000 0.021515993 1044 0.54
3 98_400000 0.021548768 1045 0.56
3 98_300000 0.03361935 1046 0.58
3 98_200000 0 1047 0.62
3 98_100000 0.012757897 1048 0.62
3 98_0 0 1049 0.63
3 261_200000 0 1050 0.63
3 261_100000 0 1051 0.63
3 261_0 0.023250533 1052 0.63
3 273a_0 0.007306116 1053 0.65
3 64_1300000 0 1054 0.66
3 64_1200000 0.030579313 1055 0.66
3 64_1100000 0 1056 0.69
3 64_1000000 0.022069445 1057 0.69
3 64_900000 0.009338588 1058 0.71
3 64_800000 0.03330217 1059 0.72
3 64_700000 0 1060 0.76
3 64_600000 0.027410986 1061 0.76
3 64_500000 0.008897782 1062 0.78
3 64_400000 0.029631783 1063 0.79
3 64_300000 0.006941244 1064 0.82
3 64_200000 0.014444881 1065 0.83
3 64_100000 0.004314283 1066 0.84
3 64_0 1067 0.85
4 1_4900000 0 1068 0.00
4 1_4800000 0 1069 0.00
4 1_4700000 0.010734359 1070 0.00
4 1_4600000 0.020719119 1071 0.01
4 1_4500000 0.013107143 1072 0.03
4 1_4400000 0 1073 0.04
4 1_4300000 0 1074 0.04
4 1_4200000 0.006339507 1075 0.04
4 1_4100000 0.013698188 1076 0.05
4 1_4000000 0.010749659 1077 0.06
4 1_3900000 0.015029433 1078 0.08
4 1_3800000 0.025921742 1079 0.09
4 1_3700000 0 1080 0.12
4 1_3600000 0.017292487 1081 0.12
4 1_3500000 0.007222629 1082 0.13
4 1_3400000 0.028237014 1083 0.14
4 1_3300000 0 1084 0.17
4 1_3200000 0.005519985 1085 0.17
4 1_3100000 0.005815674 1086 0.17
4 1_3000000 0.011419817 1087 0.18
4 1_2900000 0.010956223 1088 0.19
4 1_2800000 0.022640408 1089 0.20
4 1_2700000 0.007784277 1090 0.23
4 1_2600000 0.012305241 1091 0.23
4 1_2500000 0 1092 0.25
4 1_2400000 0.017546703 1093 0.25
4 1_2300000 0 1094 0.26
4 1_2200000 0.034425256 1095 0.26
4 1_2100000 0 1096 0.30
4 1_2000000 0 1097 0.30
4 1_1900000 0 1098 0.30
4 1_1800000 0.00862317 1099 0.30
4 1_1700000 0.007979469 1100 0.31
4 1_1600000 0.010687408 1101 0.31
4 1_1500000 0.013749435 1102 0.32
4 1_1400000 0.006480557 1103 0.34
4 1_1300000 0.014883493 1104 0.34
4 1_1200000 0.012313129 1105 0.36
4 1_1100000 0.024913738 1106 0.37
4 1_1000000 0 1107 0.40
4 1_900000 0.011301809 1108 0.40
4 1_800000 0 1109 0.41
4 1_700000 0.006282166 1110 0.41
4 1_600000 0.0188164 1111 0.41
4 1_500000 0 1112 0.43
4 1_400000 0.012914271 1113 0.43
4 1_300000 0.018509823 1114 0.45
4 1_200000 0.021894127 1115 0.46
4 1_100000 0 1116 0.49
4 1_0 0.005542923 1117 0.49
4 54_1400000 0.005609001 1118 0.49
4 54_1300000 0 1119 0.50
4 54_1200000 0 1120 0.50
4 54_1100000 0.016671339 1121 0.50
4 54_1000000 0 1122 0.51
4 54_900000 0 1123 0.51
4 54_800000 0 1124 0.51
4 54_700000 0 1125 0.51
4 54_600000 0.008284409 1126 0.51
4 54_500000 0.008330713 1127 0.52
4 54_400000 0 1128 0.53
4 54_300000 0.005684992 1129 0.53
4 54_100000 0 1130 0.54
4 54_0 0 1131 0.54
4 120_0 0 1132 0.54
4 120_100000 0 1133 0.54
4 120_300000 0 1134 0.54
4 120_400000 0 1135 0.54
4 120_600000 0.011755191 1136 0.54
4 120_700000 0 1137 0.55
4 120_800000 0.008850837 1138 0.55
4 150_200000 0.005074116 1139 0.56
4 150_300000 0.015690281 1140 0.56
4 57_700000 0.012369102 1141 0.58
4 57_600000 0 1142 0.59
4 57_500000 0.016261142 1143 0.59
4 57_400000 0.012155462 1144 0.61
4 57_300000 0 1145 0.62
4 57_100000 0 1146 0.62
4 254a_0 0 1147 0.62
4 154_0 0 1148 0.62
4 154_100000 0 1149 0.62
4 154_200000 0.011903491 1150 0.62
4 154_300000 0 1151 0.63
4 154_400000 0 1152 0.63
4 154_500000 0 1153 0.63
4 110_0 0.005554805 1154 0.63
4 110_100000 0 1155 0.64
4 110_200000 0 1156 0.64
4 110_300000 0 1157 0.64
4 110_400000 0 1158 0.64
4 110_600000 0 1159 0.64
4 134_700000 0 1160 0.64
4 177_400000 0.00579875 1161 0.64
4 177_300000 0.000947669 1162 0.64
4 177_200000 0.001393692 1163 0.64
4 177_100000 0 1164 0.64
4 177_0 0 1165 0.64
4 6a_2300000 0 1166 0.64
4 6a_2200000 0 1167 0.64
4 6a_2100000 0 1168 0.64
4 6a_2000000 0 1169 0.64
4 6a_1900000 0 1170 0.64
4 6a_1800000 0 1171 0.64
4 6a_1700000 0.014238035 1172 0.64
4 6a_1300000 0.007443711 1173 0.66
4 6a_1100000 0.025590906 1174 0.67
4 6a_1000000 0 1175 0.69
4 6a_900000 0.006813136 1176 0.69
4 6a_800000 0.017332997 1177 0.70
4 6a_700000 0.04841576 1178 0.72
4 6a_600000 0.07437022 1179 0.76
4 55b_100000 0.025586452 1180 0.84
4 55b_200000 0 1181 0.86
4 55b_300000 0.017260166 1182 0.86
4 55b_500000 0 1183 0.88
4 55b_600000 0 1184 0.88
4 55b_1000000 0.0086802 1185 0.88
4 443_0 0.00874095 1186 0.89
4 346_0 0 1187 0.90
4 346_100000 0 1188 0.90
4 320_100000 0.013570079 1189 0.90
4 320_0 0.008465065 1190 0.91
4 96a_0 0 1191 0.92
4 96a_100000 0.019093089 1192 0.92
4 96a_200000 0.034026217 1193 0.94
4 96a_500000 0.025489593 1194 0.97
4 96a_600000 0 1195 1.00
4 96a_700000 0.015253456 1196 1.00
4 96a_800000 0 1197 1.02
4 96a_900000 0.017444791 1198 1.02
4 85_1100000 0 1199 1.03
4 85_1000000 0 1200 1.03
4 85_900000 0.007540448 1201 1.03
4 85_700000 0.010184288 1202 1.04
4 85_600000 0.029950209 1203 1.05
4 85_200000 0.009254798 1204 1.08
4 85_100000 0 1205 1.09
4 1287_0 0.017395055 1206 1.09
4 196_500000 0.012740103 1207 1.11
4 196_300000 0.029727366 1208 1.12
4 196_0 0 1209 1.15
4 541_0 0.032437303 1210 1.15
4 44c_200000 0.017934224 1211 1.18
4 682_0 0 1212 1.20
4 479_0 0 1213 1.20
4 384_0 0.007797406 1214 1.20
4 163_100000 0.046389774 1215 1.21
4 163_400000 0.010521216 1216 1.25
4 163_500000 0.03365375 1217 1.26
4 633_0 1218 1.30
5 129a_0 0 1219 0.00
5 129a_100000 0.018940609 1220 0.00
5 129a_200000 0 1221 0.02
5 66_0 0 1222 0.02
5 66_100000 0.022012467 1223 0.02
5 66_200000 0.017377257 1224 0.04
5 66_300000 0.018461772 1225 0.06
5 66_400000 0.015639557 1226 0.08
5 66_500000 0.014744239 1227 0.09
5 66_600000 0.046947333 1228 0.11
5 66_700000 0.006372034 1229 0.15
5 66_800000 0.023956087 1230 0.16
5 66_900000 0.006896103 1231 0.18
5 66_1000000 0.008438851 1232 0.19
5 66_1100000 0.009415086 1233 0.20
5 66_1200000 0.031430626 1234 0.21
5 32_0 0 1235 0.24
5 32_100000 0.033062436 1236 0.24
5 32_200000 0.010862445 1237 0.27
5 32_300000 0 1238 0.28
5 32_400000 0 1239 0.28
5 32_500000 0.005605214 1240 0.28
5 32_600000 0.011060795 1241 0.29
5 32_700000 0 1242 0.30
5 32_800000 0.051058693 1243 0.30
5 32_900000 0 1244 0.35
5 32_1000000 0 1245 0.35
5 32_1100000 0.020921291 1246 0.35
5 32_1200000 0 1247 0.37
5 32_1300000 0 1248 0.37
5 32_1400000 0.009527439 1249 0.37
5 32_1600000 0.013785405 1250 0.38
5 32_1700000 0.002758844 1251 0.40
5 32_1800000 0.010136751 1252 0.40
5 205_0 0.011309791 1253 0.41
5 205_100000 0 1254 0.42
5 205_200000 0.005671426 1255 0.42
5 205_300000 0.005181161 1256 0.43
5 205_400000 0.023901082 1257 0.43
5 461_0 0.008211164 1258 0.46
5 242_0 0 1259 0.46
5 242_100000 0.019412053 1260 0.46
5 242_200000 0 1261 0.48
5 242_300000 0.005038415 1262 0.48
5 104a_0 0 1263 0.49
5 104a_100000 0 1264 0.49
5 104a_200000 0.006964353 1265 0.49
5 244_100000 0 1266 0.50
5 244_0 0.024801422 1267 0.50
5 111a_600000 0.013719422 1268 0.52
5 111a_500000 0 1269 0.53
5 111a_400000 0.026118973 1270 0.53
5 94_200000 0 1271 0.56
5 94_400000 0.021719838 1272 0.56
5 94_500000 0 1273 0.58
5 94_600000 0 1274 0.58
5 94_800000 0.001352267 1275 0.58
5 94_900000 0.017143872 1276 0.58
5 88_400000 0 1277 0.60
5 304_100000 0.076946722 1278 0.60
5 263_0 0.072804683 1279 0.68
5 149_300000 0.013090569 1280 0.75
5 358_0 0 1281 0.76
5 288_100000 0.013133048 1282 0.76
5 197_0 0.005906016 1283 0.78
5 197_200000 0.00095231 1284 0.78
5 197_300000 0 1285 0.78
5 197_400000 0.019411255 1286 0.78
5 327_100000 0 1287 0.80
5 327_0 0 1288 0.80
5 170_500000 0.028962241 1289 0.80
5 170_400000 0.032514437 1290 0.83
5 170_300000 0.008267776 1291 0.86
5 170_200000 0 1292 0.87
5 170_100000 0.007306903 1293 0.87
5 170_0 0.019150388 1294 0.88
5 158_600000 0 1295 0.90
5 158_500000 0.015336405 1296 0.90
5 158_400000 0 1297 0.91
5 158_300000 0 1298 0.91
5 158_200000 0.011172672 1299 0.91
5 158_100000 0 1300 0.92
5 158_0 0.000792655 1301 0.92
5 449_0 0.029239146 1302 0.93
5 36_0 0.047585698 1303 0.95
5 36_100000 0 1304 1.00
5 36_200000 0 1305 1.00
5 36_300000 0.006377967 1306 1.00
5 36_400000 0.012344665 1307 1.01
5 36_500000 0.00990936 1308 1.02
5 36_600000 0.030147962 1309 1.03
5 36_700000 0 1310 1.06
5 36_800000 0 1311 1.06
5 36_900000 0 1312 1.06
5 36_1000000 0.01574498 1313 1.06
5 36_1100000 0 1314 1.08
5 36_1200000 0 1315 1.08
5 36_1300000 0.020858153 1316 1.08
5 36_1400000 0.010248264 1317 1.10
5 36_1500000 0.01196935 1318 1.11
5 36_1600000 0.008150865 1319 1.12
5 36_1700000 0 1320 1.13
5 266_0 0.015340931 1321 1.13
5 266_100000 0 1322 1.14
5 266_200000 1323 1.14
6 270_0 0 1324 0.00
6 270_100000 0 1325 0.00
6 270_200000 0.015509726 1326 0.00
6 104b_500000 0.01526295 1327 0.02
6 104b_400000 0.027972331 1328 0.03
6 104b_300000 0.016230374 1329 0.06
6 104b_200000 0.028063186 1330 0.07
6 104b_100000 0.015724248 1331 0.10
6 104b_0 0.034093611 1332 0.12
6 51_1400000 0 1333 0.15
6 51_1300000 0.006943305 1334 0.15
6 51_1200000 0 1335 0.16
6 51_1100000 0.007588603 1336 0.16
6 51_1000000 0.01288425 1337 0.17
6 51_900000 0.002615008 1338 0.18
6 51_800000 0 1339 0.18
6 51_700000 0.034498034 1340 0.18
6 51_600000 0 1341 0.22
6 51_500000 0 1342 0.22
6 51_400000 0.051837025 1343 0.22
6 51_300000 0 1344 0.27
6 51_200000 0.032195786 1345 0.27
6 51_100000 0.008312091 1346 0.30
6 51_0 0 1347 0.31
6 8_3400000 0.032432907 1348 0.31
6 8_3300000 0.013221366 1349 0.34
6 8_3200000 0 1350 0.36
6 8_3100000 0 1351 0.36
6 8_3000000 0.020760463 1352 0.36
6 8_2900000 0.008197438 1353 0.38
6 8_2800000 0.024181776 1354 0.38
6 8_2700000 0 1355 0.41
6 8_2600000 0.025986461 1356 0.41
6 8_2500000 0 1357 0.43
6 8_2400000 0.011957013 1358 0.43
6 8_2300000 0 1359 0.45
6 8_2200000 0.057322053 1360 0.45
6 8_2000000 0.010041059 1361 0.50
6 8_1900000 0.038739418 1362 0.51
6 8_1800000 0.005816237 1363 0.55
6 8_1700000 0 1364 0.56
6 8_1600000 0 1365 0.56
6 8_1500000 0.019673329 1366 0.56
6 8_1400000 0.019684599 1367 0.58
6 8_1300000 0 1368 0.60
6 8_1200000 0.014034737 1369 0.60
6 8_1100000 0 1370 0.61
6 8_1000000 0.013763813 1371 0.61
6 8_900000 0.011248225 1372 0.63
6 8_800000 0.008201481 1373 0.64
6 8_700000 0 1374 0.64
6 8_600000 0.008412175 1375 0.64
6 8_500000 0.005499074 1376 0.65
6 8_400000 0 1377 0.66
6 8_300000 0.005508228 1378 0.66
6 8_200000 0 1379 0.66
6 8_100000 0.005581633 1380 0.66
6 8_0 0 1381 0.67
6 293_100000 0 1382 0.67
6 293_0 0.020493699 1383 0.67
6 379a_0 0 1384 0.69
6 21_0 0.012855695 1385 0.69
6 21_100000 0 1386 0.70
6 21_200000 0 1387 0.70
6 21_300000 0 1388 0.70
6 21_400000 0 1389 0.70
6 21_500000 0 1390 0.70
6 21_600000 0 1391 0.70
6 21_700000 0 1392 0.70
6 21_1200000 0.005314366 1393 0.70
6 21_1300000 0.002121852 1394 0.71
6 21_1600000 0 1395 0.71
6 21_1700000 0.009352474 1396 0.71
6 136_0 0 1397 0.72
6 136_300000 0.008421756 1398 0.72
6 136_400000 0 1399 0.73
6 136_500000 0.005385148 1400 0.73
6 136_600000 0 1401 0.73
6 43_1000000 0 1402 0.73
6 43_1100000 0 1403 0.73
6 43_1200000 0 1404 0.73
6 43_1400000 0 1405 0.73
6 43_1500000 0 1406 0.73
6 67_300000 0 1407 0.73
6 67_400000 0 1408 0.73
6 67_500000 0 1409 0.73
6 119_100000 0 1410 0.73
6 119_200000 0.019594363 1411 0.73
6 119_300000 0 1412 0.75
6 119_500000 0.007840303 1413 0.75
6 248_100000 0 1414 0.76
6 25_1400000 0 1415 0.76
6 25_1200000 0 1416 0.76
6 25_1000000 0 1417 0.76
6 25_900000 0 1418 0.76
6 25_800000 0 1419 0.76
6 25_500000 0 1420 0.76
6 25_400000 0.011596604 1421 0.76
6 25_300000 0 1422 0.77
6 25_200000 0 1423 0.77
6 25_100000 0 1424 0.77
6 190_400000 0 1425 0.77
6 190_300000 0.015079858 1426 0.77
6 190_200000 0 1427 0.79
6 190_0 0.007703312 1428 0.79
6 179_200000 0.016901133 1429 0.80
6 179_100000 0 1430 0.81
6 179_0 0.011296128 1431 0.81
6 262_200000 0.017424225 1432 0.82
6 262_100000 0 1433 0.84
6 262_0 0.021360768 1434 0.84
6 16_2300000 0.007444346 1435 0.86
6 16_2200000 0 1436 0.87
6 16_2100000 0 1437 0.87
6 16_2000000 0 1438 0.87
6 16_1900000 0.02209097 1439 0.87
6 16_1800000 0.012316111 1440 0.89
6 16_1700000 0 1441 0.90
6 16_1600000 0 1442 0.90
6 16_1500000 0.005779762 1443 0.90
6 16_1400000 0.014976317 1444 0.91
6 16_1300000 0 1445 0.93
6 16_1200000 0.008597088 1446 0.93
6 16_1100000 0.012976893 1447 0.93
6 16_1000000 0.01133893 1448 0.95
6 16_900000 0.031430847 1449 0.96
6 16_800000 0 1450 0.99
6 16_700000 0.018795982 1451 0.99
6 16_600000 0 1452 1.01
6 16_500000 0.006000277 1453 1.01
6 16_400000 0.00803419 1454 1.01
6 16_300000 0.022972113 1455 1.02
6 16_200000 0.017613255 1456 1.05
6 16_100000 0 1457 1.06
6 16_0 1458 1.06
7 251_100000 0 1459 0.00
7 251_0 0 1460 0.00
7 452_0 0.00974027 1461 0.00
7 335_0 0 1462 0.01
7 335_100000 0.017953919 1463 0.01
7 113_900000 0.069034002 1464 0.03
7 113_600000 0 1465 0.10
7 113_500000 0.006544223 1466 0.10
7 113_400000 0.017073572 1467 0.10
7 113_300000 0.035392271 1468 0.12
7 113_100000 0 1469 0.16
7 130_800000 0.043298288 1470 0.16
7 130_700000 0 1471 0.20
7 130_600000 0.011430741 1472 0.20
7 130_500000 0.016660848 1473 0.21
7 130_400000 0.005474416 1474 0.23
7 130_300000 0 1475 0.23
7 130_100000 0 1476 0.23
7 130_0 0 1477 0.23
7 416_0 0.005593998 1478 0.23
7 255_100000 0 1479 0.24
7 255_200000 0.016678451 1480 0.24
7 476_0 0 1481 0.25
7 9c_500000 0 1482 0.25
7 9c_400000 0 1483 0.25
7 9c_300000 0.0168872 1484 0.25
7 9c_200000 0 1485 0.27
7 9c_100000 0.019272453 1486 0.27
7 114b_0 0.01973686 1487 0.29
7 114b_100000 0 1488 0.31
7 114b_200000 0.017374645 1489 0.31
7 114b_400000 0.030620123 1490 0.33
7 114b_500000 0.007683031 1491 0.36
7 274_0 0.008413824 1492 0.37
7 274_100000 0 1493 0.37
7 274_200000 0.002730231 1494 0.37
7 29_0 0.006654475 1495 0.38
7 29_100000 0 1496 0.38
7 29_200000 0 1497 0.38
7 29_300000 0 1498 0.38
7 29_400000 0 1499 0.38
7 29_500000 0 1500 0.38
7 29_600000 0 1501 0.38
7 29_800000 0 1502 0.38
7 29_900000 0 1503 0.38
7 29_1000000 0.008193164 1504 0.38
7 29_1100000 0.008644233 1505 0.39
7 29_1200000 0.006153623 1506 0.40
7 29_1300000 0 1507 0.41
7 29_1400000 0 1508 0.41
7 29_1500000 0.006331552 1509 0.41
7 29_1700000 0.015233192 1510 0.41
7 29_1800000 0 1511 0.43
7 166_500000 0.012770469 1512 0.43
7 166_400000 0.023402673 1513 0.44
7 166_200000 0.010420023 1514 0.46
7 166_0 0.014793154 1515 0.48
7 70a_0 0.016787421 1516 0.49
7 56_1000000 0 1517 0.51
7 56_1100000 0 1518 0.51
7 56_1300000 0.0315873 1519 0.51
7 47a_400000 0 1520 0.54
7 184_0 0 1521 0.54
7 105_500000 0 1522 0.54
7 105_0 0.003805193 1523 0.54
7 14_2600000 0 1524 0.54
7 14_2000000 0 1525 0.54
7 14_1900000 0.012085287 1526 0.54
7 14_1700000 0 1527 0.55
7 14_1600000 0 1528 0.55
7 14_1500000 0.013110184 1529 0.55
7 14_1400000 0.01312773 1530 0.57
7 14_1200000 0.012994061 1531 0.58
7 14_800000 0 1532 0.59
7 14_600000 0 1533 0.59
7 14_500000 0.045022791 1534 0.59
7 97a_100000 0.005640526 1535 0.64
7 97a_200000 0.00828955 1536 0.64
7 229_100000 0.007814935 1537 0.65
7 279_0 0.017086931 1538 0.66
7 351_100000 0.047186272 1539 0.68
7 351_0 1540 0.72
8 109_800000 0 1541 0.00
8 109_700000 0 1542 0.00
8 109_600000 0.005091598 1543 0.00
8 109_500000 0.003888027 1544 0.01
8 109_400000 0.025062447 1545 0.01
8 109_300000 0.018941787 1546 0.03
8 109_200000 0 1547 0.05
8 109_100000 0 1548 0.05
8 109_0 0.007165776 1549 0.05
8 233_300000 0 1550 0.06
8 233_200000 0.036399207 1551 0.06
8 233_100000 0.019442572 1552 0.10
8 233_0 0 1553 0.12
8 11_0 0.017240152 1554 0.12
8 11_100000 0 1555 0.13
8 11_200000 0.007975247 1556 0.13
8 11_300000 0.022093379 1557 0.14
8 11_400000 0.0081637 1558 0.16
8 11_500000 0.008599894 1559 0.17
8 11_600000 0.014478438 1560 0.18
8 11_700000 0.031047982 1561 0.19
8 11_800000 0.005916857 1562 0.23
8 11_900000 0 1563 0.23
8 11_1000000 0.005533682 1564 0.23
8 11_1100000 0.007349769 1565 0.24
8 11_1200000 0.014864289 1566 0.24
8 11_1300000 0 1567 0.26
8 11_1400000 0 1568 0.26
8 11_1500000 0.012953213 1569 0.26
8 11_1600000 0.006542538 1570 0.27
8 11_1700000 0 1571 0.28
8 11_1800000 0.016448883 1572 0.28
8 11_1900000 0.015629298 1573 0.30
8 11_2000000 0.009924677 1574 0.31
8 11_2100000 0 1575 0.32
8 11_2200000 0.005759188 1576 0.32
8 11_2300000 0.016698416 1577 0.33
8 11_2400000 0 1578 0.34
8 11_2500000 0.005557401 1579 0.34
8 11_2600000 0.005460787 1580 0.35
8 11_2700000 0 1581 0.35
8 11_2800000 0 1582 0.35
8 11_2900000 0 1583 0.35
8 155_0 0 1584 0.35
8 155_100000 0 1585 0.35
8 155_200000 0.021748154 1586 0.35
8 155_300000 0 1587 0.38
8 155_400000 0.010120725 1588 0.38
8 155_600000 0.03471564 1589 0.39
8 76_300000 0.007802398 1590 0.42
8 76_400000 0 1591 0.43
8 76_500000 0.008333193 1592 0.43
8 76_600000 0.008544177 1593 0.44
8 76_700000 0 1594 0.45
8 76_800000 0 1595 0.45
8 76_900000 0.008101693 1596 0.45
8 76_1000000 0 1597 0.45
8 76_1100000 0.012019287 1598 0.45
8 59_0 0.00223141 1599 0.47
8 59_100000 0.011530999 1600 0.47
8 59_200000 0.001837955 1601 0.48
8 59_600000 0 1602 0.48
8 59_700000 0.008744441 1603 0.48
8 59_900000 0.010885483 1604 0.49
8 59_1000000 0 1605 0.50
8 59_1100000 0.017704559 1606 0.50
8 217_300000 0.010082156 1607 0.52
8 77_800000 0 1608 0.53
8 77_900000 0 1609 0.53
8 77_1000000 0 1610 0.53
8 118_0 0 1611 0.53
8 118_100000 0 1612 0.53
8 118_300000 0 1613 0.53
8 118_400000 0 1614 0.53
8 171_200000 0 1615 0.53
8 38_1400000 0.00547929 1616 0.53
8 38_1500000 0 1617 0.53
8 412_0 0 1618 0.53
8 46_1500000 0 1619 0.53
8 46_1400000 0 1620 0.53
8 46_1300000 0.011155954 1621 0.53
8 46_1200000 0 1622 0.55
8 46_1100000 0 1623 0.55
8 46_800000 0 1624 0.55
8 46_700000 0 1625 0.55
8 46_600000 0.005621945 1626 0.55
8 46_500000 0 1627 0.55
8 46_400000 0 1628 0.55
8 46_300000 0 1629 0.55
8 46_200000 0 1630 0.55
8 46_100000 0 1631 0.55
8 12b_0 0 1632 0.55
8 12b_100000 0 1633 0.55
8 12b_200000 0.028460224 1634 0.55
8 12b_300000 0.02852006 1635 0.58
8 12b_400000 0 1636 0.61
8 12b_500000 0 1637 0.61
8 12b_600000 0 1638 0.61
8 12b_700000 0.009178587 1639 0.61
8 12b_800000 0.018834026 1640 0.62
8 12b_1000000 0 1641 0.64
8 12b_1100000 0.045970289 1642 0.64
8 12b_1300000 0 1643 0.68
8 12b_1400000 0 1644 0.68
8 12b_1500000 0.00577677 1645 0.68
8 12b_1600000 0.014153028 1646 0.69
8 12b_1700000 0 1647 0.70
8 12b_1800000 0 1648 0.70
8 12b_1900000 0.014765272 1649 0.70
8 12b_2000000 0.009003175 1650 0.72
8 12b_2100000 0.006913113 1651 0.73
8 12b_2200000 0.016677737 1652 0.73
8 12b_2300000 0 1653 0.75
8 12b_2400000 0 1654 0.75
8 24_2100000 0.015062848 1655 0.75
8 24_2000000 0.007042695 1656 0.76
8 24_1900000 0.011283049 1657 0.77
8 24_1800000 0 1658 0.78
8 24_1700000 0.016874364 1659 0.78
8 24_1600000 0.011145082 1660 0.80
8 24_1500000 0 1661 0.81
8 24_1400000 0.022429103 1662 0.81
8 24_1300000 0.005486686 1663 0.83
8 24_1200000 0 1664 0.84
8 24_1100000 0 1665 0.84
8 24_1000000 0.016565631 1666 0.84
8 24_900000 0 1667 0.86
8 24_800000 0 1668 0.86
8 24_700000 0 1669 0.86
8 24_600000 0.023723923 1670 0.86
8 24_500000 0.004116049 1671 0.88
8 24_400000 0.005752283 1672 0.88
8 24_300000 0.005324723 1673 0.89
8 24_200000 0.01701353 1674 0.89
8 24_100000 0 1675 0.91
8 24_0 0 1676 0.91
8 45_1600000 0.015150568 1677 0.91
8 45_1500000 0.007009332 1678 0.93
8 45_1400000 0 1679 0.93
8 45_1300000 0 1680 0.93
8 45_1200000 0.005491849 1681 0.93
8 45_1100000 0.005431105 1682 0.94
8 45_1000000 0.009749986 1683 0.94
8 45_900000 0.001446321 1684 0.95
8 45_800000 0 1685 0.96
8 45_700000 0 1686 0.96
8 45_400000 0 1687 0.96
8 45_300000 0.01713085 1688 0.96
8 45_200000 0.008373475 1689 0.97
8 45_100000 0 1690 0.98
8 45_0 0 1691 0.98
8 285_0 0 1692 0.98
8 285_100000 0 1693 0.98
8 285_200000 0 1694 0.98
8 3_0 0.018813787 1695 0.98
8 3_100000 0.011141641 1696 1.00
8 3_200000 0 1697 1.01
8 3_300000 0 1698 1.01
8 3_400000 0.031196137 1699 1.01
8 3_600000 0.007797747 1700 1.04
8 3_700000 0.035177834 1701 1.05
8 3_800000 0.036900606 1702 1.08
8 3_900000 0.000470446 1703 1.12
8 3_1000000 0 1704 1.12
8 3_1100000 0.026567975 1705 1.12
8 3_1200000 0 1706 1.15
8 3_1400000 0.00190023 1707 1.15
8 3_1500000 0.00487337 1708 1.15
8 3_1600000 0 1709 1.16
8 3_1700000 0 1710 1.16
8 3_1800000 0 1711 1.16
8 3_1900000 0 1712 1.16
8 3_2000000 0.008811709 1713 1.16
8 3_2100000 0.008325592 1714 1.16
8 3_2200000 0.007515522 1715 1.17
8 3_2300000 0.030996463 1716 1.18
8 3_2400000 0.005540671 1717 1.21
8 3_2500000 0.005844312 1718 1.22
8 3_2600000 0 1719 1.22
8 3_2700000 0.013723475 1720 1.22
8 3_2800000 0 1721 1.24
8 3_2900000 0.013920513 1722 1.24
8 3_3000000 0 1723 1.25
8 3_3100000 0.013306591 1724 1.25
8 3_3200000 0.009230436 1725 1.26
8 3_3300000 0.01607635 1726 1.27
8 3_3400000 0.015308034 1727 1.29
8 3_3500000 0 1728 1.30
8 3_3600000 0.006641822 1729 1.30
8 3_3700000 0 1730 1.31
8 3_3800000 0.018493157 1731 1.31
8 3_3900000 0.006741036 1732 1.33
8 3_4000000 0.026896991 1733 1.34
8 3_4100000 0 1734 1.36
8 3_4200000 0 1735 1.36
8 3_4300000 1736 1.36
9 68b_1100000 0.033857321 1737 0.00
9 68b_900000 0.011543294 1738 0.03
9 68b_800000 0.016828107 1739 0.05
9 68b_700000 0.026209272 1740 0.06
9 68b_600000 0.003568084 1741 0.09
9 68b_500000 0.008555253 1742 0.09
9 68b_400000 0.018044593 1743 0.10
9 68b_300000 0.017076993 1744 0.12
9 68b_200000 0 1745 0.14
9 68b_100000 0.005579848 1746 0.14
9 68b_0 0.005788805 1747 0.14
9 124_700000 0 1748 0.15
9 124_600000 0 1749 0.15
9 124_500000 0.01274915 1750 0.15
9 124_400000 0 1751 0.16
9 124_300000 0.026473534 1752 0.16
9 124_100000 0 1753 0.19
9 124_0 0.01175138 1754 0.19
9 20_2300000 0.011748948 1755 0.20
9 20_2200000 0 1756 0.21
9 20_2100000 0 1757 0.21
9 20_2000000 0.011157677 1758 0.21
9 20_1900000 0 1759 0.22
9 20_1800000 0.005668141 1760 0.22
9 20_1600000 0 1761 0.23
9 20_1400000 0 1762 0.23
9 20_1300000 0 1763 0.23
9 20_1200000 0 1764 0.23
9 20_1100000 0.005942556 1765 0.23
9 20_900000 0.005942012 1766 0.23
9 20_800000 0 1767 0.24
9 20_700000 0 1768 0.24
9 20_600000 0 1769 0.24
9 20_500000 0 1770 0.24
9 20_400000 0.006216859 1771 0.24
9 20_300000 0.006384311 1772 0.24
9 330_0 0 1773 0.25
9 86_400000 0 1774 0.25
9 86_300000 0 1775 0.25
9 86_100000 0 1776 0.25
9 12a_300000 0 1777 0.25
9 12a_100000 0.01115324 1778 0.25
9 12a_0 0 1779 0.26
9 117a_100000 0.078470972 1780 0.26
9 283_100000 0.08972317 1781 0.34
9 147_500000 0 1782 0.43
9 147_600000 0 1783 0.43
9 152_100000 0.010563439 1784 0.43
9 103b_0 0 1785 0.44
9 103b_200000 0.014870552 1786 0.44
9 103b_300000 0 1787 0.46
9 84_1000000 0.004426514 1788 0.46
9 84_900000 0.009159914 1789 0.46
9 84_700000 0 1790 0.47
9 84_600000 0 1791 0.47
9 84_500000 0.009738597 1792 0.47
9 84_400000 0 1793 0.48
9 84_300000 0 1794 0.48
9 84_200000 0 1795 0.48
9 84_0 0 1796 0.48
9 97c_100000 0.011652327 1797 0.48
9 157_300000 0 1798 0.49
9 157_400000 0 1799 0.49
9 157_500000 0 1800 0.49
9 238_200000 0 1801 0.49
9 63b_900000 0.005725997 1802 0.49
9 63b_600000 0 1803 0.50
9 63b_500000 0.016843551 1804 0.50
9 63b_400000 0 1805 0.51
9 63b_200000 0 1806 0.51
9 63b_100000 0.005565067 1807 0.51
9 63b_0 0 1808 0.52
9 257a_0 0.002412981 1809 0.52
9 257a_100000 0.002432376 1810 0.52
9 7_3700000 0 1811 0.52
9 7_3600000 0.023250333 1812 0.52
9 7_3400000 0 1813 0.55
9 7_3300000 0 1814 0.55
9 7_3200000 0 1815 0.55
9 7_3100000 0.036999408 1816 0.55
9 7_3000000 0 1817 0.58
9 7_2900000 0 1818 0.58
9 7_2800000 0.025163416 1819 0.58
9 7_2600000 0 1820 0.61
9 7_2500000 0.011263584 1821 0.61
9 7_2400000 0.023368991 1822 0.62
9 7_2300000 0 1823 0.64
9 7_2200000 0.018415458 1824 0.64
9 7_2100000 0 1825 0.66
9 7_2000000 0.009841523 1826 0.66
9 7_1900000 0.011371632 1827 0.67
9 7_1800000 0.01583887 1828 0.68
9 7_1700000 0.023311911 1829 0.70
9 7_1600000 0.062189021 1830 0.72
9 7_1400000 0.017954883 1831 0.78
9 7_1300000 0.038257974 1832 0.80
9 7_1200000 0 1833 0.84
9 7_1100000 0 1834 0.84
9 7_1000000 0.006944008 1835 0.84
9 7_900000 0.031440022 1836 0.85
9 7_800000 0 1837 0.88
9 7_700000 0.008548011 1838 0.88
9 7_600000 0.012215321 1839 0.89
9 7_500000 0.025130883 1840 0.90
9 7_400000 0.028727006 1841 0.93
9 7_300000 0 1842 0.95
9 7_200000 0.015296118 1843 0.95
9 7_100000 0 1844 0.97
9 7_0 1845 0.97'''.split('\n')
IMPR_rils = '''10 13_1200000 0.010547795 1 0.000
10 13_1100000 0 2 0.011
10 13_1000000 0.017631604 3 0.011
10 13_900000 0.002155225 4 0.028
10 13_800000 0 5 0.030
10 13_700000 0 6 0.030
10 13_600000 0 7 0.030
10 13_500000 0 8 0.030
10 13_400000 0 9 0.030
10 13_300000 0 10 0.030
10 13_200000 0 11 0.030
10 13_100000 0 12 0.030
10 13_0 0 13 0.030
10 749_0 0 14 0.030
10 90_100000 0 15 0.030
10 90_200000 0 16 0.030
10 90_300000 0 17 0.030
10 90_400000 0 18 0.030
10 90_500000 0 19 0.030
10 90_600000 0 20 0.030
10 90_700000 0 21 0.030
10 90_800000 0 22 0.030
10 90_900000 0 23 0.030
10 48a_0 0 24 0.030
10 48a_100000 0 25 0.030
10 48a_200000 0 26 0.030
10 48a_300000 0 27 0.030
10 48a_400000 0 28 0.030
10 48a_500000 0 29 0.030
10 48a_600000 0 30 0.030
10 48a_700000 0 31 0.030
10 48a_800000 0 32 0.030
10 223_300000 0 33 0.030
10 223_200000 0 34 0.030
10 223_100000 0 35 0.030
10 223_0 0 36 0.030
10 445_0 0 37 0.030
10 324_100000 0 38 0.030
10 324_0 0 39 0.030
10 210_0 0 40 0.030
10 210_100000 0 41 0.030
10 210_200000 0 42 0.030
10 210_300000 0 43 0.030
10 206_300000 0 44 0.030
10 206_200000 0 45 0.030
10 206_100000 0 46 0.030
10 206_0 0 47 0.030
10 4a_200000 0 48 0.030
10 4a_100000 0 49 0.030
10 4a_0 0.003855028 50 0.030
10 40_500000 0 51 0.034
10 40_600000 0 52 0.034
10 40_700000 0 53 0.034
10 40_800000 0.001328288 54 0.034
10 40_900000 0.016830492 55 0.036
10 40_1000000 0.005979824 56 0.052
10 40_1100000 0 57 0.058
10 40_1200000 0 58 0.058
10 40_1300000 0.004948211 59 0.058
10 40_1400000 0 60 0.063
10 40_1500000 0.02734502 61 0.063
10 267_200000 0 62 0.091
10 267_100000 0 63 0.091
10 267_0 0.017941401 64 0.091
10 33_1700000 0.019946004 65 0.109
10 33_1600000 0 66 0.129
10 33_1400000 0 67 0.129
10 33_1300000 0 68 0.129
10 33_1200000 0 69 0.129
10 33_1100000 0.038306587 70 0.129
10 33_1000000 0.007452688 71 0.167
10 33_900000 0 72 0.174
10 33_700000 0 73 0.174
10 33_600000 0.012768328 74 0.174
10 9a_1100000 0 75 0.187
10 9a_1000000 0 76 0.187
10 9a_900000 0 77 0.187
10 9a_800000 0 78 0.187
10 9a_700000 0.017789032 79 0.187
10 9a_500000 0.008912633 80 0.205
10 9a_300000 0 81 0.214
10 9a_100000 0 82 0.214
10 156_100000 0 83 0.214
10 156_300000 0 84 0.214
10 156_400000 0 85 0.214
10 172_100000 0.015033661 86 0.214
10 172_200000 0 87 0.229
10 172_300000 0.007070837 88 0.229
10 172_400000 0 89 0.236
10 172_500000 0.010658446 90 0.236
10 50_100000 0 91 0.247
10 50_200000 0 92 0.247
10 50_300000 0.026324841 93 0.247
10 50_800000 0 94 0.273
10 50_1100000 0 95 0.273
10 50_1200000 0.026182179 96 0.273
10 50_1300000 0 97 0.299
10 50_1400000 0.01679944 98 0.299
10 209_0 0.005168166 99 0.316
10 209_100000 0 100 0.321
10 209_200000 0 101 0.321
10 209_300000 0.006272384 102 0.321
10 490_0 0.058817053 103 0.327
10 125b_600000 0.019299514 104 0.386
10 125b_500000 0 105 0.405
10 125b_400000 0.005383183 106 0.405
10 125b_300000 0.021230103 107 0.411
10 125b_200000 0 108 0.432
10 125b_100000 0 109 0.432
10 125b_0 0.063627164 110 0.432
10 188_400000 0 111 0.496
10 188_300000 0.012121336 112 0.496
10 188_200000 0 113 0.508
10 311_100000 0.020576539 114 0.508
10 311_0 0 115 0.528
10 204_0 0 116 0.528
10 204_100000 0.016173811 117 0.528
10 204_200000 0 118 0.544
10 204_300000 0 119 0.544
10 204_400000 0.012384256 120 0.544
10 87_0 0.027944579 121 0.557
10 87_100000 0.007630954 122 0.585
10 87_200000 0.007986273 123 0.592
10 87_300000 0.019119164 124 0.600
10 87_400000 0.007528137 125 0.620
10 87_500000 0 126 0.627
10 87_600000 0.02486068 127 0.627
10 87_700000 0 128 0.652
10 87_800000 0.035209659 129 0.652
10 87_900000 0 130 0.687
10 87_1000000 0.007838858 131 0.687
10 159_0 0.022898936 132 0.695
10 159_100000 0.010545093 133 0.718
10 159_200000 0 134 0.728
10 159_300000 0.016175721 135 0.728
10 159_400000 0 136 0.745
10 159_500000 137 0.745
11 75_100000 0 138 0.000
11 75_200000 0.005108174 139 0.000
11 75_300000 0.014945275 140 0.005
11 75_400000 0.008491395 141 0.020
11 75_500000 0 142 0.029
11 75_600000 0 143 0.029
11 75_700000 0.052957362 144 0.029
11 75_900000 0 145 0.082
11 75_1000000 0 146 0.082
11 75_1100000 0 147 0.082
11 228_0 0.019349133 148 0.082
11 228_100000 0.012844921 149 0.101
11 228_200000 0 150 0.114
11 228_300000 0.02414387 151 0.114
11 273b_0 0 152 0.138
11 273b_100000 0.025146319 153 0.138
11 213_0 0.005892438 154 0.163
11 213_100000 0.006335147 155 0.169
11 213_200000 0 156 0.175
11 213_300000 0.014990019 157 0.175
11 63a_0 0.002393662 158 0.190
11 63a_100000 0.011782609 159 0.193
11 63a_200000 0.029129244 160 0.204
11 30_1900000 0.011270536 161 0.234
11 30_1800000 0.001843606 162 0.245
11 30_1700000 0.019929503 163 0.247
11 30_1600000 0 164 0.267
11 30_1500000 0 165 0.267
11 30_1400000 0.010570745 166 0.267
11 30_1300000 0.015582659 167 0.277
11 30_1200000 0.006482859 168 0.293
11 30_1100000 0 169 0.299
11 48b_600000 0.012280479 170 0.299
11 48b_500000 0.012283343 171 0.311
11 48b_400000 0.012281092 172 0.324
11 48b_300000 0 173 0.336
11 48b_200000 0.02285699 174 0.336
11 243_0 0.01387298 175 0.359
11 243_100000 0 176 0.373
11 243_200000 0.25 177 0.373
11 779_0 0.026752502 178 0.623
11 161_0 0.006854931 179 0.650
11 161_100000 0.011479066 180 0.656
11 161_200000 0 181 0.668
11 161_300000 0 182 0.668
11 161_400000 0.003981592 183 0.668
11 161_500000 0 184 0.672
11 162_500000 0 185 0.672
11 162_400000 0.020088796 186 0.672
11 162_300000 0.007000863 187 0.692
11 162_200000 0 188 0.699
11 162_100000 0.029966667 189 0.699
11 598_0 0 190 0.729
11 100_800000 0 191 0.729
11 100_700000 0.03668059 192 0.729
11 100_600000 0 193 0.766
11 100_500000 0 194 0.766
11 100_400000 0 195 0.766
11 100_300000 0.010811141 196 0.766
11 100_200000 0 197 0.776
11 100_100000 0 198 0.776
11 100_0 0.005396102 199 0.776
11 221a_0 0 200 0.782
11 221a_100000 0.008105441 201 0.782
11 167_0 0 202 0.790
11 167_100000 0 203 0.790
11 167_200000 0 204 0.790
11 131_0 0 205 0.790
11 131_600000 0 206 0.790
11 239_0 0.004158785 207 0.790
11 239_100000 0 208 0.794
11 239_200000 0.004152227 209 0.794
11 239_300000 0 210 0.798
11 49_0 0.012318601 211 0.798
11 49_200000 0 212 0.811
11 49_300000 0 213 0.811
11 49_500000 0 214 0.811
11 49_1100000 0.040684576 215 0.811
11 145_0 0 216 0.851
11 6b_1200000 0.02317087 217 0.851
11 47b_900000 0 218 0.874
11 47b_400000 0.015418931 219 0.874
11 47b_100000 0.002233293 220 0.890
11 39b_600000 0.004217939 221 0.892
11 39b_500000 0.019390642 222 0.896
11 39b_400000 0 223 0.916
11 39b_300000 0.017089526 224 0.916
11 39b_200000 0 225 0.933
11 39b_100000 0.01702552 226 0.933
11 185_0 0 227 0.950
11 185_100000 0.013697121 228 0.950
11 185_400000 0.01113116 229 0.963
11 182_400000 0 230 0.975
11 182_300000 0.00530292 231 0.975
11 182_200000 0.010678134 232 0.980
11 182_100000 0 233 0.991
11 415_0 234 0.991
12 363_0 0 235 0.000
12 58b_900000 0 236 0.000
12 58b_800000 0.004052797 237 0.000
12 58b_700000 0 238 0.004
12 58b_600000 0 239 0.004
12 58b_500000 0.002572256 240 0.004
12 58b_400000 0.007524875 241 0.007
12 58b_300000 0.014980474 242 0.014
12 58b_200000 0 243 0.029
12 58b_100000 0 244 0.029
12 58b_0 0.003974328 245 0.029
12 220_400000 0 246 0.033
12 220_300000 0.023654513 247 0.033
12 220_200000 0 248 0.057
12 220_100000 0.009911752 249 0.057
12 220_0 0.00937729 250 0.067
12 336_100000 0.028053596 251 0.076
12 336_0 0.012021308 252 0.104
12 132_700000 0.014524727 253 0.116
12 132_600000 0.027803721 254 0.131
12 132_500000 0.025733873 255 0.158
12 132_400000 0 256 0.184
12 132_300000 0 257 0.184
12 132_200000 0.018118356 258 0.184
12 132_100000 0.007985937 259 0.202
12 132_0 0 260 0.210
12 224_300000 0 261 0.210
12 224_200000 0.010445771 262 0.210
12 224_100000 0.010444671 263 0.221
12 224_0 0 264 0.231
12 380_0 0 265 0.231
12 785_0 0.013457183 266 0.231
12 332_100000 0.013457166 267 0.245
12 332_0 0 268 0.258
12 349_0 0 269 0.258
12 349_100000 0.034282835 270 0.258
12 703_0 0 271 0.292
12 381_100000 0 272 0.292
12 299_300000 0.01916481 273 0.292
12 299_100000 0 274 0.312
12 201_300000 0 275 0.312
12 201_200000 0 276 0.312
12 201_0 0.026757889 277 0.312
12 91_200000 0 278 0.338
12 91_300000 0 279 0.338
12 91_400000 0.02025849 280 0.338
12 91_500000 0 281 0.359
12 91_600000 0 282 0.359
12 91_800000 0.083592597 283 0.359
12 917_0 0 284 0.442
12 394_0 0.005351385 285 0.442
12 143_100000 0 286 0.448
12 143_200000 0 287 0.448
12 143_300000 0.006295366 288 0.448
12 241_0 0.005715082 289 0.454
12 241_100000 0.012120184 290 0.460
12 241_300000 0 291 0.472
12 160_0 0 292 0.472
12 160_100000 0.014896278 293 0.472
12 160_200000 0 294 0.487
12 160_300000 0.0050933 295 0.487
12 160_400000 0 296 0.492
12 114a_0 0 297 0.492
12 114a_200000 0.003827429 298 0.492
12 114a_300000 0.003783698 299 0.495
12 450_0 0.016251482 300 0.499
12 31_1300000 0 301 0.515
12 31_800000 0 302 0.515
12 31_700000 0.004548946 303 0.515
12 31_200000 0 304 0.520
12 337_100000 0 305 0.520
12 78_1000000 0 306 0.520
12 78_1100000 0.010075448 307 0.520
12 553_0 0.119689287 308 0.530
12 250_0 0 309 0.650
12 250_100000 0.019535922 310 0.650
12 125a_0 0.00621964 311 0.669
12 125a_100000 0 312 0.676
12 271_200000 0.005659362 313 0.676
12 271_100000 0.039937102 314 0.681
12 497_0 0 315 0.721
12 819_0 0 316 0.721
12 577_0 0 317 0.721
12 142_600000 0.014116397 318 0.721
12 142_500000 0 319 0.735
12 142_400000 0 320 0.735
12 142_300000 0.001933423 321 0.735
12 142_200000 0.002102273 322 0.737
12 142_100000 0 323 0.739
12 142_0 0.023673124 324 0.739
12 435_0 0.00956687 325 0.763
12 140a_200000 0.006041788 326 0.773
12 140a_100000 0.010653161 327 0.779
12 140a_0 0 328 0.789
12 37_1700000 0.019055068 329 0.789
12 37_1600000 0 330 0.808
12 37_1500000 0.00598702 331 0.808
12 37_1400000 0 332 0.814
12 37_1300000 0.036623418 333 0.814
12 37_1200000 0 334 0.851
12 37_1100000 0.016951319 335 0.851
12 37_1000000 0 336 0.868
12 37_900000 0.035788335 337 0.868
12 37_800000 0.008680911 338 0.904
12 37_700000 0 339 0.912
12 37_600000 0 340 0.912
12 37_500000 0.009850467 341 0.912
12 37_400000 0.001688134 342 0.922
12 37_300000 0.023589157 343 0.924
12 37_200000 0 344 0.947
12 37_100000 0 345 0.947
12 37_0 0.020807492 346 0.947
12 905_0 0.012994345 347 0.968
12 334_0 0 348 0.981
12 334_100000 0 349 0.981
12 297_200000 0.00410226 350 0.981
12 297_100000 0 351 0.985
12 297_0 0.010985616 352 0.985
12 39a_700000 0.024764426 353 0.996
12 39a_600000 0.006467043 354 1.021
12 39a_500000 0.030059323 355 1.028
12 39a_400000 0 356 1.058
12 39a_300000 0 357 1.058
12 39a_200000 0.035646441 358 1.058
12 39a_100000 0 359 1.093
12 39a_0 0.062677966 360 1.093
12 17b_0 0 361 1.156
12 17b_100000 0.012305723 362 1.156
12 17b_200000 0 363 1.168
12 17b_300000 0.012493144 364 1.168
12 17b_400000 0.021022797 365 1.181
12 17b_500000 0.018217096 366 1.202
12 17b_600000 0.008891942 367 1.220
12 17b_700000 0.009956929 368 1.229
12 17b_800000 0.026326729 369 1.239
12 17b_900000 0 370 1.265
12 17b_1000000 0.011508536 371 1.265
12 17b_1100000 0.00076541 372 1.277
12 17b_1200000 0.010394264 373 1.277
12 683_0 0.02146049 374 1.288
12 44b_0 0 375 1.309
12 44b_100000 0 376 1.309
12 44b_200000 0.023905558 377 1.309
12 44b_300000 0 378 1.333
12 44b_400000 0.012050429 379 1.333
12 44b_500000 0 380 1.345
12 44b_600000 381 1.345
13 122_100000 0 382 0.000
13 122_200000 0.010370958 383 0.000
13 122_300000 0 384 0.010
13 122_400000 0.037538828 385 0.010
13 122_500000 0.031578947 386 0.048
13 122_600000 0 387 0.079
13 122_700000 0.007521224 388 0.079
13 235_300000 0 389 0.087
13 235_200000 0.02332564 390 0.087
13 235_100000 0 391 0.110
13 235_0 0 392 0.110
13 52_0 0.015155602 393 0.110
13 52_200000 0 394 0.125
13 52_300000 0.035552412 395 0.125
13 52_800000 0 396 0.161
13 52_1300000 0 397 0.161
13 52_1400000 0.037465382 398 0.161
13 390_0 0.009373884 399 0.199
13 129b_200000 0.00467556 400 0.208
13 129b_300000 0 401 0.213
13 129b_400000 0.009829976 402 0.213
13 369_0 0.030406296 403 0.222
13 329_100000 0 404 0.253
13 329_0 0.012129632 405 0.253
13 139_600000 0 406 0.265
13 139_500000 0 407 0.265
13 139_400000 0 408 0.265
13 139_300000 0 409 0.265
13 139_200000 0 410 0.265
13 186_0 0 411 0.265
13 186_100000 0.010803487 412 0.265
13 186_200000 0 413 0.276
13 186_300000 0 414 0.276
13 28_1600000 0.003141039 415 0.276
13 28_1400000 0.002260519 416 0.279
13 28_1300000 0 417 0.281
13 28_1200000 0 418 0.281
13 28_1100000 0.005814625 419 0.281
13 34_600000 0 420 0.287
13 34_700000 0.007558735 421 0.287
13 34_1100000 0 422 0.295
13 34_1200000 0 423 0.295
13 34_1400000 0.024474741 424 0.295
13 264_200000 0 425 0.319
13 264_100000 0 426 0.319
13 264_0 0.025241966 427 0.319
13 236_100000 0 428 0.344
13 236_200000 0.014129596 429 0.344
13 225_300000 0 430 0.358
13 225_200000 0 431 0.358
13 107_100000 0.027687894 432 0.358
13 107_200000 0 433 0.386
13 107_300000 0.0138106 434 0.386
13 107_700000 0 435 0.400
13 107_800000 0.013295404 436 0.400
13 402_0 0 437 0.413
13 230_0 0 438 0.413
13 230_300000 0.028713255 439 0.413
13 174_100000 0.010062014 440 0.442
13 174_200000 0 441 0.452
13 174_300000 0 442 0.452
13 174_400000 0 443 0.452
13 344_0 0.055599943 444 0.452
13 108_800000 0.006805558 445 0.508
13 108_700000 0.008639193 446 0.514
13 108_600000 0.02322937 447 0.523
13 108_500000 0 448 0.546
13 108_400000 0.017317465 449 0.546
13 108_300000 0.011657078 450 0.564
13 108_200000 0.00967903 451 0.575
13 108_100000 0 452 0.585
13 108_0 0 453 0.585
13 245_300000 0.011015672 454 0.585
13 245_200000 0.01378722 455 0.596
13 245_100000 0 456 0.610
13 245_0 0.039411973 457 0.610
13 4b_3800000 0.00908018 458 0.649
13 4b_3700000 0.016332873 459 0.658
13 4b_3600000 0 460 0.674
13 4b_3500000 0.021364566 461 0.674
13 4b_3400000 0 462 0.696
13 4b_3300000 0.011081052 463 0.696
13 4b_3200000 0 464 0.707
13 4b_3100000 0.025991499 465 0.707
13 4b_3000000 0 466 0.733
13 4b_2900000 0.018836736 467 0.733
13 4b_2800000 0.007748189 468 0.752
13 4b_2700000 0 469 0.759
13 4b_2600000 0.008888142 470 0.759
13 4b_2500000 0.028458024 471 0.768
13 4b_2400000 0.00283497 472 0.797
13 4b_2300000 0.019280143 473 0.800
13 4b_2200000 0.022039297 474 0.819
13 4b_2100000 0 475 0.841
13 4b_2000000 0.024000273 476 0.841
13 4b_1900000 0.012664128 477 0.865
13 4b_1800000 0 478 0.878
13 4b_1700000 0.014816318 479 0.878
13 4b_1600000 0.008982817 480 0.892
13 4b_1500000 0.00866051 481 0.901
13 4b_1400000 0.030330751 482 0.910
13 4b_1300000 0 483 0.940
13 4b_1200000 0.020637501 484 0.940
13 4b_1100000 0 485 0.961
13 4b_1000000 0 486 0.961
13 4b_900000 0.04013631 487 0.961
13 4b_800000 0.005913366 488 1.001
13 4b_700000 0.011920278 489 1.007
13 4b_600000 0 490 1.019
13 4b_500000 0 491 1.019
13 4b_400000 0.024138296 492 1.019
13 4b_300000 0.006485148 493 1.043
13 4b_200000 0.034218423 494 1.050
13 4b_100000 0.00718858 495 1.084
13 4b_0 0 496 1.091
13 83b_800000 0 497 1.091
13 83b_700000 0.01582658 498 1.091
13 83b_600000 0.0456031 499 1.107
13 83b_500000 0 500 1.153
13 83b_400000 0 501 1.153
13 83b_300000 0.027362557 502 1.153
13 83b_200000 0.013809266 503 1.180
13 83b_100000 0.004432115 504 1.194
13 83b_0 0.000569915 505 1.198
13 115a_300000 0.009883738 506 1.199
13 115a_200000 0.009883616 507 1.209
13 115a_100000 0 508 1.218
13 115a_0 509 1.218
14 102_0 0 510 0.000
14 102_100000 0.008677991 511 0.000
14 102_200000 0 512 0.009
14 102_300000 0.04128302 513 0.009
14 102_400000 0.007770441 514 0.050
14 102_500000 0.01707378 515 0.058
14 102_600000 0.013208548 516 0.075
14 102_700000 0.013707552 517 0.088
14 102_800000 0.009243319 518 0.102
14 102_900000 0.024124676 519 0.111
14 6c_0 0 520 0.135
14 6c_100000 0.010158717 521 0.135
14 26a_0 0.004707694 522 0.145
14 26a_100000 0.015156373 523 0.150
14 26a_200000 0 524 0.165
14 26a_300000 0.00839957 525 0.165
14 26a_400000 0.001328185 526 0.174
14 26a_500000 0.022407793 527 0.175
14 26a_600000 0 528 0.197
14 26a_700000 0.026791524 529 0.197
14 26a_800000 0 530 0.224
14 26a_900000 0 531 0.224
14 26a_1000000 0.014336316 532 0.224
14 26a_1100000 0.021251048 533 0.238
14 26a_1200000 0 534 0.260
14 26a_1300000 0.066631386 535 0.260
14 278_0 0 536 0.326
14 278_100000 0.014618874 537 0.326
14 148_100000 0.019856738 538 0.341
14 148_200000 0 539 0.361
14 148_300000 0 540 0.361
14 148_400000 0.004021844 541 0.361
14 148_500000 0.004204665 542 0.365
14 198_0 0 543 0.369
14 198_100000 0.004334919 544 0.369
14 198_200000 0.011960806 545 0.373
14 198_300000 0.019667351 546 0.385
14 101_0 0.004064084 547 0.405
14 101_100000 0.003476257 548 0.409
14 101_200000 0.00622064 549 0.412
14 101_300000 0.02727873 550 0.419
14 101_400000 0.011339423 551 0.446
14 101_600000 0 552 0.457
14 101_700000 0 553 0.457
14 101_800000 0.006031899 554 0.457
14 17a_900000 0 555 0.463
14 17a_800000 0 556 0.463
14 17a_700000 0.022013553 557 0.463
14 17a_500000 0.016723647 558 0.485
14 17a_400000 0 559 0.502
14 17a_300000 0.006150958 560 0.502
14 17a_200000 0 561 0.508
14 17a_100000 0.027034287 562 0.508
14 453_0 0 563 0.535
14 208_0 0.005504443 564 0.535
14 208_100000 0 565 0.541
14 208_300000 0 566 0.541
14 208_400000 0 567 0.541
14 247_200000 0 568 0.541
14 247_100000 0.011671549 569 0.541
14 247_0 0 570 0.552
14 419_0 0.014469765 571 0.552
14 207_300000 0 572 0.567
14 207_200000 0.023987939 573 0.567
14 207_100000 0 574 0.591
14 207_0 0 575 0.591
14 164_500000 0 576 0.591
14 164_400000 0 577 0.591
14 164_300000 0.015976022 578 0.591
14 164_200000 0.016003106 579 0.607
14 164_100000 0 580 0.623
14 164_0 0.016325325 581 0.623
14 123_700000 0 582 0.639
14 123_600000 0.006506425 583 0.639
14 123_500000 0.015436288 584 0.646
14 123_400000 0.007689281 585 0.661
14 123_300000 0.008179537 586 0.669
14 123_200000 0.014755716 587 0.677
14 123_100000 0.003533293 588 0.692
14 123_0 0.007025709 589 0.695
14 15_0 0.01171541 590 0.702
14 15_100000 0.024445146 591 0.714
14 15_200000 0.006950596 592 0.738
14 15_300000 0.010110939 593 0.745
14 15_400000 0.042040916 594 0.756
14 15_500000 0 595 0.798
14 15_600000 0 596 0.798
14 15_700000 0 597 0.798
14 15_800000 0 598 0.798
14 15_1000000 0.038905165 599 0.798
14 127_400000 0 600 0.836
14 127_300000 0 601 0.836
14 127_200000 0.015730684 602 0.836
14 168_200000 0.004926756 603 0.852
14 168_300000 0 604 0.857
14 168_400000 0 605 0.857
14 168_500000 0 606 0.857
14 211_400000 0 607 0.857
14 211_300000 0 608 0.857
14 211_200000 0 609 0.857
14 211_100000 0 610 0.857
14 211_0 0.017591232 611 0.857
14 300_200000 0 612 0.875
14 300_100000 0.014397007 613 0.875
14 300_0 0.014976849 614 0.889
14 126_700000 0.011785208 615 0.904
14 126_600000 0 616 0.916
14 126_500000 0 617 0.916
14 126_400000 0 618 0.916
14 126_300000 0.014279335 619 0.916
14 126_200000 0 620 0.930
14 126_100000 0 621 0.930
14 126_0 0 622 0.930
14 291_0 0 623 0.930
14 291_100000 0.008085356 624 0.930
14 291_200000 0 625 0.938
14 361_100000 0 626 0.938
14 361_0 0 627 0.938
14 121_800000 0.014130279 628 0.938
14 121_700000 0.01370603 629 0.952
14 121_600000 0.016930064 630 0.966
14 121_400000 0 631 0.983
14 121_300000 0.006152923 632 0.983
14 121_200000 0.024602021 633 0.989
14 121_100000 0.026891099 634 1.014
14 303_100000 0 635 1.041
14 303_0 0.025309135 636 1.041
14 195_0 0 637 1.066
14 195_100000 0 638 1.066
14 195_200000 0 639 1.066
14 219_0 0 640 1.066
14 219_100000 0 641 1.066
14 219_200000 0.004254159 642 1.066
14 323_0 0 643 1.070
14 323_100000 0 644 1.070
14 366_0 0.005921035 645 1.070
14 128_100000 0.010433138 646 1.076
14 128_200000 0.007894741 647 1.087
14 128_300000 0 648 1.094
14 128_400000 0.011068234 649 1.094
14 128_500000 0.011192189 650 1.106
14 128_600000 0.010911917 651 1.117
14 178_0 0 652 1.128
14 178_100000 0 653 1.128
14 178_200000 0 654 1.128
14 178_300000 0 655 1.128
14 178_400000 0.004093869 656 1.128
14 178_500000 0.014460609 657 1.132
14 140b_400000 0 658 1.146
14 140b_300000 0 659 1.146
14 140b_200000 0.014430131 660 1.146
14 140b_100000 0 661 1.161
14 140b_0 0 662 1.161
14 92_0 0.012412938 663 1.161
14 92_100000 0 664 1.173
14 92_200000 0.004158521 665 1.173
14 92_300000 0.01258699 666 1.177
14 92_400000 0 667 1.190
14 92_500000 0 668 1.190
14 92_600000 0.015852758 669 1.190
14 92_700000 0 670 1.206
14 92_800000 0 671 1.206
14 92_900000 0.02023764 672 1.206
14 459_0 0 673 1.226
14 290_0 0.008039086 674 1.226
14 290_100000 0 675 1.234
14 290_200000 0 676 1.234
14 2_0 0.022507739 677 1.234
14 2_100000 0 678 1.256
14 2_200000 0.024674531 679 1.256
14 2_300000 0 680 1.281
14 2_400000 0 681 1.281
14 2_500000 0.012760205 682 1.281
14 2_600000 0.019484111 683 1.294
14 2_700000 0.008401362 684 1.313
14 2_800000 0.033644869 685 1.322
14 2_1000000 0 686 1.355
14 2_1100000 0.019680209 687 1.355
14 2_1200000 0.104950004 688 1.375
14 2_1700000 0.020253167 689 1.480
14 2_1800000 0.031997091 690 1.500
14 2_2000000 0.010673733 691 1.532
14 2_2100000 0 692 1.543
14 2_2200000 0.035574482 693 1.543
14 2_2300000 0.001114272 694 1.579
14 2_2400000 0.025705967 695 1.580
14 2_2500000 0 696 1.605
14 2_2600000 0.026552476 697 1.605
14 2_2700000 0 698 1.632
14 2_2800000 0 699 1.632
14 2_2900000 0.01637013 700 1.632
14 2_3000000 0.007998707 701 1.648
14 2_3100000 0.007058891 702 1.656
14 2_3200000 0.002399361 703 1.663
14 2_3300000 0.031972844 704 1.666
14 2_3400000 0.008476319 705 1.698
14 2_3500000 0.02456839 706 1.706
14 2_3600000 0 707 1.731
14 2_3700000 0 708 1.731
14 2_3800000 0 709 1.731
14 2_3900000 0.023196387 710 1.731
14 2_4000000 0.007550777 711 1.754
14 2_4100000 0 712 1.761
14 2_4200000 0.017486187 713 1.761
14 2_4300000 0.01753461 714 1.779
14 2_4400000 0 715 1.797
14 2_4500000 0.021039209 716 1.797
14 58a_0 0 717 1.818
14 58a_100000 0 718 1.818
14 58a_200000 0.008381191 719 1.818
14 58a_300000 0.004191692 720 1.826
14 58a_400000 0 721 1.830
14 58a_500000 0 722 1.830
14 396_0 723 1.830
1 853_0 0.028414257 724 0.000
1 79_0 0 725 0.028
1 79_100000 0 726 0.028
1 79_200000 0.00827258 727 0.028
1 79_300000 0.000366622 728 0.037
1 79_400000 0.016128381 729 0.037
1 79_500000 0 730 0.053
1 79_600000 0.022286365 731 0.053
1 79_700000 0 732 0.075
1 79_800000 0.027726251 733 0.075
1 79_900000 0 734 0.103
1 79_1000000 0.030440065 735 0.103
1 79_1100000 0.011044912 736 0.134
1 69_0 0.022311091 737 0.145
1 69_100000 0 738 0.167
1 69_200000 0.020910544 739 0.167
1 69_300000 0 740 0.188
1 69_400000 0.006851169 741 0.188
1 69_500000 0.006852185 742 0.195
1 69_600000 0 743 0.202
1 69_700000 0 744 0.202
1 69_800000 0.010927764 745 0.202
1 69_900000 0.016404655 746 0.213
1 69_1000000 0 747 0.229
1 69_1100000 0.004540732 748 0.229
1 69_1200000 0 749 0.233
1 181_0 0.01671047 750 0.233
1 181_100000 0 751 0.250
1 181_200000 0 752 0.250
1 181_300000 0.039729983 753 0.250
1 181_400000 0.00954293 754 0.290
1 181_500000 0 755 0.299
1 559_0 0 756 0.299
1 60_0 0.026886634 757 0.299
1 60_100000 0.02240511 758 0.326
1 60_200000 0 759 0.349
1 60_300000 0.042482472 760 0.349
1 60_400000 0 761 0.391
1 60_500000 0 762 0.391
1 60_600000 0.017967479 763 0.391
1 60_700000 0 764 0.409
1 60_800000 0.022556298 765 0.409
1 60_900000 0 766 0.432
1 60_1000000 0 767 0.432
1 60_1100000 0 768 0.432
1 60_1200000 0.013860314 769 0.432
1 60_1300000 0 770 0.446
1 60_1400000 0 771 0.446
1 333_100000 0 772 0.446
1 333_0 0.008734256 773 0.446
1 165_0 0.01956624 774 0.454
1 165_100000 0 775 0.474
1 165_200000 0 776 0.474
1 165_300000 0 777 0.474
1 165_400000 0.022680837 778 0.474
1 165_500000 0 779 0.497
1 362_100000 0 780 0.497
1 362_0 0 781 0.497
1 308_100000 0.016995955 782 0.497
1 308_0 0.031488766 783 0.514
1 314_100000 0.006528863 784 0.545
1 314_0 0.029403985 785 0.552
1 240_300000 0 786 0.581
1 240_200000 0 787 0.581
1 240_100000 0.017215557 788 0.581
1 82_1000000 0 789 0.598
1 82_900000 0 790 0.598
1 82_800000 0.017216832 791 0.598
1 82_700000 0 792 0.615
1 82_600000 0 793 0.615
1 82_500000 0.014408025 794 0.615
1 82_400000 0.014408109 795 0.630
1 82_300000 0 796 0.644
1 82_200000 0 797 0.644
1 82_100000 0.01479515 798 0.644
1 427_0 0 799 0.659
1 254b_0 0.007755616 800 0.659
1 254b_100000 0 801 0.667
1 80a_900000 0.005763603 802 0.667
1 80a_800000 0 803 0.673
1 80a_700000 0 804 0.673
1 80a_600000 0.008066199 805 0.673
1 80a_500000 0.016227436 806 0.681
1 80a_400000 0 807 0.697
1 80a_300000 0.006041897 808 0.697
1 80a_200000 0.006040688 809 0.703
1 80a_100000 0 810 0.709
1 80a_0 0 811 0.709
1 146_0 0.006196363 812 0.709
1 146_100000 0.005836382 813 0.715
1 146_200000 0 814 0.721
1 115b_100000 0 815 0.721
1 115b_200000 0 816 0.721
1 115b_300000 0 817 0.721
1 115b_400000 0 818 0.721
1 187_0 0 819 0.721
1 232_300000 0.012118107 820 0.721
1 232_200000 0 821 0.733
1 232_100000 0 822 0.733
1 74_1200000 0.012386913 823 0.733
1 74_1100000 0 824 0.745
1 74_1000000 0 825 0.745
1 74_600000 826 0.745
2 18_2200000 0 827 0.000
2 18_2100000 0.034571016 828 0.000
2 18_2000000 0.002204989 829 0.035
2 18_1900000 0.008545317 830 0.037
2 18_1800000 0.02477162 831 0.045
2 18_1700000 0 832 0.070
2 18_1600000 0.025535097 833 0.070
2 18_1500000 0 834 0.096
2 18_1400000 0.007545675 835 0.096
2 18_1300000 0.030691585 836 0.103
2 18_1200000 0 837 0.134
2 18_1100000 0.008502618 838 0.134
2 18_1000000 0 839 0.142
2 18_900000 0.012762117 840 0.142
2 18_800000 0 841 0.155
2 18_700000 0 842 0.155
2 18_600000 0.016969467 843 0.155
2 18_500000 0.017313055 844 0.172
2 18_400000 0.016685524 845 0.189
2 18_300000 0.017173456 846 0.206
2 18_200000 0.007883585 847 0.223
2 18_100000 0 848 0.231
2 18_0 0 849 0.231
2 697_0 0.024573302 850 0.231
2 89_0 0.016710093 851 0.256
2 89_100000 0.011146615 852 0.272
2 89_200000 0 853 0.284
2 89_300000 0.008121226 854 0.284
2 89_400000 0.011387379 855 0.292
2 89_500000 0.02099918 856 0.303
2 89_600000 0 857 0.324
2 89_700000 0.016750137 858 0.324
2 89_800000 0 859 0.341
2 89_900000 0.016741133 860 0.341
2 89_1000000 0 861 0.358
2 44a_700000 0 862 0.358
2 44a_600000 0.013962986 863 0.358
2 44a_500000 0.014421156 864 0.372
2 44a_400000 0.008648283 865 0.386
2 44a_300000 0.010304224 866 0.395
2 44a_200000 0 867 0.405
2 44a_100000 0 868 0.405
2 44a_0 0 869 0.405
2 212_0 0.020825798 870 0.405
2 212_100000 0 871 0.426
2 212_200000 0.020982139 872 0.426
2 212_300000 0 873 0.447
2 212_400000 0.01818088 874 0.447
2 249_0 0.020602417 875 0.465
2 249_100000 0 876 0.486
2 249_200000 0.013069925 877 0.486
2 81_0 0 878 0.499
2 81_100000 0 879 0.499
2 81_200000 0.033844755 880 0.499
2 81_300000 0.012673057 881 0.532
2 81_400000 0 882 0.545
2 81_500000 0 883 0.545
2 81_700000 0.004076225 884 0.545
2 81_800000 0.003945435 885 0.549
2 81_900000 0 886 0.553
2 81_1000000 0 887 0.553
2 112_0 0 888 0.553
2 112_100000 0 889 0.553
2 112_200000 0.00197343 890 0.553
2 112_300000 0.002006892 891 0.555
2 112_400000 0 892 0.557
2 112_500000 0 893 0.557
2 112_600000 0 894 0.557
2 112_800000 0 895 0.557
2 216_300000 0.009342694 896 0.557
2 19_0 0.011634463 897 0.566
2 19_100000 0 898 0.578
2 19_200000 0 899 0.578
2 19_300000 0 900 0.578
2 19_400000 0.008682054 901 0.578
2 19_600000 0 902 0.587
2 19_800000 0 903 0.587
2 19_1000000 0 904 0.587
2 19_1100000 5.25E-005 905 0.587
2 19_1200000 0.004288978 906 0.587
2 19_1300000 0 907 0.591
2 19_1400000 0.022780596 908 0.591
2 19_1500000 0 909 0.614
2 73_600000 0 910 0.614
2 73_700000 0 911 0.614
2 73_800000 0 912 0.614
2 73_900000 0 913 0.614
2 73_1000000 0 914 0.614
2 73_1100000 0 915 0.614
2 73_1200000 0 916 0.614
2 65_0 0 917 0.614
2 65_300000 0 918 0.614
2 65_500000 0 919 0.614
2 65_600000 0.017553609 920 0.614
2 65_700000 0 921 0.631
2 65_800000 0.008780327 922 0.631
2 65_1000000 0 923 0.640
2 65_1100000 0.022585294 924 0.640
2 42_200000 0.015083375 925 0.663
2 42_300000 0 926 0.678
2 42_400000 0 927 0.678
2 42_600000 0.009740096 928 0.678
2 42_700000 0 929 0.688
2 42_800000 0.028346511 930 0.688
2 42_900000 0 931 0.716
2 42_1000000 0.008395834 932 0.716
2 42_1200000 0.007946445 933 0.724
2 42_1400000 0 934 0.732
2 42_1500000 0 935 0.732
2 42_1600000 0 936 0.732
2 173_500000 0 937 0.732
2 173_400000 0.010568872 938 0.732
2 173_300000 0 939 0.743
2 173_200000 0.005629727 940 0.743
2 173_100000 0 941 0.749
2 173_0 0.011942333 942 0.749
2 41a_0 0.01595148 943 0.760
2 41a_100000 0.00325881 944 0.776
2 41a_200000 0.004333341 945 0.780
2 41a_300000 0 946 0.784
2 41a_400000 0.005433806 947 0.784
2 41a_500000 0.009797062 948 0.789
2 41a_600000 0 949 0.799
2 41a_700000 0.004277537 950 0.799
2 41a_800000 0 951 0.804
2 41a_900000 0.008555299 952 0.804
2 41a_1000000 0 953 0.812
2 41a_1100000 0 954 0.812
2 41a_1200000 0.043615897 955 0.812
2 41a_1300000 0 956 0.856
2 41a_1400000 0.008560527 957 0.856
2 41a_1500000 0 958 0.864
2 429_0 0.002470822 959 0.864
2 151_0 0.011268887 960 0.867
2 151_100000 0 961 0.878
2 151_200000 0.017049483 962 0.878
2 151_300000 0 963 0.895
2 151_400000 0.016711207 964 0.895
2 151_500000 0 965 0.912
2 151_600000 0.014977442 966 0.912
2 27_0 0.006533257 967 0.927
2 27_100000 0.000889658 968 0.933
2 27_200000 0.009732733 969 0.934
2 27_300000 0.024598747 970 0.944
2 27_400000 0.006516566 971 0.968
2 27_500000 0.006105127 972 0.975
2 27_600000 0.009599287 973 0.981
2 27_700000 0 974 0.991
2 27_800000 0.020457641 975 0.991
2 27_900000 0 976 1.011
2 27_1000000 0 977 1.011
2 27_1100000 0.023952553 978 1.011
2 27_1200000 0 979 1.035
2 27_1300000 0.007314548 980 1.035
2 27_1400000 0.035561494 981 1.042
2 27_1500000 0.001961481 982 1.078
2 27_1600000 0.018189992 983 1.080
2 27_1700000 0.012928166 984 1.098
2 27_1800000 0 985 1.111
2 27_1900000 0 986 1.111
2 27_2000000 987 1.111
3 137_400000 0.017237601 988 0.000
3 137_300000 0.006099576 989 0.017
3 137_200000 0 990 0.023
3 137_100000 0.012672517 991 0.023
3 137_0 0.01908055 992 0.036
3 316_100000 0 993 0.055
3 316_0 0 994 0.055
3 55a_300000 0.011906263 995 0.055
3 55a_200000 0 996 0.067
3 55a_100000 0.013774971 997 0.067
3 116_800000 0.003858269 998 0.081
3 116_700000 0.009655238 999 0.085
3 116_600000 0.005621999 1000 0.094
3 116_500000 0 1001 0.100
3 116_400000 0.006167407 1002 0.100
3 116_300000 0 1003 0.106
3 116_200000 0.006168284 1004 0.106
3 116_100000 0.016403601 1005 0.112
3 116_0 0.010931941 1006 0.129
3 246_200000 0.002818004 1007 0.140
3 246_100000 0.002653828 1008 0.142
3 246_0 0 1009 0.145
3 62_1200000 0 1010 0.145
3 62_1000000 0 1011 0.145
3 62_800000 0 1012 0.145
3 62_700000 0 1013 0.145
3 62_600000 0.016209441 1014 0.145
3 62_500000 0 1015 0.161
3 70b_300000 0 1016 0.161
3 70b_200000 0.010291926 1017 0.161
3 70b_0 0.007085199 1018 0.172
3 389_0 0.005838503 1019 0.179
3 35_0 0.005903207 1020 0.184
3 35_100000 0.006177265 1021 0.190
3 35_400000 0 1022 0.197
3 35_600000 0 1023 0.197
3 35_800000 0.004589056 1024 0.197
3 35_900000 0 1025 0.201
3 35_1100000 0 1026 0.201
3 35_1400000 0 1027 0.201
3 72_300000 0 1028 0.201
3 72_200000 0.002043446 1029 0.201
3 72_100000 0.002106809 1030 0.203
3 72_0 0 1031 0.205
3 23_2100000 0 1032 0.205
3 23_2000000 0.013821821 1033 0.205
3 23_1900000 0 1034 0.219
3 23_1700000 0 1035 0.219
3 23_1600000 0.01872552 1036 0.219
3 23_1500000 0 1037 0.238
3 23_1300000 0 1038 0.238
3 23_1200000 0 1039 0.238
3 23_1100000 0 1040 0.238
3 23_1000000 0 1041 0.238
3 23_900000 0 1042 0.238
3 23_800000 0.014049237 1043 0.238
3 23_700000 0 1044 0.252
3 23_600000 0.00852537 1045 0.252
3 23_500000 0 1046 0.260
3 23_400000 0 1047 0.260
3 23_100000 0.00639929 1048 0.260
3 265_200000 0 1049 0.267
3 265_100000 0.00639718 1050 0.267
3 5a_1900000 0 1051 0.273
3 5a_1800000 0.004460998 1052 0.273
3 5a_1700000 0 1053 0.278
3 5a_1600000 0 1054 0.278
3 5a_1500000 0 1055 0.278
3 5a_1400000 0 1056 0.278
3 5a_1300000 0.009491326 1057 0.278
3 5a_1200000 0.015743758 1058 0.287
3 5a_1100000 0.012986569 1059 0.303
3 5a_1000000 0 1060 0.316
3 5a_900000 0.010521342 1061 0.316
3 5a_800000 0 1062 0.326
3 5a_700000 0.02159666 1063 0.326
3 5a_600000 0 1064 0.348
3 5a_500000 0 1065 0.348
3 5a_400000 0.019853314 1066 0.348
3 5a_300000 0.015972738 1067 0.368
3 5a_200000 0.021459873 1068 0.384
3 106a_0 0.020250508 1069 0.405
3 106a_100000 0 1070 0.426
3 106a_200000 0 1071 0.426
3 106a_300000 0.026827898 1072 0.426
3 106a_400000 0 1073 0.452
3 106a_500000 0.019715372 1074 0.452
3 106a_600000 0 1075 0.472
3 106a_700000 0 1076 0.472
3 258_300000 0.00734655 1077 0.472
3 258_200000 0.012365511 1078 0.479
3 258_100000 0.00980101 1079 0.492
3 258_0 0 1080 0.502
3 169_500000 0 1081 0.502
3 169_400000 0.019730271 1082 0.502
3 169_300000 0.017236333 1083 0.521
3 169_200000 0.011008044 1084 0.539
3 169_100000 0.014779546 1085 0.550
3 169_0 0.028378736 1086 0.564
3 98_900000 0 1087 0.593
3 98_800000 0.018688749 1088 0.593
3 98_700000 0 1089 0.611
3 98_600000 0 1090 0.611
3 98_500000 0 1091 0.611
3 98_400000 0.035637279 1092 0.611
3 98_300000 0.006721242 1093 0.647
3 98_200000 0 1094 0.654
3 98_100000 0 1095 0.654
3 98_0 0.02590724 1096 0.654
3 261_0 0.005893829 1097 0.680
3 261_100000 0 1098 0.686
3 261_200000 0.015848702 1099 0.686
3 273a_0 0.053303599 1100 0.701
3 64_1100000 0 1101 0.755
3 64_1000000 0.018627492 1102 0.755
3 64_900000 0.027502339 1103 0.773
3 64_700000 0.027445026 1104 0.801
3 64_600000 0.020364052 1105 0.828
3 64_500000 0 1106 0.849
3 64_400000 0.008507065 1107 0.849
3 64_300000 0 1108 0.857
3 64_200000 0 1109 0.857
3 64_100000 0 1110 0.857
3 64_0 1111 0.857
4 1_4700000 0.007877704 1112 0.000
4 1_4600000 0.016059901 1113 0.008
4 1_4500000 0.005992055 1114 0.024
4 1_4400000 0.010717877 1115 0.030
4 1_4300000 0 1116 0.041
4 1_4200000 0.023014978 1117 0.041
4 1_4000000 0.023078044 1118 0.064
4 1_3900000 0 1119 0.087
4 1_3800000 0.015668893 1120 0.087
4 1_3700000 0.031689821 1121 0.102
4 1_3600000 0 1122 0.134
4 1_3500000 0.027063209 1123 0.134
4 1_3400000 0.010582835 1124 0.161
4 1_3300000 0.014047087 1125 0.172
4 1_3200000 0 1126 0.186
4 1_3100000 0.029001162 1127 0.186
4 1_3000000 0 1128 0.215
4 1_2900000 0.046796709 1129 0.215
4 1_2800000 0.001342464 1130 0.262
4 1_2700000 0.011994635 1131 0.263
4 1_2600000 0.00071983 1132 0.275
4 1_2500000 0.012397782 1133 0.276
4 1_2400000 0 1134 0.288
4 1_2300000 0.028821487 1135 0.288
4 1_2200000 0 1136 0.317
4 1_2100000 0 1137 0.317
4 1_1900000 0.053431352 1138 0.317
4 1_1700000 0.010275873 1139 0.370
4 1_1600000 0.006958425 1140 0.381
4 1_1500000 0.004144483 1141 0.388
4 1_1400000 0 1142 0.392
4 1_1300000 0.036638596 1143 0.392
4 1_1200000 0 1144 0.428
4 1_1100000 0 1145 0.428
4 1_1000000 0 1146 0.428
4 1_900000 0.020330385 1147 0.428
4 1_800000 0 1148 0.449
4 1_700000 0.035041504 1149 0.449
4 1_600000 0 1150 0.484
4 1_500000 0.010455242 1151 0.484
4 1_400000 0 1152 0.494
4 1_300000 0 1153 0.494
4 1_200000 0.02602781 1154 0.494
4 1_100000 0 1155 0.520
4 1_0 0.019342077 1156 0.520
4 54_1400000 0 1157 0.540
4 54_1300000 0.013938982 1158 0.540
4 54_1200000 0.027546759 1159 0.553
4 54_1100000 0 1160 0.581
4 54_1000000 0.00654839 1161 0.581
4 54_900000 0.007476622 1162 0.588
4 54_800000 0.027836643 1163 0.595
4 54_600000 0.009894979 1164 0.623
4 54_500000 0.008732469 1165 0.633
4 54_400000 0 1166 0.641
4 54_300000 0.030853111 1167 0.641
4 54_100000 0 1168 0.672
4 54_0 0.005642124 1169 0.672
4 120_0 0 1170 0.678
4 120_100000 0 1171 0.678
4 120_300000 0 1172 0.678
4 120_400000 0 1173 0.678
4 120_500000 0 1174 0.678
4 120_600000 0.005980462 1175 0.678
4 120_700000 0.009702365 1176 0.684
4 150_300000 0.002986781 1177 0.694
4 150_200000 0.003016781 1178 0.697
4 57_800000 0 1179 0.700
4 57_700000 0 1180 0.700
4 57_600000 0.008070935 1181 0.700
4 57_500000 0 1182 0.708
4 57_400000 0 1183 0.708
4 57_300000 0 1184 0.708
4 57_200000 0.024945338 1185 0.708
4 57_0 0.010913878 1186 0.733
4 154_0 0.001469809 1187 0.744
4 154_200000 0 1188 0.745
4 154_300000 0.003810036 1189 0.745
4 154_400000 0.003600302 1190 0.749
4 154_500000 0 1191 0.752
4 254a_0 0 1192 0.752
4 254a_100000 0 1193 0.752
4 110_200000 0.007853434 1194 0.752
4 110_300000 0.00023421 1195 0.760
4 110_400000 0 1196 0.761
4 134_700000 0.006074082 1197 0.761
4 177_300000 0.006077308 1198 0.767
4 177_200000 0 1199 0.773
4 177_100000 0 1200 0.773
4 177_0 0 1201 0.773
4 6a_2300000 0.014554867 1202 0.773
4 6a_2200000 0.02202471 1203 0.787
4 6a_2100000 0 1204 0.809
4 6a_2000000 0.040144013 1205 0.809
4 6a_1900000 0 1206 0.849
4 6a_1800000 0 1207 0.849
4 6a_1700000 0 1208 0.849
4 6a_1600000 0 1209 0.849
4 6a_1300000 0.038269141 1210 0.849
4 6a_1200000 0 1211 0.888
4 6a_1100000 0.005178559 1212 0.888
4 6a_1000000 0.005288957 1213 0.893
4 6a_900000 0 1214 0.898
4 6a_800000 0.009729702 1215 0.898
4 6a_700000 0 1216 0.908
4 6a_600000 0.013194353 1217 0.908
4 6a_500000 0.025343056 1218 0.921
4 6a_200000 0 1219 0.946
4 6a_100000 0.010317812 1220 0.946
4 70c_100000 0 1221 0.957
4 55b_0 0.007919888 1222 0.957
4 55b_100000 0.002383086 1223 0.965
4 55b_200000 0.015511564 1224 0.967
4 55b_300000 0 1225 0.983
4 55b_400000 0.02754601 1226 0.983
4 55b_500000 0.020585321 1227 1.010
4 55b_600000 0 1228 1.031
4 55b_1000000 0.004136236 1229 1.031
4 443_0 0 1230 1.035
4 320_0 0 1231 1.035
4 320_100000 0 1232 1.035
4 346_0 0 1233 1.035
4 346_100000 0.098032733 1234 1.035
4 85_1100000 0.004369605 1235 1.133
4 85_1000000 0.008270139 1236 1.137
4 85_900000 0.001213733 1237 1.146
4 85_800000 0.026790687 1238 1.147
4 85_700000 0 1239 1.174
4 85_600000 0.023825069 1240 1.174
4 85_500000 0 1241 1.197
4 85_400000 0.030599812 1242 1.197
4 85_300000 0 1243 1.228
4 85_200000 0.021423295 1244 1.228
4 85_100000 0.004123622 1245 1.249
4 85_0 0.022695817 1246 1.253
4 1287_0 0.030376273 1247 1.276
4 196_500000 0.008356355 1248 1.307
4 196_400000 0.016116367 1249 1.315
4 196_300000 0 1250 1.331
4 196_200000 0 1251 1.331
4 196_100000 0.020907727 1252 1.331
4 196_0 0 1253 1.352
4 541_0 0.004193483 1254 1.352
4 384_0 0 1255 1.356
4 44c_0 0 1256 1.356
4 44c_100000 0 1257 1.356
4 44c_200000 0 1258 1.356
4 766_0 0.016116179 1259 1.356
4 479_0 0 1260 1.372
4 638_0 0 1261 1.372
4 163_0 0.02007249 1262 1.372
4 163_100000 0.011929059 1263 1.392
4 163_200000 0.010670396 1264 1.404
4 163_300000 0.013218809 1265 1.415
4 163_400000 0.008980627 1266 1.428
4 163_500000 0 1267 1.437
4 163_600000 0 1268 1.437
4 633_0 1269 1.437
5 129a_0 0 1270 0.000
5 129a_100000 0 1271 0.000
5 129a_200000 0 1272 0.000
5 66_0 0.011958105 1273 0.000
5 66_100000 0.011938294 1274 0.012
5 66_200000 0 1275 0.024
5 66_300000 0 1276 0.024
5 66_400000 0.013741684 1277 0.024
5 66_500000 0.018507233 1278 0.038
5 66_600000 0.0299006 1279 0.056
5 66_700000 0.018423726 1280 0.086
5 66_800000 0 1281 0.104
5 66_900000 0 1282 0.104
5 66_1000000 0.020902096 1283 0.104
5 66_1100000 0 1284 0.125
5 66_1200000 0.064507294 1285 0.125
5 32_100000 0.024811493 1286 0.190
5 32_200000 0.019201072 1287 0.215
5 32_300000 0 1288 0.234
5 32_400000 0.036479545 1289 0.234
5 32_500000 0 1290 0.270
5 32_600000 0.005821197 1291 0.270
5 32_700000 0.023533087 1292 0.276
5 32_800000 0.008885038 1293 0.300
5 32_900000 0 1294 0.309
5 32_1000000 0.012757258 1295 0.309
5 32_1100000 0 1296 0.321
5 32_1200000 0 1297 0.321
5 32_1300000 0.029793104 1298 0.321
5 32_1400000 0 1299 0.351
5 32_1500000 0.035405427 1300 0.351
5 32_1600000 0.014964393 1301 0.387
5 32_1700000 0 1302 0.402
5 32_1800000 0.007553367 1303 0.402
5 205_0 0.002454966 1304 0.409
5 205_100000 0.035698833 1305 0.412
5 205_200000 0.021254731 1306 0.447
5 205_300000 0 1307 0.468
5 205_400000 0.016768676 1308 0.468
5 461_0 0 1309 0.485
5 1147_0 0.021744452 1310 0.485
5 242_0 0.017054844 1311 0.507
5 242_100000 0 1312 0.524
5 242_200000 0 1313 0.524
5 242_300000 0 1314 0.524
5 104a_0 0.027416766 1315 0.524
5 104a_100000 0.000256001 1316 0.551
5 104a_200000 0.010515828 1317 0.552
5 104a_300000 0.020771994 1318 0.562
5 244_100000 0 1319 0.583
5 244_0 0.009847924 1320 0.583
5 221b_0 0.09197044 1321 0.593
5 252_0 0.033792154 1322 0.685
5 94_200000 0 1323 0.719
5 94_300000 0 1324 0.719
5 94_400000 0 1325 0.719
5 516_0 0.033301078 1326 0.719
5 141_600000 0.006117883 1327 0.752
5 141_100000 0 1328 0.758
5 53_500000 0 1329 0.758
5 304_100000 0 1330 0.758
5 88_400000 0 1331 0.758
5 153_500000 0.024893419 1332 0.758
5 194_100000 0.003917716 1333 0.783
5 263_100000 0.004411579 1334 0.787
5 326_0 0.016552641 1335 0.791
5 149_300000 0.006208968 1336 0.808
5 149_200000 0.006176196 1337 0.814
5 149_100000 0 1338 0.820
5 149_0 0 1339 0.820
5 449_0 0 1340 0.820
5 158_0 0 1341 0.820
5 158_100000 0 1342 0.820
5 158_200000 0 1343 0.820
5 158_300000 0 1344 0.820
5 158_400000 0 1345 0.820
5 158_500000 0 1346 0.820
5 197_300000 0 1347 0.820
5 197_400000 0 1348 0.820
5 170_300000 0 1349 0.820
5 170_200000 0 1350 0.820
5 170_100000 0 1351 0.820
5 170_0 0 1352 0.820
5 266_200000 0 1353 0.820
5 266_100000 0 1354 0.820
5 266_0 0 1355 0.820
5 288_200000 0 1356 0.820
5 327_0 0 1357 0.820
5 327_100000 0 1358 0.820
5 368_0 0 1359 0.820
5 36_1700000 0 1360 0.820
5 36_1600000 0 1361 0.820
5 36_1500000 0 1362 0.820
5 36_1400000 0 1363 0.820
5 36_1300000 0 1364 0.820
5 36_1200000 0 1365 0.820
5 36_1100000 0 1366 0.820
5 36_1000000 0 1367 0.820
5 36_900000 0 1368 0.820
5 36_800000 0 1369 0.820
5 36_700000 0 1370 0.820
5 36_600000 0 1371 0.820
5 36_500000 0 1372 0.820
5 36_300000 0 1373 0.820
5 36_200000 0 1374 0.820
5 36_100000 0 1375 0.820
5 36_0 1376 0.820
6 270_200000 0 1377 0.000
6 270_100000 0 1378 0.000
6 270_0 0.044379139 1379 0.000
6 104b_400000 0 1380 0.044
6 104b_300000 0.008027182 1381 0.044
6 104b_200000 0 1382 0.052
6 104b_100000 0.018282215 1383 0.052
6 104b_0 0.018302607 1384 0.071
6 51_1400000 0.007338227 1385 0.089
6 51_1300000 0.002197654 1386 0.096
6 51_1200000 0.019481344 1387 0.099
6 51_1100000 0 1388 0.118
6 51_1000000 0.015685153 1389 0.118
6 51_900000 0.002764126 1390 0.134
6 51_800000 0.004984942 1391 0.136
6 51_700000 0.031707047 1392 0.141
6 51_600000 0.017767938 1393 0.173
6 51_500000 0.019810584 1394 0.191
6 51_400000 0.001320853 1395 0.211
6 51_300000 0.009956059 1396 0.212
6 51_200000 0.004474113 1397 0.222
6 51_100000 0.007258396 1398 0.226
6 51_0 0 1399 0.234
6 8_3400000 0 1400 0.234
6 8_3300000 0.022161882 1401 0.234
6 8_3200000 0 1402 0.256
6 8_3100000 0.030487153 1403 0.256
6 8_3000000 0 1404 0.286
6 8_2900000 0.023463208 1405 0.286
6 8_2800000 0.02157516 1406 0.310
6 8_2700000 0.011680777 1407 0.331
6 8_2600000 0.014115896 1408 0.343
6 8_2500000 0.006350117 1409 0.357
6 8_2400000 0.004848468 1410 0.364
6 8_2300000 0 1411 0.368
6 8_2200000 0.015931326 1412 0.368
6 8_2100000 0.014688549 1413 0.384
6 8_2000000 0 1414 0.399
6 8_1900000 0 1415 0.399
6 8_1800000 0.031627068 1416 0.399
6 8_1700000 0 1417 0.431
6 8_1600000 0 1418 0.431
6 8_1500000 0 1419 0.431
6 8_1400000 0 1420 0.431
6 8_1300000 0 1421 0.431
6 8_1200000 0 1422 0.431
6 8_1100000 0 1423 0.431
6 8_1000000 0.006487306 1424 0.431
6 8_900000 0.00702986 1425 0.437
6 8_800000 0.007853216 1426 0.444
6 8_700000 0.012253982 1427 0.452
6 8_600000 0 1428 0.464
6 8_500000 0 1429 0.464
6 8_400000 0.01001085 1430 0.464
6 8_300000 0.016354586 1431 0.474
6 8_200000 0 1432 0.491
6 8_100000 0 1433 0.491
6 8_0 0.029189348 1434 0.491
6 293_100000 0.008202399 1435 0.520
6 293_0 0.0114569 1436 0.528
6 21_0 0 1437 0.540
6 21_100000 0.028471304 1438 0.540
6 21_200000 0 1439 0.568
6 21_300000 0.011825797 1440 0.568
6 21_500000 0 1441 0.580
6 21_600000 0.030664857 1442 0.580
6 21_700000 0.00864025 1443 0.610
6 21_1200000 0 1444 0.619
6 21_1600000 0 1445 0.619
6 21_1700000 0.016217203 1446 0.619
6 21_2000000 0 1447 0.635
6 307_100000 0 1448 0.635
6 136_0 0.008929082 1449 0.635
6 136_300000 0.012938907 1450 0.644
6 136_400000 0.016969912 1451 0.657
6 136_500000 0 1452 0.674
6 136_600000 0.009471959 1453 0.674
6 67_400000 0 1454 0.684
6 67_300000 0 1455 0.684
6 67_200000 0.000626464 1456 0.684
6 43_1000000 0.001676131 1457 0.684
6 43_1100000 0.001731901 1458 0.686
6 43_1400000 0 1459 0.688
6 43_1500000 0.012071434 1460 0.688
6 119_200000 0 1461 0.700
6 119_300000 0.002602228 1462 0.700
6 119_400000 0.001390886 1463 0.702
6 119_500000 0 1464 0.704
6 119_600000 0 1465 0.704
6 248_100000 0.010762814 1466 0.704
6 25_1600000 0 1467 0.714
6 25_1500000 0 1468 0.714
6 25_1400000 0 1469 0.714
6 25_1300000 0 1470 0.714
6 25_1200000 0.003426387 1471 0.714
6 25_1100000 0.001667219 1472 0.718
6 25_1000000 0.00385216 1473 0.720
6 25_900000 0 1474 0.723
6 25_800000 0.013189872 1475 0.723
6 25_600000 0 1476 0.737
6 25_500000 0 1477 0.737
6 25_400000 0.018360805 1478 0.737
6 25_300000 0.005109082 1479 0.755
6 25_200000 0 1480 0.760
6 25_100000 0 1481 0.760
6 190_300000 0 1482 0.760
6 190_200000 0.026048946 1483 0.760
6 190_100000 0.005462401 1484 0.786
6 190_0 0.031198532 1485 0.792
6 179_200000 0 1486 0.823
6 179_100000 0.01462256 1487 0.823
6 179_0 0 1488 0.837
6 262_0 0 1489 0.837
6 262_100000 0 1490 0.837
6 262_200000 0.039941047 1491 0.837
6 16_2400000 0.002682718 1492 0.877
6 16_2300000 0.006794817 1493 0.880
6 16_2200000 0.00430006 1494 0.887
6 16_2100000 0.009404263 1495 0.891
6 16_2000000 0 1496 0.901
6 16_1900000 0.027968259 1497 0.901
6 16_1800000 0 1498 0.929
6 16_1700000 0.013635704 1499 0.929
6 16_1600000 0.0137007 1500 0.942
6 16_1500000 0 1501 0.956
6 16_1400000 0.023854019 1502 0.956
6 16_1300000 0.02143502 1503 0.980
6 16_1100000 0.021638243 1504 1.001
6 16_1000000 0 1505 1.023
6 16_900000 0.037545184 1506 1.023
6 16_800000 0.005431547 1507 1.060
6 16_700000 0.012881088 1508 1.066
6 16_600000 0 1509 1.079
6 16_500000 0.041887463 1510 1.079
6 16_400000 0 1511 1.121
6 16_300000 0.032549068 1512 1.121
6 16_200000 0.006982188 1513 1.153
6 16_100000 0 1514 1.160
6 16_0 1515 1.160
7 251_100000 0 1516 0.000
7 251_0 0 1517 0.000
7 335_100000 0.015178677 1518 0.000
7 335_0 0.07010079 1519 0.015
7 113_700000 0 1520 0.085
7 113_600000 0.042721971 1521 0.085
7 113_500000 0 1522 0.128
7 113_400000 0.012794032 1523 0.128
7 113_300000 0.012233249 1524 0.141
7 113_200000 0.015784942 1525 0.153
7 113_100000 0 1526 0.169
7 130_800000 0.042431251 1527 0.169
7 130_700000 0 1528 0.211
7 130_600000 0.017921276 1529 0.211
7 130_500000 0.020514692 1530 0.229
7 130_400000 0.023958683 1531 0.250
7 130_300000 0.013684828 1532 0.274
7 130_200000 0.026679102 1533 0.287
7 130_100000 0.015812213 1534 0.314
7 416_0 0.033643374 1535 0.330
7 255_300000 0.010359894 1536 0.363
7 255_200000 0.016060445 1537 0.374
7 255_100000 0.017796062 1538 0.390
7 255_0 0 1539 0.408
7 476_0 0 1540 0.408
7 9c_500000 0.004469458 1541 0.408
7 9c_400000 0.012555467 1542 0.412
7 9c_300000 0.018906879 1543 0.425
7 9c_200000 0 1544 0.444
7 9c_100000 0.012632803 1545 0.444
7 9c_0 0 1546 0.456
7 114b_0 0.012571056 1547 0.456
7 114b_100000 0 1548 0.469
7 114b_200000 0.050455577 1549 0.469
7 114b_400000 0 1550 0.519
7 114b_500000 0.004913593 1551 0.519
7 274_0 0.004902261 1552 0.524
7 274_100000 0 1553 0.529
7 274_200000 0.011354979 1554 0.529
7 29_0 0.005969517 1555 0.540
7 29_100000 0.00871243 1556 0.546
7 29_200000 0 1557 0.555
7 29_300000 0 1558 0.555
7 29_400000 0.02161396 1559 0.555
7 29_500000 0 1560 0.577
7 29_600000 0 1561 0.577
7 29_800000 0 1562 0.577
7 29_900000 0 1563 0.577
7 29_1000000 0.005984156 1564 0.577
7 29_1100000 0.024714942 1565 0.583
7 29_1200000 0.018426977 1566 0.607
7 29_1400000 0.020360795 1567 0.626
7 29_1500000 0 1568 0.646
7 29_1700000 0 1569 0.646
7 29_1800000 0.020717267 1570 0.646
7 166_500000 0 1571 0.667
7 166_300000 0 1572 0.667
7 166_200000 0 1573 0.667
7 166_100000 0 1574 0.667
7 166_0 0 1575 0.667
7 56_1300000 0 1576 0.667
7 56_1100000 0.01989564 1577 0.667
7 56_1000000 0 1578 0.687
7 56_600000 0 1579 0.687
7 56_500000 0 1580 0.687
7 56_400000 0 1581 0.687
7 56_300000 0 1582 0.687
7 14_0 0 1583 0.687
7 14_100000 0 1584 0.687
7 14_200000 0 1585 0.687
7 14_400000 0 1586 0.687
7 14_500000 0 1587 0.687
7 14_600000 0 1588 0.687
7 14_700000 0 1589 0.687
7 14_800000 0 1590 0.687
7 14_1100000 0 1591 0.687
7 14_1200000 0 1592 0.687
7 14_1300000 0 1593 0.687
7 14_1400000 0 1594 0.687
7 14_1500000 0 1595 0.687
7 14_1600000 0 1596 0.687
7 14_1700000 0 1597 0.687
7 14_1900000 0 1598 0.687
7 14_2000000 0 1599 0.687
7 14_2500000 0 1600 0.687
7 14_2600000 0 1601 0.687
7 95_400000 0 1602 0.687
7 95_300000 0 1603 0.687
7 95_200000 0 1604 0.687
7 95_0 0 1605 0.687
7 184_400000 0 1606 0.687
7 184_300000 0 1607 0.687
7 184_100000 0 1608 0.687
7 184_0 0 1609 0.687
7 105_600000 0 1610 0.687
7 105_400000 0 1611 0.687
7 105_300000 0 1612 0.687
7 105_100000 0 1613 0.687
7 105_0 0 1614 0.687
7 313_100000 0 1615 0.687
7 313_0 0 1616 0.687
7 70a_0 0 1617 0.687
7 70a_100000 0 1618 0.687
7 97a_400000 0.00400242 1619 0.687
7 97a_200000 0 1620 0.691
7 97a_100000 0 1621 0.691
7 351_100000 0 1622 0.691
7 351_0 0 1623 0.691
7 229_100000 0 1624 0.691
7 229_0 0 1625 0.691
7 279_0 1626 0.691
8 233_0 0 1627 0.000
8 233_100000 0 1628 0.000
8 233_200000 0 1629 0.000
8 233_300000 0 1630 0.000
8 155_0 0 1631 0.000
8 155_100000 0 1632 0.000
8 155_200000 0 1633 0.000
8 155_300000 0 1634 0.000
8 155_400000 0 1635 0.000
8 155_500000 0 1636 0.000
8 76_1000000 0 1637 0.000
8 76_900000 0 1638 0.000
8 76_800000 0 1639 0.000
8 76_700000 0 1640 0.000
8 76_600000 0 1641 0.000
8 76_500000 0 1642 0.000
8 76_400000 0 1643 0.000
8 76_300000 0 1644 0.000
8 11_2900000 0 1645 0.000
8 11_2800000 0 1646 0.000
8 11_2700000 0 1647 0.000
8 11_2600000 0 1648 0.000
8 11_2500000 0 1649 0.000
8 11_2400000 0 1650 0.000
8 11_2300000 0 1651 0.000
8 11_2200000 0 1652 0.000
8 11_2100000 0 1653 0.000
8 11_2000000 0 1654 0.000
8 11_1900000 0 1655 0.000
8 11_1800000 0 1656 0.000
8 11_1700000 0 1657 0.000
8 11_1600000 0 1658 0.000
8 11_1400000 0 1659 0.000
8 11_1300000 0 1660 0.000
8 11_1200000 0 1661 0.000
8 11_1100000 0 1662 0.000
8 11_1000000 0 1663 0.000
8 11_900000 0 1664 0.000
8 11_800000 0 1665 0.000
8 11_700000 0 1666 0.000
8 11_600000 0 1667 0.000
8 11_500000 0 1668 0.000
8 11_400000 0 1669 0.000
8 11_300000 0 1670 0.000
8 11_200000 0 1671 0.000
8 11_100000 0 1672 0.000
8 11_0 0 1673 0.000
8 59_0 0 1674 0.000
8 59_100000 0 1675 0.000
8 59_200000 0 1676 0.000
8 59_600000 0 1677 0.000
8 59_700000 0 1678 0.000
8 59_900000 0 1679 0.000
8 59_1000000 0 1680 0.000
8 59_1100000 0 1681 0.000
8 59_1200000 0 1682 0.000
8 59_1300000 0 1683 0.000
8 59_1400000 0.014275037 1684 0.000
8 217_300000 0.009514455 1685 0.014
8 217_200000 0 1686 0.024
8 217_100000 0 1687 0.024
8 118_100000 0 1688 0.024
8 118_200000 0 1689 0.024
8 118_300000 0 1690 0.024
8 118_400000 0 1691 0.024
8 171_0 0.018614228 1692 0.024
8 77_1000000 0.009235752 1693 0.042
8 77_900000 0 1694 0.052
8 77_800000 0 1695 0.052
8 38_1300000 0 1696 0.052
8 38_1400000 0.00858841 1697 0.052
8 38_1500000 0.017585731 1698 0.060
8 412_0 0 1699 0.078
8 46_1500000 0 1700 0.078
8 46_1400000 0 1701 0.078
8 46_1300000 0.006193545 1702 0.078
8 46_1200000 0.012422188 1703 0.084
8 46_1100000 0 1704 0.096
8 46_1000000 0 1705 0.096
8 46_900000 0 1706 0.096
8 46_800000 0.050282447 1707 0.096
8 46_700000 0 1708 0.147
8 46_600000 0.007836512 1709 0.147
8 46_500000 0.030472272 1710 0.155
8 46_400000 0 1711 0.185
8 46_300000 0.020611119 1712 0.185
8 46_200000 0 1713 0.206
8 46_100000 0.015695397 1714 0.206
8 12b_0 0 1715 0.221
8 12b_100000 0 1716 0.221
8 12b_200000 0.019727674 1717 0.221
8 12b_300000 0 1718 0.241
8 12b_400000 0 1719 0.241
8 12b_500000 0.033908142 1720 0.241
8 12b_700000 0.186005535 1721 0.275
8 12b_1600000 0 1722 0.461
8 12b_1700000 0.00700682 1723 0.461
8 12b_1800000 0.030588583 1724 0.468
8 12b_1900000 0 1725 0.499
8 12b_2000000 0.035130163 1726 0.499
8 12b_2100000 0.006831232 1727 0.534
8 12b_2200000 0 1728 0.541
8 12b_2300000 0.01212063 1729 0.541
8 12b_2400000 0.017975817 1730 0.553
8 24_1900000 0.000754076 1731 0.571
8 24_1800000 0 1732 0.571
8 24_1700000 0.019166033 1733 0.571
8 24_1600000 0 1734 0.591
8 24_1500000 0.011413726 1735 0.591
8 24_1400000 0.011852105 1736 0.602
8 24_1300000 0 1737 0.614
8 24_1200000 0.007507649 1738 0.614
8 24_1100000 0 1739 0.621
8 24_1000000 0.017586208 1740 0.621
8 24_900000 0.004776862 1741 0.639
8 24_800000 0.011895304 1742 0.644
8 24_600000 0.010361307 1743 0.656
8 24_500000 0.006942541 1744 0.666
8 24_400000 0 1745 0.673
8 24_300000 0 1746 0.673
8 24_200000 0.012040972 1747 0.673
8 24_100000 0 1748 0.685
8 24_0 0 1749 0.685
8 45_1600000 0.014799211 1750 0.685
8 45_1500000 0.008631307 1751 0.700
8 45_1400000 0 1752 0.708
8 45_1300000 0 1753 0.708
8 45_1100000 0 1754 0.708
8 45_1000000 0.00879211 1755 0.708
8 45_900000 0 1756 0.717
8 45_800000 0.013276248 1757 0.717
8 45_700000 0 1758 0.730
8 45_400000 0.025245547 1759 0.730
8 45_200000 0 1760 0.756
8 45_100000 0.011309032 1761 0.756
8 45_0 0.00859212 1762 0.767
8 285_200000 0 1763 0.776
8 285_100000 0.004260577 1764 0.776
8 285_0 0 1765 0.780
8 3_0 0 1766 0.780
8 3_100000 0.004964429 1767 0.780
8 3_200000 0.016539527 1768 0.785
8 3_400000 0.033491567 1769 0.801
8 3_500000 0 1770 0.835
8 3_600000 0.014133955 1771 0.835
8 3_700000 0.024722445 1772 0.849
8 3_900000 0.004998701 1773 0.874
8 3_1000000 0 1774 0.879
8 3_1100000 0 1775 0.879
8 3_1200000 0.024308859 1776 0.879
8 3_1400000 0 1777 0.903
8 3_1500000 0.004675912 1778 0.903
8 3_1600000 0 1779 0.908
8 3_1700000 0 1780 0.908
8 3_1800000 0.008465325 1781 0.908
8 3_1900000 0 1782 0.916
8 3_2000000 0.004227056 1783 0.916
8 3_2100000 0.004275456 1784 0.920
8 3_2200000 0 1785 0.925
8 3_2300000 0.015353275 1786 0.925
8 3_2400000 0.001103858 1787 0.940
8 3_2500000 0.024680408 1788 0.941
8 3_2600000 0.008517882 1789 0.966
8 3_2700000 0.01353431 1790 0.974
8 3_2800000 0 1791 0.988
8 3_2900000 0.008371069 1792 0.988
8 3_3000000 0 1793 0.996
8 3_3100000 0 1794 0.996
8 3_3200000 0.012594548 1795 0.996
8 3_3300000 0 1796 1.009
8 3_3400000 0.009246965 1797 1.009
8 3_3500000 0.018705604 1798 1.018
8 3_3600000 0 1799 1.037
8 3_3700000 0.013463837 1800 1.037
8 3_3800000 0.006698792 1801 1.050
8 3_3900000 0.01593874 1802 1.057
8 3_4000000 0.01182084 1803 1.073
8 3_4100000 0 1804 1.085
8 3_4200000 0 1805 1.085
8 3_4300000 1806 1.085
9 124_700000 0 1807 0.000
9 124_600000 0 1808 0.000
9 124_500000 0.010540852 1809 0.000
9 124_400000 0.047329127 1810 0.011
9 124_300000 0 1811 0.058
9 124_200000 0.016870499 1812 0.058
9 124_100000 0 1813 0.075
9 124_0 0.007927899 1814 0.075
9 20_2300000 0 1815 0.083
9 20_2200000 0.014002269 1816 0.083
9 20_2100000 0.037583622 1817 0.097
9 20_2000000 0 1818 0.134
9 20_1900000 0 1819 0.134
9 20_1800000 0.023897092 1820 0.134
9 20_1700000 0.007944213 1821 0.158
9 20_1600000 0 1822 0.166
9 20_1500000 0 1823 0.166
9 20_1400000 0 1824 0.166
9 20_1200000 0 1825 0.166
9 20_1100000 0.004277164 1826 0.166
9 20_1000000 0 1827 0.170
9 20_900000 0.008556172 1828 0.170
9 20_800000 0.017296191 1829 0.179
9 20_700000 0 1830 0.196
9 20_600000 0 1831 0.196
9 20_500000 0 1832 0.196
9 20_400000 0 1833 0.196
9 20_300000 0.008729553 1834 0.196
9 330_100000 0 1835 0.205
9 12a_300000 0.004374681 1836 0.205
9 12a_100000 0 1837 0.209
9 12a_0 0.003251722 1838 0.209
9 147_500000 0.001149951 1839 0.213
9 147_600000 0 1840 0.214
9 352_0 0 1841 0.214
9 283_0 0 1842 0.214
9 283_100000 0 1843 0.214
9 152_300000 0 1844 0.214
9 152_500000 0 1845 0.214
9 103b_0 0 1846 0.214
9 103b_100000 0.002582271 1847 0.214
9 103b_200000 0.005435166 1848 0.216
9 103b_300000 0.006007921 1849 0.222
9 103b_500000 0.006004468 1850 0.228
9 103b_600000 0 1851 0.234
9 84_1000000 0 1852 0.234
9 84_900000 0 1853 0.234
9 84_700000 0.023213772 1854 0.234
9 84_500000 0.010075123 1855 0.257
9 84_400000 0.021510274 1856 0.267
9 84_300000 0 1857 0.289
9 238_0 0 1858 0.289
9 238_200000 0.013841283 1859 0.289
9 157_300000 0 1860 0.302
9 157_400000 0 1861 0.302
9 157_500000 6.79E-005 1862 0.302
9 63b_900000 0.003964849 1863 0.302
9 63b_700000 0.021122097 1864 0.306
9 63b_600000 0.005006654 1865 0.328
9 63b_500000 0.015616485 1866 0.333
9 63b_400000 0 1867 0.348
9 63b_300000 0.003998359 1868 0.348
9 63b_200000 0.020922543 1869 0.352
9 63b_100000 0 1870 0.373
9 63b_0 0 1871 0.373
9 7_3700000 0.011208064 1872 0.373
9 7_3600000 0.034601766 1873 0.384
9 7_3400000 0 1874 0.419
9 7_3300000 0 1875 0.419
9 7_3100000 0.029681534 1876 0.419
9 7_3000000 0 1877 0.449
9 7_2900000 0 1878 0.449
9 7_2800000 0 1879 0.449
9 7_2700000 0.033692036 1880 0.449
9 7_2600000 0.026970749 1881 0.482
9 7_2500000 0 1882 0.509
9 7_2400000 0.039097341 1883 0.509
9 7_2300000 0 1884 0.548
9 7_2200000 0.035793719 1885 0.548
9 7_2000000 0.000264155 1886 0.584
9 7_1900000 0.020028879 1887 0.584
9 7_1800000 0.014834308 1888 0.604
9 7_1600000 0 1889 0.619
9 7_1500000 0 1890 0.619
9 7_1400000 0.034276547 1891 0.619
9 7_1300000 0.009325483 1892 0.654
9 7_1100000 0 1893 0.663
9 7_1000000 0 1894 0.663
9 7_900000 0.025774461 1895 0.663
9 7_800000 0 1896 0.689
9 7_700000 0.013045913 1897 0.689
9 7_600000 0.019605315 1898 0.702
9 7_500000 0 1899 0.721
9 7_400000 0.00806751 1900 0.721
9 7_300000 0.025389382 1901 0.729
9 7_200000 0 1902 0.755
9 7_100000 1903 0.755'''.split('\n')
SWC = '''10 13_2600000 0.001648996 0.000 1
10 13_2500000 0 0.002 2
10 13_2400000 0.004106542 0.002 3
10 13_2300000 0.009867863 0.006 4
10 13_2200000 0.021953569 0.016 5
10 13_2100000 0.015902331 0.038 6
10 13_2000000 0.008993844 0.053 7
10 13_1900000 0.01483092 0.062 8
10 13_1700000 0.029803482 0.077 9
10 13_1400000 0.011056506 0.107 10
10 13_1300000 0.01298655 0.118 11
10 13_1200000 0.009233107 0.131 12
10 13_1100000 0.005152857 0.140 13
10 13_1000000 0.00297836 0.146 14
10 13_900000 0.008115133 0.149 15
10 13_800000 0.005576761 0.157 16
10 13_700000 0.005210869 0.162 17
10 13_600000 0 0.167 18
10 13_500000 0 0.167 19
10 13_400000 0 0.167 20
10 13_300000 0 0.167 21
10 13_100000 0 0.167 22
10 13_0 0 0.167 23
10 193b_0 0 0.167 24
10 48a_0 0 0.167 25
10 48a_100000 0 0.167 26
10 48a_200000 0 0.167 27
10 48a_300000 0 0.167 28
10 48a_400000 0 0.167 29
10 48a_500000 0 0.167 30
10 48a_600000 0 0.167 31
10 48a_700000 0 0.167 32
10 48a_800000 0 0.167 33
10 324_100000 0 0.167 34
10 324_0 0 0.167 35
10 4a_0 0 0.167 36
10 90_0 0 0.167 37
10 90_100000 0 0.167 38
10 90_200000 0 0.167 39
10 90_300000 0 0.167 40
10 90_500000 0 0.167 41
10 90_600000 0 0.167 42
10 90_700000 0 0.167 43
10 90_800000 0 0.167 44
10 90_900000 0 0.167 45
10 90_1000000 0 0.167 46
10 749_0 0 0.167 47
10 206_0 0 0.167 48
10 206_100000 0 0.167 49
10 206_200000 0 0.167 50
10 206_300000 0 0.167 51
10 206_400000 0 0.167 52
10 223_300000 0 0.167 53
10 223_200000 0 0.167 54
10 223_100000 0.001005631 0.167 55
10 223_0 0 0.168 56
10 445_0 0 0.168 57
10 210_300000 0 0.168 58
10 210_200000 0 0.168 59
10 210_100000 0.002629573 0.168 60
10 210_0 0.006587436 0.171 61
10 40_500000 0.008674425 0.178 62
10 40_600000 0.000304983 0.186 63
10 40_900000 0.012350981 0.187 64
10 40_1100000 0.007110953 0.199 65
10 40_1200000 0 0.206 66
10 40_1300000 0.003678754 0.206 67
10 40_1400000 0.015475872 0.210 68
10 40_1600000 0.009367631 0.225 69
10 33_1600000 0 0.235 70
10 33_1500000 0.002784443 0.235 71
10 33_1400000 0.008936999 0.237 72
10 33_1300000 0.003597781 0.246 73
10 33_1200000 0 0.250 74
10 33_1000000 0 0.250 75
10 33_700000 0 0.250 76
10 33_600000 0 0.250 77
10 33_400000 0.002280801 0.250 78
10 9a_1100000 0.003192115 0.252 79
10 9a_800000 0.00226649 0.255 80
10 9a_500000 0 0.258 81
10 9a_400000 0.006107562 0.258 82
10 9a_300000 0.001693852 0.264 83
10 9a_200000 0.009918904 0.265 84
10 172_500000 0.005044221 0.275 85
10 172_400000 0.002721046 0.280 86
10 172_200000 0 0.283 87
10 50_0 0.012021042 0.283 88
10 50_100000 0.007636625 0.295 89
10 50_200000 0.001404066 0.303 90
10 50_300000 0.003687734 0.304 91
10 50_500000 0.008707402 0.308 92
10 50_800000 0.00356459 0.317 93
10 50_900000 0.00487658 0.320 94
10 50_1100000 0.003925611 0.325 95
10 50_1200000 0.013059745 0.329 96
10 50_1400000 0 0.342 97
10 209_0 0.003536795 0.342 98
10 209_100000 0.010802786 0.346 99
10 490_0 0.003792805 0.356 100
10 193a_300000 0.004537965 0.360 101
10 193a_200000 0.002666668 0.365 102
10 193a_100000 0.004844674 0.367 103
10 193a_0 0.00683222 0.372 104
10 125b_600000 0.01018152 0.379 105
10 125b_500000 0.012623651 0.389 106
10 125b_400000 0.018982127 0.402 107
10 125b_300000 0.01359809 0.421 108
10 125b_200000 0.011138183 0.434 109
10 125b_100000 0.025946806 0.446 110
10 125b_0 0.007026509 0.472 111
10 188_500000 0.009954997 0.479 112
10 188_400000 0.021507545 0.488 113
10 188_300000 0.002163784 0.510 114
10 188_200000 0.005175391 0.512 115
10 311_100000 0.007549799 0.517 116
10 311_0 0.017267759 0.525 117
10 204_0 0.020521993 0.542 118
10 204_100000 0.012053349 0.563 119
10 204_200000 0.0123461 0.575 120
10 204_300000 0.010196304 0.587 121
10 204_400000 0.010327736 0.597 122
10 87_0 0.023935061 0.608 123
10 87_100000 0.005193494 0.632 124
10 87_200000 0.010030452 0.637 125
10 87_300000 0.011391215 0.647 126
10 87_400000 0.01500743 0.658 127
10 87_500000 0.006657482 0.673 128
10 87_600000 0.012899204 0.680 129
10 87_700000 0.011202638 0.693 130
10 87_800000 0.008174531 0.704 131
10 87_900000 0.009527307 0.712 132
10 87_1000000 0.009332319 0.722 133
10 159_0 0.014133171 0.731 134
10 159_100000 0.008041811 0.745 135
10 159_200000 0.006784867 0.753 136
10 159_300000 0.003734062 0.760 137
10 159_400000 0 0.764 138
10 159_500000 0.764 139
11 75_0 0 0.764 140
11 75_100000 0.001233734 0.764 141
11 75_200000 0 0.765 142
11 75_300000 0.007240878 0.765 143
11 75_400000 0.001065748 0.772 144
11 75_500000 0.005033756 0.773 145
11 75_600000 0.010206621 0.778 146
11 75_700000 0.004730949 0.788 147
11 75_800000 0.011031533 0.793 148
11 75_900000 0.004416283 0.804 149
11 75_1000000 0.005055732 0.809 150
11 75_1100000 0.011528601 0.814 151
11 228_0 0.009402918 0.825 152
11 228_100000 0.011635891 0.835 153
11 228_200000 0.000606894 0.846 154
11 228_300000 0.010802569 0.847 155
11 273b_0 0.004813308 0.858 156
11 273b_100000 0.045099669 0.862 157
11 213_100000 0.005229966 0.908 158
11 213_200000 0.008231381 0.913 159
11 213_300000 0.005526544 0.921 160
11 63a_0 0.019102582 0.927 161
11 63a_100000 0.008321972 0.946 162
11 63a_200000 0.016368929 0.954 163
11 257b_0 0.023600553 0.970 164
11 30_1800000 0.004177608 0.994 165
11 30_1700000 0.031582002 0.998 166
11 30_1600000 0.011805496 1.030 167
11 30_1500000 0.009873319 1.042 168
11 30_1400000 0.029576936 1.051 169
11 30_1300000 0.026875437 1.081 170
11 30_1200000 0.022761265 1.108 171
11 48b_600000 0.020807298 1.131 172
11 48b_500000 0.019724331 1.151 173
11 48b_400000 0.012267476 1.171 174
11 48b_300000 0.008882545 1.183 175
11 48b_100000 0.003832011 1.192 176
11 243_0 0.004941462 1.196 177
11 243_100000 0 1.201 178
11 243_200000 0.002968643 1.201 179
11 243_300000 0.007687901 1.204 180
11 182_300000 0.001289895 1.212 181
11 182_200000 0.011597284 1.213 182
11 182_100000 0.010102083 1.225 183
11 185_200000 0.001848248 1.235 184
11 185_100000 0.004590389 1.237 185
11 185_0 0.003426018 1.241 186
11 39b_100000 0.009276315 1.245 187
11 39b_200000 0.007007184 1.254 188
11 39b_300000 0.003557437 1.261 189
11 39b_500000 0.015706761 1.264 190
11 39b_600000 0.006634841 1.280 191
11 47b_100000 0.00250748 1.287 192
11 47b_200000 0.035139735 1.289 193
11 47b_500000 0 1.324 194
11 49_100000 0.04184338 1.324 195
11 49_200000 0.000649166 1.366 196
11 49_300000 0 1.367 197
11 49_600000 7.74E-005 1.367 198
11 49_1000000 0.003421694 1.367 199
11 49_1100000 0 1.370 200
11 49_1200000 0 1.370 201
11 49_1300000 0 1.370 202
11 239_300000 0 1.370 203
11 239_200000 0.002505483 1.370 204
11 239_100000 0.006132704 1.373 205
11 239_0 0.001050067 1.379 206
11 6b_1000000 0 1.380 207
11 6b_1200000 0.000644293 1.380 208
11 6b_1300000 0 1.381 209
11 131_600000 0.002393213 1.381 210
11 131_400000 0 1.383 211
11 100_900000 0.000577566 1.383 212
11 100_800000 0.005626526 1.384 213
11 100_700000 0 1.389 214
11 100_600000 0 1.389 215
11 100_400000 0 1.389 216
11 100_300000 0 1.389 217
11 100_100000 0 1.389 218
11 100_0 0 1.389 219
11 598_0 0 1.389 220
11 145_0 0 1.389 221
11 162_100000 0 1.389 222
11 162_200000 0 1.389 223
11 162_300000 0.001212805 1.389 224
11 162_400000 0 1.391 225
11 162_500000 0.000471365 1.391 226
11 167_200000 0.000917133 1.391 227
11 167_100000 0 1.392 228
11 779_0 0 1.392 229
11 22a_0 0.000537859 1.392 230
11 221a_100000 0.000492506 1.392 231
11 221a_0 0 1.393 232
11 161_0 0 1.393 233
11 161_100000 0 1.393 234
11 161_200000 0 1.393 235
11 161_300000 0 1.393 236
11 161_400000 0.003473816 1.393 237
11 161_500000 0.005742274 1.396 238
11 22b_100000 0.011265826 1.402 239
11 22b_200000 0.011301488 1.413 240
11 22b_300000 0.008266272 1.425 241
11 22b_400000 0 1.433 242
11 22b_500000 0.008830753 1.433 243
11 22b_600000 0.001964515 1.442 244
11 22b_700000 0.015555559 1.444 245
11 22b_800000 0.008195987 1.459 246
11 22b_900000 0.003424934 1.468 247
11 22b_1000000 0.012714403 1.471 248
11 22b_1100000 0 1.484 249
11 22b_1200000 0.123973493 1.484 250
11 22b_1300000 1.608 251
12 363_0 0.104407405 1.608 252
12 58b_900000 0.003465198 1.712 253
12 58b_800000 0 1.716 254
12 58b_700000 0.001881242 1.716 255
12 58b_600000 0.006212247 1.717 256
12 58b_500000 0.009847855 1.724 257
12 58b_400000 0.023002209 1.733 258
12 58b_300000 0.011687581 1.756 259
12 58b_200000 0.001477733 1.768 260
12 58b_100000 0.009466723 1.770 261
12 220_400000 0 1.779 262
12 220_300000 0.00524094 1.779 263
12 220_200000 0.005689219 1.784 264
12 220_100000 0.000835552 1.790 265
12 220_0 0.003397135 1.791 266
12 336_0 0 1.794 267
12 336_100000 0.004927872 1.794 268
12 132_700000 0 1.799 269
12 132_600000 0 1.799 270
12 132_500000 0 1.799 271
12 132_400000 0 1.799 272
12 132_300000 0 1.799 273
12 132_200000 0.001989415 1.799 274
12 132_100000 0.007995507 1.801 275
12 132_0 0.007124919 1.809 276
12 224_300000 0.007903727 1.816 277
12 224_100000 0 1.824 278
12 224_0 0.022671532 1.824 279
12 349_0 0 1.847 280
12 332_0 0.006151711 1.847 281
12 332_100000 0.013319306 1.853 282
12 299_300000 0.00887416 1.866 283
12 201_300000 4.56E-005 1.875 284
12 201_100000 0.011049818 1.875 285
12 91_300000 0.006027564 1.886 286
12 91_400000 0.003812638 1.892 287
12 91_500000 0 1.896 288
12 91_600000 0.004126984 1.896 289
12 91_800000 0 1.900 290
12 31_1600000 0 1.900 291
12 31_800000 0.002429945 1.900 292
12 31_700000 0 1.903 293
12 31_500000 0.006134724 1.903 294
12 31_200000 0.002150334 1.909 295
12 31_100000 0 1.911 296
12 114a_200000 0.000557716 1.911 297
12 114a_300000 0 1.912 298
12 160_400000 0 1.912 299
12 160_300000 0.001374283 1.912 300
12 287_0 0 1.913 301
12 287_100000 0.023607944 1.913 302
12 78_100000 0.01557459 1.937 303
12 450_0 0.007165961 1.952 304
12 241_300000 0 1.959 305
12 241_100000 0.006652076 1.959 306
12 241_0 0.007806887 1.966 307
12 143_400000 0.007203571 1.974 308
12 143_200000 0.012811034 1.981 309
12 143_100000 0.022774543 1.994 310
12 819_0 0 2.017 311
12 271_100000 0 2.017 312
12 271_200000 0.00868468 2.017 313
12 125a_0 0 2.025 314
12 309_0 0.001233926 2.025 315
12 250_200000 0.028722333 2.026 316
12 142_600000 0.047183999 2.055 317
12 142_0 2.04E-002 2.102 318
12 435_0 0.01133004 2.123 319
12 140a_200000 0.011003558 2.134 320
12 140a_100000 0.018402839 2.145 321
12 140a_0 0.003766295 2.164 322
12 37_1700000 0.010531698 2.167 323
12 37_1600000 0.005788966 2.178 324
12 37_1500000 0.014751205 2.184 325
12 37_1300000 0.004917561 2.198 326
12 37_1200000 0.00830276 2.203 327
12 37_1100000 0.002942086 2.212 328
12 37_1000000 0.005694471 2.215 329
12 37_900000 0.012089592 2.220 330
12 37_800000 0.013613064 2.232 331
12 37_700000 0.012566842 2.246 332
12 37_500000 0.0113585 2.258 333
12 37_400000 0.002886937 2.270 334
12 37_300000 0.024594656 2.273 335
12 37_100000 0 2.297 336
12 37_0 0.022579465 2.297 337
12 334_100000 0 2.320 338
12 334_0 0.006168706 2.320 339
12 297_100000 0.006404927 2.326 340
12 297_0 0.002369084 2.332 341
12 39a_800000 0.015395079 2.335 342
12 39a_700000 0.014426523 2.350 343
12 39a_600000 0.017586996 2.365 344
12 39a_500000 0.005351214 2.382 345
12 39a_400000 0.004051775 2.388 346
12 39a_300000 0.009575343 2.392 347
12 39a_200000 0.008644242 2.401 348
12 39a_100000 0.009419626 2.410 349
12 39a_0 0.00903271 2.419 350
12 17b_0 0.001485848 2.428 351
12 17b_100000 0 2.430 352
12 17b_200000 0.00672139 2.430 353
12 17b_300000 0.002651958 2.437 354
12 17b_400000 0.004733864 2.439 355
12 17b_500000 0.011231764 2.444 356
12 17b_600000 0 2.455 357
12 17b_700000 0.003700426 2.455 358
12 17b_800000 0.00697947 2.459 359
12 17b_900000 0.007982141 2.466 360
12 17b_1000000 0.006440224 2.474 361
12 17b_1100000 0 2.480 362
12 17b_1200000 0.003054112 2.480 363
12 44b_0 0.001701715 2.483 364
12 44b_100000 0.003774316 2.485 365
12 44b_200000 0.004568598 2.489 366
12 44b_300000 0.000686211 2.493 367
12 44b_400000 0 2.494 368
12 44b_500000 0.026617156 2.494 369
12 683_0 2.521 370
13 369_100000 0.125508928 2.521 371
13 369_0 0.056119984 2.646 372
13 139_0 0.017212763 2.702 373
13 139_100000 0.067061868 2.719 374
13 139_300000 0.016989519 2.787 375
13 139_400000 0.009300771 2.804 376
13 139_600000 0 2.813 377
13 329_0 0.00170253 2.813 378
13 329_100000 0.003581432 2.815 379
13 129b_400000 0.002294996 2.818 380
13 129b_300000 0.01351317 2.820 381
13 129b_200000 0.01838891 2.834 382
13 52_1400000 0.005746936 2.852 383
13 52_1300000 0.00360806 2.858 384
13 52_1200000 0.009859627 2.862 385
13 52_800000 0.001198394 2.872 386
13 52_500000 0.048084828 2.873 387
13 52_300000 0.009983198 2.921 388
13 52_200000 0.001310843 2.931 389
13 52_100000 0.008984457 2.932 390
13 52_0 0.059958892 2.941 391
13 122_0 0 3.001 392
13 122_100000 0 3.001 393
13 122_200000 0.010400212 3.001 394
13 122_300000 0 3.011 395
13 122_400000 0.010278706 3.011 396
13 122_500000 0.005186666 3.022 397
13 122_600000 0.018553796 3.027 398
13 235_300000 0 3.045 399
13 235_200000 0.124279247 3.045 400
13 235_0 0.088680497 3.170 401
13 138_300000 0.014331749 3.258 402
13 402_0 0.041191019 3.273 403
13 236_100000 0.104289093 3.314 404
13 236_200000 0.011878409 3.418 405
13 186_300000 0.00738079 3.430 406
13 28_0 0.005242803 3.438 407
13 28_500000 0.005920231 3.443 408
13 28_600000 0 3.449 409
13 28_1100000 0 3.449 410
13 28_1200000 0.012820408 3.449 411
13 28_1300000 0 3.461 412
13 28_1400000 0.00082915 3.461 413
13 28_1600000 0 3.462 414
13 34_900000 0.009668814 3.462 415
13 34_1000000 0 3.472 416
13 34_1100000 0 3.472 417
13 34_1200000 0.001692781 3.472 418
13 34_1400000 0.011766419 3.474 419
13 34_1500000 0 3.485 420
13 34_1600000 0.00315037 3.485 421
13 34_1700000 0.012263612 3.489 422
13 225_300000 0 3.501 423
13 225_200000 0.012410863 3.501 424
13 107_200000 0.006090247 3.513 425
13 107_300000 0.003628567 3.519 426
13 107_400000 0.019776763 3.523 427
13 107_800000 0.002640349 3.543 428
13 230_0 0.007855352 3.545 429
13 230_200000 0 3.553 430
13 230_300000 0.004462114 3.553 431
13 174_0 0.008103757 3.558 432
13 174_100000 0.012840779 3.566 433
13 174_200000 0.023517364 3.579 434
13 174_400000 0.001702359 3.602 435
13 174_500000 0.004871865 3.604 436
13 344_0 0.005168101 3.609 437
13 108_900000 0 3.614 438
13 108_800000 0.011416405 3.614 439
13 108_700000 0.009818226 3.625 440
13 108_600000 0.002405718 3.635 441
13 108_500000 0.002943137 3.638 442
13 108_400000 0.005137465 3.641 443
13 108_300000 0.008245482 3.646 444
13 108_200000 0.010807413 3.654 445
13 245_300000 0 3.665 446
13 245_200000 0 3.665 447
13 245_100000 0 3.665 448
13 245_0 0.003454751 3.665 449
13 4b_3800000 0.003021615 3.668 450
13 4b_3700000 0.006362332 3.671 451
13 4b_3600000 0.005123542 3.678 452
13 4b_3500000 0.008944019 3.683 453
13 4b_3400000 0.001211973 3.692 454
13 4b_3300000 0.007295931 3.693 455
13 4b_3200000 0.006851126 3.700 456
13 4b_3100000 0.012798834 3.707 457
13 4b_3000000 0 3.720 458
13 4b_2900000 0.005198013 3.720 459
13 4b_2800000 0.009251553 3.725 460
13 4b_2700000 0.009174683 3.734 461
13 4b_2600000 0.006094334 3.743 462
13 4b_2500000 0.011240594 3.749 463
13 4b_2400000 0.015265358 3.761 464
13 4b_2300000 0.013921531 3.776 465
13 4b_2200000 0.010348231 3.790 466
13 4b_2100000 0.006642691 3.800 467
13 4b_2000000 0.004262248 3.807 468
13 4b_1900000 0.011541576 3.811 469
13 4b_1800000 0.002359479 3.823 470
13 4b_1700000 0.009492342 3.825 471
13 4b_1600000 0.008467441 3.835 472
13 4b_1500000 0.006264901 3.843 473
13 4b_1400000 0.004229624 3.849 474
13 4b_1300000 0.004435932 3.854 475
13 4b_1200000 0.009605872 3.858 476
13 4b_1100000 0.004404603 3.868 477
13 4b_1000000 0.006910169 3.872 478
13 4b_900000 0.00230879 3.879 479
13 4b_800000 0.00512803 3.881 480
13 4b_700000 0.005144594 3.886 481
13 4b_600000 0.004664922 3.891 482
13 4b_500000 0.002764064 3.896 483
13 4b_400000 0.003299347 3.899 484
13 4b_300000 0.008695574 3.902 485
13 4b_200000 0.032827036 3.911 486
13 4b_100000 0.001031441 3.944 487
13 4b_0 0.004469599 3.945 488
13 83b_800000 0.004295403 3.949 489
13 83b_700000 0.003952548 3.954 490
13 83b_600000 0.004447997 3.957 491
13 83b_500000 0.006850764 3.962 492
13 83b_400000 0.009205774 3.969 493
13 83b_300000 0.001878765 3.978 494
13 83b_200000 0.010936184 3.980 495
13 83b_100000 0.000134752 3.991 496
13 83b_0 3.991 497
14 102_100000 0.001786208 3.991 498
14 102_200000 0.00909838 3.993 499
14 102_300000 0.008126787 4.002 500
14 102_400000 0.015670219 4.010 501
14 102_500000 0.0098324 4.026 502
14 102_600000 0.014372441 4.035 503
14 102_700000 0.003594866 4.050 504
14 102_800000 0.012119413 4.053 505
14 102_900000 0.010680623 4.066 506
14 6c_0 0.016067805 4.076 507
14 6c_100000 0.005296366 4.092 508
14 26a_0 0.007188441 4.098 509
14 26a_100000 0.010859472 4.105 510
14 26a_200000 0.011229204 4.116 511
14 26a_300000 0.026700291 4.127 512
14 26a_400000 0.004761654 4.154 513
14 26a_500000 0.014386471 4.158 514
14 26a_600000 0.011825691 4.173 515
14 26a_700000 0 4.185 516
14 26a_800000 0 4.185 517
14 26a_900000 0 4.185 518
14 26a_1000000 0 4.185 519
14 26a_1100000 0 4.185 520
14 26a_1200000 0 4.185 521
14 26a_1300000 0 4.185 522
14 26a_1400000 0 4.185 523
14 26a_1500000 0 4.185 524
14 278_0 0 4.185 525
14 278_100000 0 4.185 526
14 148_100000 0 4.185 527
14 148_200000 0 4.185 528
14 148_300000 0 4.185 529
14 148_400000 0 4.185 530
14 148_500000 0 4.185 531
14 148_600000 0 4.185 532
14 198_0 0.000120086 4.185 533
14 198_100000 0 4.185 534
14 198_200000 0 4.185 535
14 198_300000 0 4.185 536
14 101_0 0 4.185 537
14 101_100000 0 4.185 538
14 101_400000 0 4.185 539
14 101_800000 0 4.185 540
14 101_900000 0 4.185 541
14 17a_1100000 0 4.185 542
14 17a_1000000 0 4.185 543
14 17a_900000 0 4.185 544
14 17a_800000 0 4.185 545
14 17a_500000 0 4.185 546
14 17a_400000 0 4.185 547
14 17a_300000 0 4.185 548
14 17a_200000 0 4.185 549
14 17a_0 0 4.185 550
14 208_0 0 4.185 551
14 208_300000 0 4.185 552
14 208_400000 0 4.185 553
14 453_0 0 4.185 554
14 247_200000 0.00894425 4.185 555
14 247_100000 0.012894818 4.194 556
14 247_0 0.009095643 4.206 557
14 419_0 0.006585812 4.216 558
14 207_300000 0.004231922 4.222 559
14 207_200000 0.00258577 4.226 560
14 207_100000 0.004109288 4.229 561
14 207_0 0 4.233 562
14 164_500000 0.00746018 4.233 563
14 164_400000 0.001588814 4.241 564
14 164_300000 0.011940038 4.242 565
14 164_200000 0.009163007 4.254 566
14 164_100000 0.007486158 4.263 567
14 164_0 0.016027916 4.271 568
14 123_700000 0.005726704 4.287 569
14 123_600000 0.00558625 4.292 570
14 123_500000 0.004837025 4.298 571
14 123_400000 0.006049779 4.303 572
14 123_300000 0.005396218 4.309 573
14 123_200000 0.003750408 4.314 574
14 123_100000 0.000811278 4.318 575
14 123_0 0.003376175 4.319 576
14 15_0 0.003222673 4.322 577
14 15_100000 0.007297251 4.326 578
14 15_200000 0.003351178 4.333 579
14 15_300000 0.003707861 4.336 580
14 15_400000 0.005038147 4.340 581
14 15_500000 0.010590621 4.345 582
14 15_700000 0 4.355 583
14 15_800000 0.004970978 4.355 584
14 15_1100000 0.004552635 4.360 585
14 15_2500000 0.001691872 4.365 586
14 127_500000 0 4.367 587
14 127_400000 2.00E-005 4.367 588
14 127_300000 0.001302388 4.367 589
14 127_200000 0 4.368 590
14 168_200000 0.000574744 4.368 591
14 168_300000 0.000889246 4.369 592
14 168_400000 0.002039963 4.369 593
14 168_500000 0.00130395 4.372 594
14 211_300000 0 4.373 595
14 211_200000 0 4.373 596
14 211_100000 0.005597368 4.373 597
14 211_0 0.008381789 4.378 598
14 300_0 0 4.387 599
14 300_100000 0 4.387 600
14 300_200000 0.008277686 4.387 601
14 126_700000 0.008568325 4.395 602
14 126_600000 0.001300532 4.404 603
14 126_500000 0.00267288 4.405 604
14 126_400000 0.019026163 4.408 605
14 126_200000 0.013457031 4.427 606
14 126_0 0.008614876 4.440 607
14 361_0 0.013595084 4.449 608
14 121_700000 0.002749267 4.462 609
14 121_600000 0.011707302 4.465 610
14 121_200000 0.016219313 4.477 611
14 1042_0 0.001078969 4.493 612
14 303_0 0.010615116 4.494 613
14 195_0 0 4.505 614
14 195_100000 0.002186391 4.505 615
14 195_400000 0.003750788 4.507 616
14 219_0 0.00962152 4.511 617
14 219_200000 0 4.520 618
14 366_0 0.008150784 4.520 619
14 128_0 0.008991794 4.528 620
14 128_100000 0.003004957 4.537 621
14 128_400000 0.005100758 4.540 622
14 128_600000 0.005930966 4.546 623
14 128_700000 0.007008955 4.551 624
14 178_100000 0.011439715 4.558 625
14 178_200000 0 4.570 626
14 178_300000 0.002777833 4.570 627
14 178_400000 0.003249458 4.573 628
14 178_500000 0.017447503 4.576 629
14 140b_400000 0.007742828 4.593 630
14 140b_300000 0.003157644 4.601 631
14 140b_200000 0.01717283 4.604 632
14 92_0 0.015370221 4.621 633
14 92_100000 0.001280367 4.637 634
14 92_200000 0.007662976 4.638 635
14 92_300000 0.011686723 4.646 636
14 92_400000 0.014916823 4.657 637
14 92_500000 0.001149492 4.672 638
14 92_600000 0.006543591 4.674 639
14 92_700000 0.004078973 4.680 640
14 92_800000 0.004338819 4.684 641
14 92_900000 0.006177214 4.688 642
14 92_1000000 0.007608137 4.695 643
14 1131_0 0.004280773 4.702 644
14 459_0 0.00751722 4.707 645
14 290_0 0.001698943 4.714 646
14 290_100000 0.003279146 4.716 647
14 290_200000 0.020743442 4.719 648
14 2_100000 0.001909071 4.740 649
14 2_200000 0.002054007 4.742 650
14 2_300000 0.013735066 4.744 651
14 2_400000 0.004445543 4.757 652
14 2_500000 0.000968728 4.762 653
14 2_600000 0.005387975 4.763 654
14 2_700000 0.002119015 4.768 655
14 2_800000 0.000551331 4.770 656
14 2_900000 0.008184257 4.771 657
14 2_1000000 0.012717125 4.779 658
14 2_1100000 0.010864447 4.792 659
14 2_1200000 0.007513986 4.803 660
14 2_1300000 0.044504142 4.810 661
14 2_1900000 0.005822983 4.855 662
14 2_2100000 0.027143573 4.861 663
14 2_2800000 0 4.888 664
14 2_2900000 0.032112959 4.888 665
14 2_3200000 0.001752974 4.920 666
14 2_3300000 0.014182341 4.922 667
14 2_3500000 0.012642468 4.936 668
14 2_3600000 0 4.948 669
14 2_3700000 0.004783364 4.948 670
14 2_3800000 0.00796267 4.953 671
14 2_3900000 0.008020301 4.961 672
14 2_4200000 0.006579579 4.969 673
14 2_4300000 0 4.976 674
14 2_4400000 0.003808657 4.976 675
14 2_4500000 0.005825635 4.980 676
14 58a_0 0.013092623 4.985 677
14 58a_100000 0.001752767 4.998 678
14 58a_200000 0.007553949 5.000 679
14 58a_300000 0.000373813 5.008 680
14 58a_400000 0.00384941 5.008 681
14 58a_500000 0 5.012 682
14 396_0 5.012 683
1 79_0 0 5.012 684
1 79_100000 0.005236374 5.012 685
1 79_200000 0.003802118 5.017 686
1 79_300000 0.013226555 5.021 687
1 79_400000 0.01099287 5.034 688
1 79_500000 0.007110018 5.045 689
1 79_600000 0.004999427 5.052 690
1 79_700000 0.009985951 5.057 691
1 79_800000 0.001655388 5.067 692
1 79_900000 0.018659372 5.069 693
1 79_1000000 0.002735878 5.088 694
1 79_1100000 0.004307852 5.090 695
1 69_0 0.021123444 5.095 696
1 69_100000 0.000582747 5.116 697
1 69_200000 0.006146165 5.116 698
1 69_300000 0.005239698 5.123 699
1 69_400000 0.002549965 5.128 700
1 69_500000 0.014869148 5.130 701
1 69_600000 0.002671835 5.145 702
1 69_700000 0.00332457 5.148 703
1 69_800000 0.005532949 5.151 704
1 69_900000 0.009340299 5.157 705
1 69_1000000 0.009305951 5.166 706
1 69_1100000 0 5.175 707
1 69_1200000 0.004349072 5.175 708
1 181_0 0.012526323 5.180 709
1 181_100000 0.005078254 5.192 710
1 181_200000 0.008573609 5.197 711
1 181_300000 0.005316144 5.206 712
1 181_400000 0.003485086 5.211 713
1 181_500000 0.004545201 5.215 714
1 559_0 0.008367265 5.219 715
1 60_0 0.023118549 5.228 716
1 60_200000 0.022519906 5.251 717
1 60_300000 0.022825356 5.273 718
1 60_500000 0.006356239 5.296 719
1 60_600000 0.003184987 5.302 720
1 60_700000 0.016476055 5.306 721
1 60_800000 0.010692547 5.322 722
1 60_900000 0.003184579 5.333 723
1 60_1000000 0.022393722 5.336 724
1 60_1100000 0.014737371 5.358 725
1 60_1200000 0.011079857 5.373 726
1 60_1300000 0.00278 5.384 727
1 60_1400000 0.006552469 5.387 728
1 333_0 0 5.394 729
1 333_100000 0.010731534 5.394 730
1 165_0 0.007626803 5.404 731
1 165_100000 0.002594707 5.412 732
1 165_200000 0.008901154 5.414 733
1 165_300000 0.002829999 5.423 734
1 165_400000 0.004055288 5.426 735
1 165_500000 0.004423948 5.430 736
1 362_100000 0.010347645 5.435 737
1 308_100000 0.022305957 5.445 738
1 314_100000 0.003749539 5.467 739
1 314_0 0.017318741 5.471 740
1 1184_0 0.007533995 5.488 741
1 26b_400000 0.0148996 5.496 742
1 26b_300000 0.008858131 5.511 743
1 26b_200000 0.008560015 5.520 744
1 26b_100000 0.007808413 5.528 745
1 240_100000 0.019594354 5.536 746
1 82_1000000 0.011127165 5.556 747
1 82_900000 0.015371639 5.567 748
1 82_800000 0.003377287 5.582 749
1 82_600000 0.002276204 5.586 750
1 82_500000 0 5.588 751
1 82_400000 0 5.588 752
1 82_300000 0.007118612 5.588 753
1 82_200000 0.004770183 5.595 754
1 82_100000 0 5.600 755
1 427_0 0 5.600 756
1 80a_900000 0.006157629 5.600 757
1 80a_800000 0.002513424 5.606 758
1 80a_600000 0.000504458 5.608 759
1 80a_500000 0 5.609 760
1 80a_400000 0 5.609 761
1 80a_300000 0.0018894 5.609 762
1 80a_200000 0 5.611 763
1 80a_0 0.004289823 5.611 764
1 254b_100000 0.003327229 5.615 765
1 83a_100000 0.002778082 5.618 766
1 83a_0 0 5.621 767
1 187_0 0.001589947 5.621 768
1 74_1100000 0 5.623 769
1 232_300000 0.002446784 5.623 770
1 232_100000 0 5.625 771
1 146_0 0.001988646 5.625 772
1 146_100000 0.011029261 5.627 773
1 146_200000 0.005326611 5.638 774
1 115b_400000 0 5.644 775
1 115b_300000 0.00656187 5.644 776
1 115b_200000 0.072254129 5.650 777
1 115b_100000 0.038793624 5.722 778
1 563_0 5.761 779
2 18_2200000 0.021231451 5.761 780
2 18_2100000 0.001280662 5.782 781
2 18_2000000 0.001744453 5.784 782
2 18_1900000 0.008035859 5.785 783
2 18_1800000 0.022991822 5.793 784
2 18_1700000 0.004443652 5.816 785
2 18_1600000 0.004117573 5.821 786
2 18_1500000 0.008635586 5.825 787
2 18_1400000 0.000770302 5.834 788
2 18_1300000 0.01190485 5.834 789
2 18_1200000 0.004839951 5.846 790
2 18_1100000 0.001293559 5.851 791
2 18_1000000 0.00685989 5.852 792
2 18_900000 0.006092522 5.859 793
2 18_800000 0.00456508 5.865 794
2 18_700000 0.00615194 5.870 795
2 18_600000 0.004466066 5.876 796
2 18_500000 0.004223627 5.881 797
2 18_400000 0.010343395 5.885 798
2 18_300000 0.014002319 5.895 799
2 18_200000 0.008519388 5.909 800
2 18_100000 0.004135878 5.918 801
2 18_0 0.005101757 5.922 802
2 89_0 0 5.927 803
2 89_100000 0.01191448 5.927 804
2 89_200000 0.002049529 5.939 805
2 89_300000 0.014452303 5.941 806
2 89_400000 0.001356572 5.955 807
2 89_500000 0.007732516 5.957 808
2 89_600000 0.006652654 5.964 809
2 89_700000 0.010782078 5.971 810
2 89_800000 0.00460085 5.982 811
2 89_900000 0.009467793 5.986 812
2 89_1000000 0 5.996 813
2 44a_700000 0.007776468 5.996 814
2 44a_600000 0.002595487 6.004 815
2 44a_500000 0.004894613 6.006 816
2 44a_400000 0.001254365 6.011 817
2 44a_300000 0.005250738 6.012 818
2 44a_200000 0.003118922 6.018 819
2 44a_100000 0 6.021 820
2 212_0 0.007987488 6.021 821
2 212_100000 0.013531395 6.029 822
2 212_200000 2.30E-005 6.042 823
2 212_300000 1.84E-005 6.042 824
2 212_400000 0.003748702 6.042 825
2 249_0 0.002508619 6.046 826
2 249_100000 0.000866411 6.049 827
2 249_200000 0.001084061 6.049 828
2 81_0 0 6.051 829
2 81_100000 0.005123987 6.051 830
2 81_200000 0.002150941 6.056 831
2 81_300000 0.000650836 6.058 832
2 81_400000 0 6.059 833
2 81_500000 0.002758474 6.059 834
2 81_700000 0.005220967 6.061 835
2 81_800000 0.002035327 6.066 836
2 81_900000 0 6.069 837
2 112_0 0 6.069 838
2 112_100000 0.001597846 6.069 839
2 112_200000 0 6.070 840
2 112_400000 0.002515703 6.070 841
2 112_500000 0 6.073 842
2 112_600000 0.00136727 6.073 843
2 112_800000 0.008208754 6.074 844
2 216_200000 0.003354162 6.082 845
2 216_300000 0 6.086 846
2 19_100000 0.006292524 6.086 847
2 19_200000 0 6.092 848
2 19_300000 0.003253846 6.092 849
2 19_400000 0.003230491 6.095 850
2 19_600000 0 6.098 851
2 19_700000 0 6.098 852
2 19_800000 0.006053562 6.098 853
2 19_1000000 0 6.104 854
2 19_1100000 0 6.104 855
2 19_1200000 0.000394721 6.104 856
2 19_1300000 0 6.105 857
2 19_1400000 0 6.105 858
2 73_600000 0.002173198 6.105 859
2 73_700000 0 6.107 860
2 73_800000 0 6.107 861
2 73_1100000 0.001991569 6.107 862
2 73_1200000 0 6.109 863
2 65_500000 0 6.109 864
2 65_600000 0.001727386 6.109 865
2 65_700000 0 6.111 866
2 65_1000000 0.002030062 6.111 867
2 65_1100000 0 6.113 868
2 42_100000 0 6.113 869
2 42_200000 0.001602967 6.113 870
2 42_300000 0.006244118 6.114 871
2 42_400000 0.00311856 6.121 872
2 42_600000 0.00935113 6.124 873
2 42_1000000 0.013925939 6.133 874
2 42_1400000 0 6.147 875
2 42_1500000 0.018600329 6.147 876
2 173_500000 0 6.166 877
2 173_400000 1.02E-002 6.166 878
2 173_300000 0.002214347 6.176 879
2 173_200000 0.010809887 6.178 880
2 173_100000 0.009824123 6.189 881
2 173_0 0.006170425 6.199 882
2 41a_0 0.009628191 6.205 883
2 41a_100000 0.011410333 6.214 884
2 41a_200000 0.011577614 6.226 885
2 41a_300000 0.017879386 6.237 886
2 41a_500000 0 6.255 887
2 41a_600000 0.005674263 6.255 888
2 41a_700000 0 6.261 889
2 41a_800000 0.004710979 6.261 890
2 41a_900000 0.005929318 6.266 891
2 41a_1000000 0.006412716 6.272 892
2 41a_1100000 0.00329495 6.278 893
2 41a_1200000 0.003363265 6.281 894
2 41a_1300000 0.006145072 6.285 895
2 41a_1400000 0.007656724 6.291 896
2 41a_1500000 0.003975289 6.298 897
2 429_0 0.006847061 6.302 898
2 151_0 0.002000557 6.309 899
2 151_100000 0.009008649 6.311 900
2 151_200000 0.018528903 6.320 901
2 151_300000 0.020052429 6.339 902
2 151_500000 0.010711444 6.359 903
2 151_600000 0 6.370 904
2 27_0 0.012086976 6.370 905
2 27_100000 0.010782037 6.382 906
2 27_200000 0.012379679 6.392 907
2 27_300000 0.010596951 6.405 908
2 27_400000 0.0100787 6.415 909
2 27_500000 0.003575959 6.425 910
2 27_600000 0.005445865 6.429 911
2 27_700000 0.008091117 6.435 912
2 27_800000 0.017889491 6.443 913
2 27_900000 0.002887487 6.460 914
2 27_1000000 0.012087643 6.463 915
2 27_1100000 0.007974951 6.475 916
2 27_1200000 0.014063079 6.483 917
2 27_1300000 0.016420013 6.498 918
2 27_1400000 0.000914649 6.514 919
2 27_1500000 0.005647552 6.515 920
2 27_1600000 0.006393971 6.520 921
2 27_1700000 0.002458492 6.527 922
2 27_1800000 0 6.529 923
2 27_1900000 0 6.529 924
2 27_2000000 6.529 925
3 35_1400000 0.073218765 6.529 926
3 35_900000 0.092161672 6.603 927
3 35_0 0.032217852 6.695 928
3 70b_0 0.052461652 6.727 929
3 70b_300000 0.046511711 6.779 930
3 70b_600000 0.004020054 6.826 931
3 70b_700000 0.018076658 6.830 932
3 62_500000 0.007424204 6.848 933
3 62_600000 0.028357511 6.855 934
3 62_900000 0 6.884 935
3 62_1200000 0.014347098 6.884 936
3 137_400000 0 6.898 937
3 137_300000 0 6.898 938
3 137_200000 0.004180821 6.898 939
3 137_100000 0 6.902 940
3 137_0 0.001808963 6.902 941
3 55a_300000 0.009212295 6.904 942
3 55a_200000 0.001156894 6.913 943
3 55a_100000 0 6.914 944
3 316_100000 0 6.914 945
3 316_0 0.000880501 6.914 946
3 116_800000 0 6.915 947
3 116_700000 0 6.915 948
3 116_600000 0 6.915 949
3 116_500000 0.003634387 6.915 950
3 116_400000 0 6.919 951
3 116_300000 0.001861143 6.919 952
3 116_100000 0.005241454 6.921 953
3 116_0 0.001532039 6.926 954
3 72_100000 0.000441863 6.928 955
3 389_0 0.000620333 6.928 956
3 23_2100000 0.001775031 6.929 957
3 23_1900000 0.002989809 6.930 958
3 23_1600000 0 6.933 959
3 23_1200000 0 6.933 960
3 23_1000000 0.001367463 6.933 961
3 23_900000 0 6.935 962
3 23_800000 0 6.935 963
3 23_700000 1.54E-003 6.935 964
3 23_600000 0 6.936 965
3 23_200000 0 6.936 966
3 23_0 9.48E-005 6.936 967
3 265_200000 0.000851921 6.936 968
3 265_100000 0 6.937 969
3 265_0 0 6.937 970
3 5a_1700000 0 6.937 971
3 5a_1600000 0 6.937 972
3 5a_1500000 0.011724423 6.937 973
3 5a_1300000 0 6.949 974
3 5a_1200000 0.012207138 6.949 975
3 5a_1100000 0.003489067 6.961 976
3 5a_1000000 0 6.965 977
3 5a_900000 0.004939459 6.965 978
3 5a_700000 0.005917898 6.970 979
3 5a_600000 0.002647378 6.976 980
3 5a_500000 0.010320299 6.978 981
3 5a_400000 0.010663891 6.989 982
3 5a_300000 0.006519763 6.999 983
3 5a_200000 0.008926315 7.006 984
3 106a_0 0.006540673 7.015 985
3 106a_100000 0 7.021 986
3 106a_200000 0.006921333 7.021 987
3 106a_300000 0.007414444 7.028 988
3 106a_400000 0 7.036 989
3 106a_500000 0.012634497 7.036 990
3 106a_600000 0.011707792 7.048 991
3 106a_700000 0.013306625 7.060 992
3 258_200000 0.01560939 7.073 993
3 258_100000 0.002198853 7.089 994
3 258_0 0.01103045 7.091 995
3 169_500000 0.00418184 7.102 996
3 169_400000 0.017057458 7.106 997
3 169_300000 0.019230923 7.123 998
3 169_200000 0.009172286 7.143 999
3 169_100000 0.010059463 7.152 1000
3 169_0 0.007043083 7.162 1001
3 98_1000000 0.005646181 7.169 1002
3 98_900000 0.007023716 7.174 1003
3 98_800000 0.011527859 7.181 1004
3 98_700000 0.014993453 7.193 1005
3 98_500000 0.005682069 7.208 1006
3 98_400000 0.010135706 7.214 1007
3 98_300000 0.007064323 7.224 1008
3 98_200000 0.014216364 7.231 1009
3 98_100000 0.023539292 7.245 1010
3 261_0 0.000824238 7.269 1011
3 261_100000 0.011635717 7.269 1012
3 261_200000 0.008534224 7.281 1013
3 273a_0 0 7.290 1014
3 64_1300000 0.024747729 7.290 1015
3 64_1200000 0.014706437 7.314 1016
3 64_1100000 0.023696687 7.329 1017
3 64_1000000 0.012047894 7.353 1018
3 64_900000 0.003702579 7.365 1019
3 64_800000 0.022017683 7.369 1020
3 64_700000 0.019032767 7.391 1021
3 64_600000 0.012130874 7.410 1022
3 64_500000 0.01287934 7.422 1023
3 64_400000 0.010294095 7.435 1024
3 64_300000 0.002308123 7.445 1025
3 64_200000 0 7.447 1026
3 64_100000 0.053006278 7.447 1027
3 64_0 7.500 1028
4 1_4800000 0 7.500 1029
4 1_4700000 0.008906183 7.500 1030
4 1_4600000 0 7.509 1031
4 1_4500000 0.004503718 7.509 1032
4 1_4400000 0.005220495 7.514 1033
4 1_4300000 0.006274335 7.519 1034
4 1_4200000 0.002428369 7.525 1035
4 1_4100000 0.011702498 7.528 1036
4 1_4000000 0.002436447 7.539 1037
4 1_3900000 0.006781275 7.542 1038
4 1_3800000 0.008222374 7.548 1039
4 1_3700000 0.001887875 7.557 1040
4 1_3600000 0.003441454 7.559 1041
4 1_3500000 0.010975237 7.562 1042
4 1_3400000 0.008070659 7.573 1043
4 1_3300000 0.006947568 7.581 1044
4 1_3200000 0.00539698 7.588 1045
4 1_3100000 0.002565397 7.593 1046
4 1_3000000 0.006878824 7.596 1047
4 1_2900000 0.012110155 7.603 1048
4 1_2800000 0.007186669 7.615 1049
4 1_2700000 0.007763371 7.622 1050
4 1_2600000 0.006602852 7.630 1051
4 1_2500000 0.004499997 7.636 1052
4 1_2400000 0.001296492 7.641 1053
4 1_2300000 0.013734945 7.642 1054
4 1_2200000 0.007397086 7.656 1055
4 1_2100000 0.007209341 7.663 1056
4 1_2000000 0.004916453 7.671 1057
4 1_1900000 0.003867169 7.676 1058
4 1_1800000 0.004720034 7.679 1059
4 1_1700000 0.007330562 7.684 1060
4 1_1600000 0.001900504 7.691 1061
4 1_1500000 0.01463497 7.693 1062
4 1_1400000 0.005712058 7.708 1063
4 1_1300000 0.014127709 7.714 1064
4 1_1200000 0.006147912 7.728 1065
4 1_1100000 0.017454343 7.734 1066
4 1_1000000 0.006431471 7.751 1067
4 1_900000 0.013819203 7.758 1068
4 1_800000 0.001976168 7.772 1069
4 1_700000 0.010785173 7.774 1070
4 1_600000 0.015343437 7.784 1071
4 1_500000 0.010032597 7.800 1072
4 1_400000 0.006847203 7.810 1073
4 1_300000 0.000641115 7.817 1074
4 1_200000 0.003078599 7.817 1075
4 1_100000 0.005103016 7.820 1076
4 1_0 0.007634023 7.825 1077
4 54_1400000 0.007988789 7.833 1078
4 54_1300000 0.001696918 7.841 1079
4 54_1200000 0.01045996 7.843 1080
4 54_1100000 0.00886207 7.853 1081
4 54_1000000 0.005065983 7.862 1082
4 54_900000 0.006213968 7.867 1083
4 54_800000 0.010290498 7.873 1084
4 54_700000 0.010365498 7.884 1085
4 54_600000 0.003490084 7.894 1086
4 54_500000 0.006603648 7.898 1087
4 54_400000 0.004663171 7.904 1088
4 54_300000 0.001313789 7.909 1089
4 54_200000 0.001848929 7.910 1090
4 54_100000 0.004273693 7.912 1091
4 54_0 0.00392165 7.916 1092
4 120_0 0.008797422 7.920 1093
4 120_100000 0 7.929 1094
4 120_200000 0.009486901 7.929 1095
4 120_300000 0.00053285 7.938 1096
4 120_400000 0.003808722 7.939 1097
4 120_600000 0 7.943 1098
4 120_700000 0.003541906 7.943 1099
4 120_800000 0.010520946 7.946 1100
4 150_300000 0.002536132 7.957 1101
4 150_200000 0.003384029 7.959 1102
4 150_100000 0.003593761 7.963 1103
4 150_0 0.003345524 7.966 1104
4 57_1100000 0.002922625 7.970 1105
4 57_700000 0.00710485 7.973 1106
4 57_500000 0 7.980 1107
4 57_400000 0.003146971 7.980 1108
4 57_300000 0.002307963 7.983 1109
4 57_100000 0 7.985 1110
4 254a_0 0 7.985 1111
4 154_0 0 7.985 1112
4 154_100000 0 7.985 1113
4 154_200000 0.001059651 7.985 1114
4 154_400000 0 7.986 1115
4 154_500000 0 7.986 1116
4 110_100000 0.001054956 7.986 1117
4 110_200000 0.001062816 7.987 1118
4 110_300000 0 7.988 1119
4 110_400000 0 7.988 1120
4 110_600000 0 7.988 1121
4 177_0 0 7.988 1122
4 177_100000 0 7.988 1123
4 177_200000 0 7.988 1124
4 177_300000 0.00061873 7.988 1125
4 177_400000 0 7.989 1126
4 134_700000 0.003583844 7.989 1127
4 6a_2100000 0 7.993 1128
4 6a_2000000 0 7.993 1129
4 6a_1900000 0.001286628 7.993 1130
4 6a_1800000 0.003677745 7.994 1131
4 6a_1700000 0.007316938 7.998 1132
4 6a_1600000 0.008399248 8.005 1133
4 6a_1300000 0 8.013 1134
4 6a_1100000 0.007203932 8.013 1135
4 6a_1000000 0.003001028 8.020 1136
4 6a_900000 0.003443784 8.023 1137
4 6a_700000 0.001532678 8.027 1138
4 6a_600000 0.004275786 8.028 1139
4 6a_500000 0.003243576 8.033 1140
4 6a_300000 0.001983605 8.036 1141
4 6a_100000 0.004610478 8.038 1142
4 6a_0 0.004646286 8.043 1143
4 70c_100000 0.00467234 8.047 1144
4 55b_100000 0.012914409 8.052 1145
4 55b_200000 0.000932337 8.065 1146
4 55b_300000 0.008119168 8.066 1147
4 55b_400000 0.000725847 8.074 1148
4 55b_500000 0 8.075 1149
4 55b_600000 0.006187833 8.075 1150
4 55b_1000000 0 8.081 1151
4 443_0 0.005099166 8.081 1152
4 346_0 0.006822613 8.086 1153
4 346_100000 0.005297194 8.093 1154
4 320_0 0.001604243 8.098 1155
4 320_100000 0.008347974 8.100 1156
4 96a_0 0.014234612 8.108 1157
4 96a_100000 0.009872003 8.122 1158
4 96a_200000 0.003061689 8.132 1159
4 96a_400000 0.001955965 8.135 1160
4 96a_500000 0.020237166 8.137 1161
4 96a_600000 0.001657667 8.157 1162
4 96a_700000 0.004109409 8.159 1163
4 96a_800000 0.004797763 8.163 1164
4 96a_900000 0.00606693 8.168 1165
4 85_1100000 0.008343948 8.174 1166
4 85_1000000 0.005039838 8.182 1167
4 85_900000 0.048442518 8.187 1168
4 85_200000 0.004807065 8.236 1169
4 1287_0 0.021700455 8.241 1170
4 196_500000 0.051204023 8.262 1171
4 196_0 0.025997332 8.313 1172
4 384_0 0.023576642 8.339 1173
4 541_0 8.363 1174
5 129a_0 0 8.363 1175
5 129a_100000 0.001658327 8.363 1176
5 129a_200000 0.005514102 8.365 1177
5 66_0 0.012409283 8.370 1178
5 66_100000 0.008855638 8.383 1179
5 66_200000 0.006005305 8.391 1180
5 66_300000 0.003766681 8.397 1181
5 66_400000 0.014835526 8.401 1182
5 66_500000 0.015204642 8.416 1183
5 66_600000 0.009064479 8.431 1184
5 66_700000 0.012116153 8.440 1185
5 66_800000 0.016794034 8.452 1186
5 66_900000 0.010006044 8.469 1187
5 66_1000000 0.015561528 8.479 1188
5 66_1200000 0.015658123 8.495 1189
5 32_0 0.001352366 8.510 1190
5 32_100000 0.018442431 8.512 1191
5 32_200000 0.001258801 8.530 1192
5 32_300000 0.002504636 8.532 1193
5 32_400000 0.005226056 8.534 1194
5 32_500000 0.007900502 8.539 1195
5 32_600000 0.008438929 8.547 1196
5 32_700000 0.00387725 8.556 1197
5 32_800000 0.001986027 8.559 1198
5 32_900000 0.016675061 8.561 1199
5 32_1000000 0.007988582 8.578 1200
5 32_1100000 0.01041851 8.586 1201
5 32_1200000 0 8.597 1202
5 32_1300000 0.015764585 8.597 1203
5 32_1400000 0.019100898 8.612 1204
5 32_1600000 0.008508534 8.631 1205
5 32_1800000 0.021072365 8.640 1206
5 205_100000 0.008661942 8.661 1207
5 205_200000 0 8.670 1208
5 205_300000 0.004791171 8.670 1209
5 205_400000 0.005228593 8.674 1210
5 461_0 0.009807112 8.680 1211
5 242_0 0.022620917 8.690 1212
5 242_200000 0.001098541 8.712 1213
5 242_300000 0.009505639 8.713 1214
5 104a_0 0.008684484 8.723 1215
5 104a_100000 0.011797565 8.731 1216
5 104a_200000 0.023948344 8.743 1217
5 141_100000 0.012497049 8.767 1218
5 244_100000 0.023867933 8.780 1219
5 244_0 0.01609939 8.804 1220
5 111a_600000 0 8.820 1221
5 111a_500000 0.034893988 8.820 1222
5 111a_300000 0.026728868 8.855 1223
5 94_0 0.018202312 8.881 1224
5 94_100000 0.025934444 8.899 1225
5 94_200000 0 8.925 1226
5 94_400000 0.00412659 8.925 1227
5 94_500000 0 8.930 1228
5 94_600000 0.069034664 8.930 1229
5 94_700000 0.019376197 8.999 1230
5 94_800000 0.019926753 9.018 1231
5 94_900000 0.010568288 9.038 1232
5 88_400000 0.030335554 9.048 1233
5 194_100000 0.071787001 9.079 1234
5 226_100000 0.062965904 9.151 1235
5 149_600000 0.104655595 9.214 1236
5 149_300000 0.028327671 9.318 1237
5 368_0 0.009068807 9.346 1238
5 288_0 0.036711504 9.356 1239
5 197_0 0 9.392 1240
5 197_300000 0.008497208 9.392 1241
5 197_400000 0.010090386 9.401 1242
5 327_0 0 9.411 1243
5 327_100000 0.004557128 9.411 1244
5 170_500000 0.007626884 9.415 1245
5 170_400000 0.00164286 9.423 1246
5 170_300000 0.008981625 9.425 1247
5 170_200000 0.007256597 9.434 1248
5 170_100000 0.003818376 9.441 1249
5 170_0 0.006711076 9.445 1250
5 158_600000 0.008218528 9.451 1251
5 158_500000 0.004768445 9.460 1252
5 158_400000 0.012148231 9.464 1253
5 158_300000 0.012637708 9.477 1254
5 158_200000 0.003401134 9.489 1255
5 158_100000 0 9.493 1256
5 158_0 0.120561526 9.493 1257
5 53_1300000 0.070499072 9.613 1258
5 53_1000000 0.020280683 9.684 1259
5 53_900000 0.053472267 9.704 1260
5 53_500000 0.067593466 9.757 1261
5 53_200000 0.052730077 9.825 1262
5 221b_0 9.878 1263
6 270_200000 0.000203112 9.878 1264
6 270_100000 0.001948408 9.878 1265
6 270_0 9.57E-006 9.880 1266
6 104b_500000 0.001607728 9.880 1267
6 104b_400000 6.01E-003 9.882 1268
6 104b_300000 0.004822428 9.888 1269
6 104b_200000 0.01532819 9.892 1270
6 104b_100000 0.003201351 9.908 1271
6 104b_0 0.005181512 9.911 1272
6 51_1400000 0.003114501 9.916 1273
6 51_1300000 0.008443712 9.919 1274
6 51_1200000 0.007771577 9.928 1275
6 51_1100000 0.005274852 9.935 1276
6 51_1000000 0.006655629 9.941 1277
6 51_900000 0.007852347 9.947 1278
6 51_800000 0 9.955 1279
6 51_700000 0.007032662 9.955 1280
6 51_600000 0.007078368 9.962 1281
6 51_500000 0.01575128 9.969 1282
6 51_400000 0.009437904 9.985 1283
6 51_300000 0.006558843 9.994 1284
6 51_200000 0.00522123 10.001 1285
6 51_100000 0.010321708 10.006 1286
6 8_3400000 0.004359723 10.017 1287
6 8_3300000 0.012109964 10.021 1288
6 8_3200000 0.009186414 10.033 1289
6 8_3100000 0.010438978 10.042 1290
6 8_3000000 0.008838798 10.053 1291
6 8_2900000 0.004375099 10.062 1292
6 8_2800000 0.008830473 10.066 1293
6 8_2700000 0.007470596 10.075 1294
6 8_2600000 0.006627761 10.082 1295
6 8_2500000 0.012347006 10.089 1296
6 8_2400000 0.025719154 10.101 1297
6 8_2300000 0.007426127 10.127 1298
6 8_2200000 0.022748349 10.134 1299
6 8_2000000 0.006159486 10.157 1300
6 8_1900000 0.002079952 10.163 1301
6 8_1800000 0.007825747 10.165 1302
6 8_1700000 0.00704801 10.173 1303
6 8_1600000 0.013424576 10.180 1304
6 8_1500000 0.008796214 10.194 1305
6 8_1400000 0.002935275 10.202 1306
6 8_1300000 0.007127902 10.205 1307
6 8_1200000 0.010442535 10.212 1308
6 8_1100000 0.010338709 10.223 1309
6 8_1000000 0.015544371 10.233 1310
6 8_800000 0.005820549 10.249 1311
6 8_700000 0.004698801 10.255 1312
6 8_600000 0.001252534 10.259 1313
6 8_500000 0.011966866 10.261 1314
6 8_400000 0.009221836 10.273 1315
6 8_300000 0.009403336 10.282 1316
6 8_200000 0.000643435 10.291 1317
6 8_100000 0.030496333 10.292 1318
6 293_100000 0.000289679 10.322 1319
6 293_0 0.000844489 10.323 1320
6 379a_0 0.005139836 10.323 1321
6 21_0 0.005687009 10.329 1322
6 21_100000 0.002652403 10.334 1323
6 21_200000 0.011898423 10.337 1324
6 21_300000 0.001815939 10.349 1325
6 21_400000 0.000959996 10.351 1326
6 21_500000 0 10.352 1327
6 21_600000 0.004939674 10.352 1328
6 21_700000 0.00360013 10.357 1329
6 21_1400000 0.001658643 10.360 1330
6 21_1600000 0.003872679 10.362 1331
6 21_1700000 0.00810973 10.366 1332
6 136_200000 0 10.374 1333
6 136_300000 0.001729183 10.374 1334
6 136_400000 0.01261282 10.375 1335
6 136_600000 0.003062005 10.388 1336
6 43_1200000 0.053317502 10.391 1337
6 43_1300000 0.058566433 10.444 1338
6 43_1600000 0.023920656 10.503 1339
6 119_100000 0.017438343 10.527 1340
6 119_200000 0.014333311 10.544 1341
6 119_400000 0.011686584 10.559 1342
6 119_500000 0.017108694 10.570 1343
6 119_600000 0.005447808 10.588 1344
6 307_100000 0.002020237 10.593 1345
6 248_100000 0.017109333 10.595 1346
6 248_0 0.002039184 10.612 1347
6 67_200000 0.017423858 10.614 1348
6 67_300000 0 10.632 1349
6 67_400000 0.014762796 10.632 1350
6 67_500000 0 10.646 1351
6 67_600000 0.005617414 10.646 1352
6 25_1500000 0 10.652 1353
6 25_1400000 0.004689967 10.652 1354
6 25_1200000 0 10.657 1355
6 25_1000000 0.001680873 10.657 1356
6 25_900000 0 10.658 1357
6 25_800000 0 10.658 1358
6 25_600000 0 10.658 1359
6 25_500000 0 10.658 1360
6 25_400000 0.005671335 10.658 1361
6 25_300000 0.002544265 10.664 1362
6 25_200000 0 10.667 1363
6 25_100000 0.007761195 10.667 1364
6 25_0 0.006693812 10.674 1365
6 190_400000 0.00706877 10.681 1366
6 190_300000 0.012403362 10.688 1367
6 190_100000 0.003821577 10.700 1368
6 190_0 0.002663996 10.704 1369
6 179_300000 0.010023503 10.707 1370
6 179_100000 0.004636178 10.717 1371
6 179_0 0.021851272 10.722 1372
6 262_200000 0.001002008 10.743 1373
6 262_100000 0.001627266 10.744 1374
6 262_0 0.014652 10.746 1375
6 16_2400000 0.008705459 10.761 1376
6 16_2200000 0 10.769 1377
6 16_2100000 0.004957172 10.769 1378
6 16_2000000 0.005839148 10.774 1379
6 16_1900000 0.007534505 10.780 1380
6 16_1800000 0.002594836 10.788 1381
6 16_1700000 0.001620183 10.790 1382
6 16_1600000 0.005216507 10.792 1383
6 16_1500000 0.000890052 10.797 1384
6 16_1400000 0.006054102 10.798 1385
6 16_1300000 0.02648303 10.804 1386
6 16_1000000 0.008107474 10.831 1387
6 16_900000 0.013981312 10.839 1388
6 16_800000 0.006963538 10.853 1389
6 16_700000 0.002836587 10.860 1390
6 16_600000 0.00357332 10.863 1391
6 16_500000 0.012899365 10.866 1392
6 16_400000 0.009265821 10.879 1393
6 16_300000 0.001573823 10.888 1394
6 16_200000 0 10.890 1395
6 16_100000 10.890 1396
7 251_200000 0.036879081 10.890 1397
7 251_100000 0 10.927 1398
7 251_0 0.014457842 10.927 1399
7 335_100000 0.032965123 10.941 1400
7 113_900000 0.034649869 10.974 1401
7 113_400000 0.084383834 11.009 1402
7 130_800000 0.100338804 11.093 1403
7 130_600000 0.004903283 11.194 1404
7 130_500000 0.086892929 11.198 1405
7 130_100000 0.013395111 11.285 1406
7 130_0 0.030606896 11.299 1407
7 255_100000 0.039924681 11.329 1408
7 9c_100000 0.024346171 11.369 1409
7 114b_0 0.012541168 11.394 1410
7 114b_100000 0.008587859 11.406 1411
7 114b_200000 0.020201749 11.415 1412
7 114b_400000 0 11.435 1413
7 114b_500000 0.005906567 11.435 1414
7 274_0 0 11.441 1415
7 274_100000 0.003339534 11.441 1416
7 274_200000 0.003102424 11.444 1417
7 29_0 0.00479232 11.447 1418
7 29_100000 0.00901848 11.452 1419
7 29_200000 0.010116716 11.461 1420
7 29_300000 0.007846369 11.471 1421
7 29_400000 0.013890577 11.479 1422
7 29_500000 0.005561574 11.493 1423
7 29_600000 0.003197365 11.498 1424
7 29_700000 0 11.502 1425
7 29_800000 0.001206142 11.502 1426
7 29_900000 0.00505275 11.503 1427
7 29_1000000 0.004733354 11.508 1428
7 29_1100000 0 11.513 1429
7 29_1200000 0.007238661 11.513 1430
7 29_1400000 0.004391891 11.520 1431
7 29_1500000 0 11.524 1432
7 29_1700000 0.009803556 11.524 1433
7 29_1800000 0.006793752 11.534 1434
7 166_0 0.002435313 11.541 1435
7 166_200000 0.000746365 11.543 1436
7 166_300000 0.002175836 11.544 1437
7 166_500000 0.012480014 11.546 1438
7 70a_0 0.005319032 11.559 1439
7 56_1000000 0.002826799 11.564 1440
7 95_0 0.002438056 11.567 1441
7 47a_400000 0.001785712 11.569 1442
7 184_300000 0 11.571 1443
7 105_400000 0.011539193 11.571 1444
7 105_0 0.006910646 11.583 1445
7 313_100000 0.021271464 11.590 1446
7 14_2600000 0.000462904 11.611 1447
7 14_2500000 0 11.611 1448
7 14_2200000 0.008634678 11.611 1449
7 14_2000000 0.009048395 11.620 1450
7 14_1900000 0 11.629 1451
7 14_1700000 0.007560512 11.629 1452
7 14_1600000 0.019383022 11.637 1453
7 14_1500000 0.004890912 11.656 1454
7 14_1400000 0.004007265 11.661 1455
7 14_1100000 0.00401076 11.665 1456
7 14_800000 0.004055075 11.669 1457
7 14_600000 0.023536199 11.673 1458
7 97a_100000 0.037102852 11.696 1459
7 97a_200000 0.026067384 11.734 1460
7 97a_400000 11.760 1461
8 109_800000 0.000863909 11.760 1462
8 109_700000 0.000508066 11.760 1463
8 109_600000 0.000777043 11.761 1464
8 109_500000 0.003215099 11.762 1465
8 109_400000 0.01202418 11.765 1466
8 109_300000 0.001681675 11.777 1467
8 109_200000 0.011716646 11.779 1468
8 109_100000 0.008648903 11.790 1469
8 109_0 0.002812359 11.799 1470
8 233_300000 0.004453473 11.802 1471
8 233_200000 0.014419054 11.806 1472
8 233_100000 0.008473541 11.821 1473
8 233_0 0.006823543 11.829 1474
8 11_0 0.002481368 11.836 1475
8 11_100000 0.001182301 11.838 1476
8 11_200000 0.012444316 11.840 1477
8 11_300000 0.017583061 11.852 1478
8 11_400000 0.008361082 11.870 1479
8 11_500000 0.012110511 11.878 1480
8 11_600000 0.002618645 11.890 1481
8 11_700000 0.007956307 11.893 1482
8 11_800000 0.008603951 11.901 1483
8 11_900000 0.016318161 11.909 1484
8 11_1000000 0.008536466 11.926 1485
8 11_1100000 0.002880646 11.934 1486
8 11_1200000 0.0144084 11.937 1487
8 11_1300000 0.01363838 11.952 1488
8 11_1400000 0.011545403 11.965 1489
8 11_1500000 0.005981926 11.977 1490
8 11_1600000 0.005187606 11.983 1491
8 11_1700000 0.006668371 11.988 1492
8 11_1800000 0.008294111 11.995 1493
8 11_1900000 0.00553641 12.003 1494
8 11_2000000 0.015216457 12.008 1495
8 11_2100000 0.00731889 12.024 1496
8 11_2200000 0.008240441 12.031 1497
8 11_2300000 0.001072881 12.039 1498
8 11_2400000 0.017053267 12.040 1499
8 11_2500000 0.005974482 12.057 1500
8 11_2600000 0.001080102 12.063 1501
8 11_2700000 0.003134453 12.064 1502
8 11_2800000 6.94E-003 12.067 1503
8 11_2900000 4.81E-003 12.074 1504
8 155_0 0.003728429 12.079 1505
8 155_100000 0.003038883 12.083 1506
8 155_200000 0.016426141 12.086 1507
8 155_400000 0.004201966 12.102 1508
8 155_600000 0.016179659 12.107 1509
8 76_300000 0.005230608 12.123 1510
8 76_400000 0.002749989 12.128 1511
8 76_500000 0.024015965 12.131 1512
8 76_700000 0 12.155 1513
8 76_800000 0.005240746 12.155 1514
8 76_900000 0.002513224 12.160 1515
8 59_0 0.000349398 12.163 1516
8 59_100000 0.002065217 12.163 1517
8 59_200000 0 12.165 1518
8 59_300000 0.005437984 12.165 1519
8 59_600000 0 12.170 1520
8 59_700000 0 12.170 1521
8 59_900000 0 12.170 1522
8 59_1000000 0.003504848 12.170 1523
8 59_1100000 0.001175886 12.174 1524
8 59_1200000 0 12.175 1525
8 59_1400000 0.001275144 12.175 1526
8 118_600000 0.000515929 12.176 1527
8 118_500000 0.000573674 12.177 1528
8 118_400000 0 12.177 1529
8 118_300000 0 12.177 1530
8 118_200000 0.003233466 12.177 1531
8 38_1500000 0 12.181 1532
8 38_1400000 0 12.181 1533
8 217_300000 0 12.181 1534
8 217_100000 0 12.181 1535
8 171_100000 0.001047041 12.181 1536
8 171_200000 0 12.182 1537
8 77_1000000 0 12.182 1538
8 77_900000 0 12.182 1539
8 77_800000 0 12.182 1540
8 412_0 0.003405088 12.182 1541
8 46_1400000 0 12.185 1542
8 46_1200000 0.000936631 12.185 1543
8 46_1000000 0.001441838 12.186 1544
8 46_900000 0 12.187 1545
8 46_800000 0 12.187 1546
8 46_700000 0.00392999 12.187 1547
8 46_500000 0.004799772 12.191 1548
8 46_400000 0.00126158 12.196 1549
8 46_300000 0.001023966 12.197 1550
8 46_200000 0 12.198 1551
8 46_100000 0 12.198 1552
8 46_0 0 12.198 1553
8 12b_0 0 12.198 1554
8 12b_200000 0.005546622 12.198 1555
8 12b_400000 0.003782388 12.204 1556
8 12b_500000 0.006646055 12.208 1557
8 12b_600000 0.00102168 12.214 1558
8 12b_700000 0 12.215 1559
8 12b_800000 0.009127054 12.215 1560
8 12b_1000000 0.003932875 12.225 1561
8 12b_1100000 0.007714089 12.229 1562
8 12b_1200000 0.009427634 12.236 1563
8 12b_1300000 0.003799231 12.246 1564
8 12b_1400000 0.001882404 12.249 1565
8 12b_1500000 0.014476655 12.251 1566
8 12b_1600000 0.00604692 12.266 1567
8 12b_1700000 0.002315774 12.272 1568
8 12b_1800000 0.006453999 12.274 1569
8 12b_1900000 0.008615089 12.281 1570
8 12b_2000000 0.008850348 12.289 1571
8 12b_2100000 0.001104767 12.298 1572
8 12b_2200000 0.00710617 12.299 1573
8 12b_2300000 0.004722203 12.306 1574
8 12b_2400000 0 12.311 1575
8 24_2100000 0.005882142 12.311 1576
8 24_2000000 0.006865812 12.317 1577
8 24_1900000 0.002929294 12.324 1578
8 24_1800000 0.005768953 12.327 1579
8 24_1700000 0.000979879 12.333 1580
8 24_1600000 0.003731271 12.333 1581
8 24_1500000 0.000423899 12.337 1582
8 24_1400000 0.002130429 12.338 1583
8 24_1300000 0.005636607 12.340 1584
8 24_1200000 0.004398547 12.345 1585
8 24_1100000 0.001966416 12.350 1586
8 24_1000000 0 12.352 1587
8 24_900000 0.001394895 12.352 1588
8 24_800000 0 12.353 1589
8 24_600000 0.010730139 12.353 1590
8 24_500000 0 12.364 1591
8 24_400000 0.004843466 12.364 1592
8 24_300000 0 12.369 1593
8 24_200000 0.005274319 12.369 1594
8 24_100000 0.004933695 12.374 1595
8 45_1600000 0.010712572 12.379 1596
8 45_1500000 0.004355504 12.390 1597
8 45_1400000 0.014498143 12.394 1598
8 45_1200000 0.004756653 12.409 1599
8 45_1000000 0.003654145 12.413 1600
8 45_900000 0.023880864 12.417 1601
8 45_700000 0.021154099 12.441 1602
8 45_400000 0.014312376 12.462 1603
8 45_300000 0.008849625 12.476 1604
8 45_200000 0 12.485 1605
8 45_100000 0.006948741 12.485 1606
8 45_0 0.004475862 12.492 1607
8 285_200000 0 12.497 1608
8 285_100000 0.008161615 12.497 1609
8 285_0 0.009109457 12.505 1610
8 3_0 0.001982612 12.514 1611
8 3_100000 0.021987967 12.516 1612
8 3_300000 0.007550018 12.538 1613
8 3_400000 0.000268453 12.545 1614
8 3_500000 0 12.546 1615
8 3_600000 0.020133504 12.546 1616
8 3_700000 0.004288382 12.566 1617
8 3_800000 0.013603477 12.570 1618
8 3_900000 0.002139666 12.584 1619
8 3_1100000 0.007706729 12.586 1620
8 3_1400000 0.002704628 12.593 1621
8 3_1500000 0.004258077 12.596 1622
8 3_1600000 0.002168746 12.600 1623
8 3_1700000 0.00674606 12.603 1624
8 3_1800000 0.007918532 12.609 1625
8 3_1900000 0.001385939 12.617 1626
8 3_2000000 0.002264056 12.619 1627
8 3_2100000 0.005165188 12.621 1628
8 3_2200000 0.005378582 12.626 1629
8 3_2300000 0.003151551 12.631 1630
8 3_2400000 0.001985837 12.635 1631
8 3_2500000 0.006597312 12.637 1632
8 3_2600000 0.008868417 12.643 1633
8 3_2700000 0.019347285 12.652 1634
8 3_2900000 0 12.671 1635
8 3_3000000 0.001604539 12.671 1636
8 3_3100000 0.002051128 12.673 1637
8 3_3200000 0.008157689 12.675 1638
8 3_3300000 0.011333271 12.683 1639
8 3_3400000 0.002362501 12.695 1640
8 3_3500000 0.008069282 12.697 1641
8 3_3600000 0.008031565 12.705 1642
8 3_3700000 0.005659029 12.713 1643
8 3_3800000 0.002453738 12.719 1644
8 3_3900000 0.001728361 12.721 1645
8 3_4000000 0.002630478 12.723 1646
8 3_4100000 2.31E-003 12.726 1647
8 3_4200000 0 12.728 1648
8 3_4300000 12.728 1649
9 68b_1100000 0.014229565 12.728 1650
9 68b_900000 0.005883527 12.742 1651
9 68b_800000 0.011987805 12.748 1652
9 68b_700000 0.012032062 12.760 1653
9 68b_600000 0.008533478 12.772 1654
9 68b_500000 0.007615234 12.780 1655
9 68b_400000 0.013811691 12.788 1656
9 68b_300000 0.002608341 12.802 1657
9 68b_200000 0.013142294 12.805 1658
9 68b_100000 0.010760939 12.818 1659
9 68b_0 0.002741746 12.828 1660
9 124_700000 0.005066644 12.831 1661
9 124_600000 0.008893199 12.836 1662
9 124_500000 0.009753528 12.845 1663
9 124_400000 0.0040901 12.855 1664
9 124_300000 0.013763374 12.859 1665
9 124_100000 0.001750806 12.873 1666
9 124_0 0 12.874 1667
9 20_2300000 0.00419192 12.874 1668
9 20_2200000 0.001979657 12.879 1669
9 20_2100000 0.007754134 12.881 1670
9 20_2000000 0.003818371 12.888 1671
9 20_1900000 0.01173326 12.892 1672
9 20_1600000 0.000837191 12.904 1673
9 20_1500000 0.004044293 12.905 1674
9 20_1400000 0 12.909 1675
9 20_1300000 0.004366974 12.909 1676
9 20_1200000 0.010464428 12.913 1677
9 20_1100000 0 12.924 1678
9 20_1000000 0 12.924 1679
9 20_900000 0.00776613 12.924 1680
9 20_700000 0 12.931 1681
9 20_600000 0.00253937 12.931 1682
9 20_500000 0.004831222 12.934 1683
9 20_400000 0 12.939 1684
9 20_300000 0 12.939 1685
9 117a_100000 0 12.939 1686
9 12a_300000 0.005498214 12.939 1687
9 12a_100000 0 12.944 1688
9 12a_0 0.004155107 12.944 1689
9 147_500000 0.000901824 12.948 1690
9 147_600000 0 12.949 1691
9 283_0 0 12.949 1692
9 283_100000 0 12.949 1693
9 86_100000 0.000974198 12.949 1694
9 152_0 0.002426883 12.950 1695
9 103b_0 0 12.953 1696
9 103b_100000 0.00109972 12.953 1697
9 103b_200000 0.004200709 12.954 1698
9 103b_400000 0 12.958 1699
9 103b_500000 0 12.958 1700
9 103b_600000 0.002451098 12.958 1701
9 84_1000000 0 12.961 1702
9 84_700000 0 12.961 1703
9 84_600000 0.001444481 12.961 1704
9 84_500000 0 12.962 1705
9 84_400000 0.001133878 12.962 1706
9 84_300000 0 12.963 1707
9 84_200000 0.007396703 12.963 1708
9 84_0 0.020167316 12.970 1709
9 238_0 0.009005777 12.991 1710
9 238_200000 0.003755001 13.000 1711
9 157_300000 0.001998733 13.003 1712
9 157_400000 0.006162086 13.005 1713
9 157_500000 0.009180701 13.012 1714
9 63b_900000 0.004677543 13.021 1715
9 63b_700000 0.001959167 13.025 1716
9 63b_600000 0.00539034 13.027 1717
9 63b_500000 0.019351676 13.033 1718
9 63b_400000 0.008185809 13.052 1719
9 63b_100000 0.007363873 13.060 1720
9 63b_0 0 13.068 1721
9 257a_0 0 13.068 1722
9 257a_100000 0.171679241 13.068 1723
9 341_0 13.239 1724'''.split('\n')
v2_genome = ['1\t79_0',
'1\t79_100000',
'1\t79_200000',
'1\t79_300000',
'1\t79_400000',
'1\t79_500000',
'1\t79_600000',
'1\t79_700000',
'1\t79_800000',
'1\t79_900000',
'1\t79_1000000',
'1\t79_1100000',
'1\t69_0',
'1\t69_100000',
'1\t69_200000',
'1\t69_300000',
'1\t69_400000',
'1\t69_500000',
'1\t69_600000',
'1\t69_700000',
'1\t69_800000',
'1\t69_900000',
'1\t69_1000000',
'1\t69_1100000',
'1\t69_1200000',
'1\t181_0',
'1\t181_100000',
'1\t181_200000',
'1\t181_300000',
'1\t181_400000',
'1\t181_500000',
'1\t559_0',
'1\t60_0',
'1\t60_100000',
'1\t60_200000',
'1\t60_300000',
'1\t60_400000',
'1\t60_500000',
'1\t60_600000',
'1\t60_700000',
'1\t60_800000',
'1\t60_900000',
'1\t60_1000000',
'1\t60_1100000',
'1\t60_1200000',
'1\t60_1300000',
'1\t60_1400000',
'1\t333_100000',
'1\t333_0',
'1\t314_100000',
'1\t314_0',
'1\t308_100000',
'1\t428_0',
'1\t165_500000',
'1\t165_400000',
'1\t165_200000',
'1\t165_100000',
'1\t165_0',
'1\t240_100000',
'1\t254b_0',
'1\t232_100000',
'1\t232_200000',
'1\t232_300000',
'1\t80a_700000',
'1\t80a_600000',
'1\t80a_500000',
'1\t80a_400000',
'1\t80a_300000',
'1\t80a_200000',
'1\t80a_100000',
'1\t80a_0',
'1\t83a_0',
'1\t83a_100000',
'1\t146_0',
'1\t146_100000',
'1\t82_900000',
'1\t82_800000',
'1\t82_700000',
'1\t82_600000',
'1\t82_500000',
'1\t82_400000',
'1\t82_300000',
'1\t82_200000',
'1\t82_100000',
'1\t82_0',
'1\t115b_100000',
'1\t115b_200000',
'1\t115b_400000',
'1\t74_700000',
'10\t13_2600000',
'10\t13_2500000',
'10\t13_2400000',
'10\t13_2300000',
'10\t13_2200000',
'10\t13_2100000',
'10\t13_2000000',
'10\t13_1900000',
'10\t13_1800000',
'10\t13_1700000',
'10\t13_1500000',
'10\t13_1400000',
'10\t13_1300000',
'10\t13_1200000',
'10\t13_1100000',
'10\t13_1000000',
'10\t13_900000',
'10\t13_800000',
'10\t13_700000',
'10\t223_300000',
'10\t223_200000',
'10\t223_100000',
'10\t223_0',
'10\t445_0',
'10\t4a_100000',
'10\t4a_0',
'10\t106b_0',
'10\t324_100000',
'10\t324_0',
'10\t90_0',
'10\t90_100000',
'10\t90_200000',
'10\t90_300000',
'10\t90_400000',
'10\t90_500000',
'10\t90_600000',
'10\t90_700000',
'10\t90_800000',
'10\t90_900000',
'10\t90_1000000',
'10\t48a_0',
'10\t48a_100000',
'10\t48a_200000',
'10\t48a_300000',
'10\t48a_400000',
'10\t48a_500000',
'10\t48a_600000',
'10\t48a_700000',
'10\t48a_800000',
'10\t749_0',
'10\t210_300000',
'10\t210_200000',
'10\t210_100000',
'10\t210_0',
'10\t206_400000',
'10\t206_300000',
'10\t206_200000',
'10\t206_100000',
'10\t206_0',
'10\t40_100000',
'10\t40_600000',
'10\t40_900000',
'10\t40_1100000',
'10\t40_1200000',
'10\t40_1300000',
'10\t40_1400000',
'10\t477_0',
'10\t267_200000',
'10\t267_0',
'10\t33_1700000',
'10\t33_1600000',
'10\t33_1400000',
'10\t33_1300000',
'10\t33_1200000',
'10\t33_1100000',
'10\t33_900000',
'10\t33_800000',
'10\t33_700000',
'10\t33_600000',
'10\t33_400000',
'10\t156_0',
'10\t156_300000',
'10\t156_500000',
'10\t9a_300000',
'10\t9a_600000',
'10\t9a_800000',
'10\t9a_1000000',
'10\t9a_1100000',
'10\t172_400000',
'10\t172_300000',
'10\t172_200000',
'10\t50_200000',
'10\t50_300000',
'10\t50_600000',
'10\t50_800000',
'10\t50_1200000',
'10\t50_1300000',
'10\t50_1400000',
'10\t209_0',
'10\t209_100000',
'10\t209_300000',
'10\t125b_300000',
'10\t125b_200000',
'10\t125b_100000',
'10\t125b_0',
'10\t188_500000',
'10\t188_400000',
'10\t188_300000',
'10\t311_0',
'10\t204_400000',
'10\t204_300000',
'10\t204_200000',
'10\t204_100000',
'10\t204_0',
'10\t87_0',
'10\t87_100000',
'10\t87_200000',
'10\t87_300000',
'10\t87_400000',
'10\t87_500000',
'10\t87_600000',
'10\t87_700000',
'10\t87_800000',
'10\t87_900000',
'10\t87_1000000',
'10\t159_0',
'10\t159_100000',
'10\t159_200000',
'10\t159_300000',
'10\t159_400000',
'10\t159_500000',
'11\t75_0',
'11\t75_100000',
'11\t75_200000',
'11\t75_300000',
'11\t75_400000',
'11\t75_500000',
'11\t75_600000',
'11\t75_700000',
'11\t75_800000',
'11\t75_900000',
'11\t75_1000000',
'11\t75_1100000',
'11\t228_0',
'11\t228_100000',
'11\t228_200000',
'11\t228_300000',
'11\t273b_0',
'11\t273b_100000',
'11\t213_0',
'11\t213_100000',
'11\t213_200000',
'11\t213_300000',
'11\t213_400000',
'11\t63a_0',
'11\t63a_100000',
'11\t63a_200000',
'11\t30_1900000',
'11\t30_1800000',
'11\t30_1700000',
'11\t30_1600000',
'11\t30_1500000',
'11\t30_1400000',
'11\t30_1300000',
'11\t30_1200000',
'11\t30_1100000',
'11\t48b_600000',
'11\t48b_500000',
'11\t48b_400000',
'11\t48b_300000',
'11\t48b_200000',
'11\t48b_100000',
'11\t167_0',
'11\t167_100000',
'11\t167_200000',
'11\t162_500000',
'11\t162_400000',
'11\t162_300000',
'11\t162_200000',
'11\t162_100000',
'11\t162_0',
'11\t779_0',
'11\t161_400000',
'11\t161_300000',
'11\t161_200000',
'11\t161_100000',
'11\t161_0',
'11\t100_700000',
'11\t100_600000',
'11\t100_500000',
'11\t100_300000',
'11\t100_200000',
'11\t100_100000',
'11\t243_0',
'11\t239_100000',
'11\t239_200000',
'11\t415_0',
'11\t370_0',
'11\t182_200000',
'11\t185_0',
'11\t39b_700000',
'11\t39b_600000',
'11\t39b_500000',
'11\t39b_300000',
'11\t39b_200000',
'11\t221a_0',
'11\t221a_100000',
'11\t199_300000',
'11\t47b_100000',
'11\t47b_300000',
'11\t47b_500000',
'11\t47b_900000',
'11\t49_500000',
'11\t49_200000',
'11\t49_0',
'11\t176_0',
'12\t58b_900000',
'12\t58b_800000',
'12\t58b_700000',
'12\t58b_600000',
'12\t58b_500000',
'12\t58b_400000',
'12\t58b_300000',
'12\t58b_200000',
'12\t58b_100000',
'12\t220_300000',
'12\t220_200000',
'12\t220_100000',
'12\t220_0',
'12\t336_100000',
'12\t336_0',
'12\t397_100000',
'12\t360_0',
'12\t201_300000',
'12\t201_200000',
'12\t380_0',
'12\t224_0',
'12\t224_100000',
'12\t224_300000',
'12\t132_500000',
'12\t132_400000',
'12\t132_300000',
'12\t132_200000',
'12\t132_100000',
'12\t132_0',
'12\t349_100000',
'12\t349_0',
'12\t222_0',
'12\t222_300000',
'12\t703_0',
'12\t672_0',
'12\t299_300000',
'12\t143_500000',
'12\t143_200000',
'12\t31_1800000',
'12\t31_700000',
'12\t31_100000',
'12\t114a_300000',
'12\t114a_0',
'12\t241_0',
'12\t241_100000',
'12\t241_200000',
'12\t241_300000',
'12\t125a_100000',
'12\t394_0',
'12\t160_400000',
'12\t160_300000',
'12\t160_200000',
'12\t160_0',
'12\t91_300000',
'12\t91_600000',
'12\t91_700000',
'12\t91_800000',
'12\t440_0',
'12\t142_600000',
'12\t142_500000',
'12\t142_400000',
'12\t142_300000',
'12\t142_200000',
'12\t142_100000',
'12\t142_0',
'12\t435_0',
'12\t140a_200000',
'12\t140a_100000',
'12\t140a_0',
'12\t37_1700000',
'12\t37_1600000',
'12\t37_1500000',
'12\t37_1400000',
'12\t37_1300000',
'12\t37_1200000',
'12\t37_1100000',
'12\t37_1000000',
'12\t37_900000',
'12\t37_800000',
'12\t37_700000',
'12\t37_600000',
'12\t37_500000',
'12\t37_400000',
'12\t37_300000',
'12\t37_200000',
'12\t37_100000',
'12\t37_0',
'12\t297_100000',
'12\t297_0',
'12\t334_100000',
'12\t334_0',
'12\t39a_800000',
'12\t39a_700000',
'12\t39a_600000',
'12\t39a_500000',
'12\t39a_400000',
'12\t39a_300000',
'12\t39a_200000',
'12\t39a_100000',
'12\t39a_0',
'12\t17b_0',
'12\t17b_100000',
'12\t17b_200000',
'12\t17b_300000',
'12\t17b_400000',
'12\t17b_500000',
'12\t17b_600000',
'12\t17b_700000',
'12\t17b_800000',
'12\t17b_900000',
'12\t17b_1000000',
'12\t17b_1100000',
'12\t17b_1200000',
'12\t683_0',
'12\t44b_0',
'12\t44b_100000',
'12\t44b_200000',
'12\t44b_300000',
'12\t44b_400000',
'12\t44b_500000',
'13\t122_0',
'13\t122_100000',
'13\t122_200000',
'13\t122_300000',
'13\t122_400000',
'13\t122_500000',
'13\t122_600000',
'13\t325_100000',
'13\t325_0',
'13\t369_0',
'13\t129b_400000',
'13\t129b_300000',
'13\t129b_200000',
'13\t129b_100000',
'13\t28_1200000',
'13\t28_400000',
'13\t28_0',
'13\t139_600000',
'13\t139_400000',
'13\t139_300000',
'13\t139_200000',
'13\t139_100000',
'13\t139_0',
'13\t398_0',
'13\t52_1400000',
'13\t52_1300000',
'13\t52_1200000',
'13\t52_800000',
'13\t52_400000',
'13\t52_300000',
'13\t52_200000',
'13\t52_100000',
'13\t52_0',
'13\t235_300000',
'13\t235_200000',
'13\t235_0',
'13\t34_1700000',
'13\t34_1600000',
'13\t34_1500000',
'13\t34_1400000',
'13\t34_1200000',
'13\t34_1000000',
'13\t34_600000',
'13\t138_400000',
'13\t236_200000',
'13\t402_0',
'13\t174_0',
'13\t174_100000',
'13\t174_200000',
'13\t174_300000',
'13\t174_400000',
'13\t174_500000',
'13\t344_0',
'13\t108_800000',
'13\t108_700000',
'13\t108_600000',
'13\t108_500000',
'13\t108_400000',
'13\t108_300000',
'13\t108_200000',
'13\t108_100000',
'13\t245_300000',
'13\t245_200000',
'13\t245_100000',
'13\t245_0',
'13\t4b_3800000',
'13\t4b_3700000',
'13\t4b_3600000',
'13\t4b_3500000',
'13\t4b_3400000',
'13\t4b_3300000',
'13\t4b_3200000',
'13\t4b_3100000',
'13\t4b_3000000',
'13\t4b_2900000',
'13\t4b_2800000',
'13\t4b_2700000',
'13\t4b_2600000',
'13\t4b_2500000',
'13\t4b_2400000',
'13\t4b_2300000',
'13\t4b_2200000',
'13\t4b_2100000',
'13\t4b_2000000',
'13\t4b_1900000',
'13\t4b_1800000',
'13\t4b_1700000',
'13\t4b_1600000',
'13\t4b_1500000',
'13\t4b_1400000',
'13\t4b_1300000',
'13\t4b_1200000',
'13\t4b_1100000',
'13\t4b_1000000',
'13\t4b_900000',
'13\t4b_800000',
'13\t4b_700000',
'13\t4b_600000',
'13\t4b_500000',
'13\t4b_400000',
'13\t4b_300000',
'13\t4b_200000',
'13\t4b_100000',
'13\t4b_0',
'13\t83b_800000',
'13\t83b_700000',
'13\t83b_600000',
'13\t83b_500000',
'13\t83b_400000',
'13\t83b_300000',
'13\t83b_200000',
'13\t83b_100000',
'13\t83b_0',
'13\t115a_300000',
'13\t115a_200000',
'13\t115a_100000',
'13\t115a_0',
'14\t102_0',
'14\t102_100000',
'14\t102_200000',
'14\t102_300000',
'14\t102_400000',
'14\t102_500000',
'14\t102_600000',
'14\t102_700000',
'14\t102_800000',
'14\t102_900000',
'14\t6c_0',
'14\t6c_100000',
'14\t26a_0',
'14\t26a_100000',
'14\t26a_200000',
'14\t26a_300000',
'14\t26a_400000',
'14\t26a_500000',
'14\t26a_600000',
'14\t26a_700000',
'14\t26a_800000',
'14\t26a_900000',
'14\t26a_1000000',
'14\t26a_1100000',
'14\t26a_1200000',
'14\t26a_1300000',
'14\t26a_1400000',
'14\t26a_1500000',
'14\t278_0',
'14\t278_100000',
'14\t148_100000',
'14\t148_200000',
'14\t148_300000',
'14\t148_400000',
'14\t148_500000',
'14\t148_600000',
'14\t198_0',
'14\t198_100000',
'14\t198_200000',
'14\t198_300000',
'14\t101_0',
'14\t101_100000',
'14\t101_200000',
'14\t101_300000',
'14\t101_400000',
'14\t101_500000',
'14\t101_600000',
'14\t101_800000',
'14\t101_900000',
'14\t419_0',
'14\t247_0',
'14\t247_100000',
'14\t247_200000',
'14\t17a_1000000',
'14\t17a_900000',
'14\t17a_800000',
'14\t17a_700000',
'14\t17a_600000',
'14\t17a_500000',
'14\t17a_400000',
'14\t17a_300000',
'14\t17a_200000',
'14\t17a_100000',
'14\t17a_0',
'14\t208_0',
'14\t208_100000',
'14\t208_300000',
'14\t208_400000',
'14\t207_0',
'14\t207_100000',
'14\t207_200000',
'14\t207_300000',
'14\t164_500000',
'14\t164_400000',
'14\t164_300000',
'14\t164_200000',
'14\t164_0',
'14\t123_700000',
'14\t123_600000',
'14\t123_500000',
'14\t123_400000',
'14\t123_300000',
'14\t123_200000',
'14\t123_100000',
'14\t123_0',
'14\t15_0',
'14\t15_100000',
'14\t15_200000',
'14\t15_300000',
'14\t15_400000',
'14\t15_500000',
'14\t15_600000',
'14\t15_700000',
'14\t15_800000',
'14\t211_0',
'14\t211_100000',
'14\t211_200000',
'14\t211_300000',
'14\t291_100000',
'14\t127_0',
'14\t127_200000',
'14\t127_300000',
'14\t127_400000',
'14\t127_500000',
'14\t168_100000',
'14\t168_200000',
'14\t168_300000',
'14\t168_400000',
'14\t168_500000',
'14\t482_0',
'14\t361_0',
'14\t361_100000',
'14\t300_0',
'14\t300_100000',
'14\t300_200000',
'14\t126_0',
'14\t126_100000',
'14\t126_200000',
'14\t126_300000',
'14\t126_400000',
'14\t126_500000',
'14\t126_600000',
'14\t121_700000',
'14\t121_600000',
'14\t121_500000',
'14\t121_300000',
'14\t121_200000',
'14\t303_0',
'14\t303_100000',
'14\t195_0',
'14\t195_100000',
'14\t195_200000',
'14\t195_400000',
'14\t219_0',
'14\t219_100000',
'14\t219_200000',
'14\t140b_0',
'14\t140b_100000',
'14\t140b_200000',
'14\t140b_300000',
'14\t140b_400000',
'14\t444_0',
'14\t128_0',
'14\t128_100000',
'14\t128_200000',
'14\t128_300000',
'14\t128_400000',
'14\t128_600000',
'14\t128_700000',
'14\t178_0',
'14\t178_100000',
'14\t178_200000',
'14\t178_300000',
'14\t178_400000',
'14\t178_500000',
'14\t382_0',
'14\t92_0',
'14\t92_100000',
'14\t92_200000',
'14\t92_300000',
'14\t92_400000',
'14\t92_500000',
'14\t92_600000',
'14\t92_700000',
'14\t92_800000',
'14\t92_900000',
'14\t92_1000000',
'14\t459_0',
'14\t290_0',
'14\t290_100000',
'14\t290_200000',
'14\t2_0',
'14\t2_100000',
'14\t2_200000',
'14\t2_300000',
'14\t2_400000',
'14\t2_500000',
'14\t2_600000',
'14\t2_700000',
'14\t2_800000',
'14\t2_900000',
'14\t2_1000000',
'14\t2_1100000',
'14\t2_1200000',
'14\t2_1300000',
'14\t2_1400000',
'14\t2_1500000',
'14\t2_1600000',
'14\t2_1700000',
'14\t2_1800000',
'14\t2_1900000',
'14\t2_2000000',
'14\t2_2100000',
'14\t2_2200000',
'14\t2_2300000',
'14\t2_2400000',
'14\t2_2500000',
'14\t2_2600000',
'14\t2_2700000',
'14\t2_2800000',
'14\t2_2900000',
'14\t2_3000000',
'14\t2_3100000',
'14\t2_3200000',
'14\t2_3300000',
'14\t2_3400000',
'14\t2_3500000',
'14\t2_3600000',
'14\t2_3700000',
'14\t2_3800000',
'14\t2_3900000',
'14\t2_4000000',
'14\t2_4100000',
'14\t2_4200000',
'14\t2_4300000',
'14\t2_4400000',
'14\t2_4500000',
'14\t58a_0',
'14\t58a_100000',
'14\t58a_200000',
'14\t58a_300000',
'14\t58a_400000',
'14\t58a_500000',
'14\t396_0',
'2\t18_2200000',
'2\t18_2100000',
'2\t18_2000000',
'2\t18_1900000',
'2\t18_1800000',
'2\t18_1700000',
'2\t18_1600000',
'2\t18_1500000',
'2\t18_1400000',
'2\t18_1300000',
'2\t18_1200000',
'2\t18_1100000',
'2\t18_1000000',
'2\t18_900000',
'2\t18_800000',
'2\t18_700000',
'2\t18_600000',
'2\t18_500000',
'2\t18_400000',
'2\t18_300000',
'2\t18_200000',
'2\t18_100000',
'2\t18_0',
'2\t89_0',
'2\t89_100000',
'2\t89_200000',
'2\t89_300000',
'2\t89_400000',
'2\t89_500000',
'2\t89_600000',
'2\t89_700000',
'2\t89_800000',
'2\t89_900000',
'2\t89_1000000',
'2\t249_200000',
'2\t249_100000',
'2\t249_0',
'2\t212_400000',
'2\t212_300000',
'2\t212_200000',
'2\t212_100000',
'2\t44a_0',
'2\t44a_100000',
'2\t44a_200000',
'2\t44a_300000',
'2\t44a_400000',
'2\t44a_500000',
'2\t44a_600000',
'2\t44a_700000',
'2\t81_900000',
'2\t81_700000',
'2\t81_500000',
'2\t81_400000',
'2\t81_300000',
'2\t81_200000',
'2\t81_100000',
'2\t81_0',
'2\t112_400000',
'2\t112_600000',
'2\t112_800000',
'2\t19_100000',
'2\t19_200000',
'2\t19_300000',
'2\t19_400000',
'2\t19_600000',
'2\t19_1300000',
'2\t73_600000',
'2\t73_800000',
'2\t73_900000',
'2\t65_0',
'2\t65_300000',
'2\t65_500000',
'2\t65_1000000',
'2\t65_1100000',
'2\t42_300000',
'2\t42_400000',
'2\t42_600000',
'2\t42_800000',
'2\t42_1000000',
'2\t42_1200000',
'2\t42_1400000',
'2\t42_1500000',
'2\t42_1600000',
'2\t173_0',
'2\t173_100000',
'2\t173_200000',
'2\t173_300000',
'2\t173_400000',
'2\t173_500000',
'2\t41a_0',
'2\t41a_100000',
'2\t41a_200000',
'2\t41a_300000',
'2\t41a_500000',
'2\t41a_600000',
'2\t41a_700000',
'2\t41a_800000',
'2\t41a_900000',
'2\t41a_1000000',
'2\t41a_1100000',
'2\t41a_1200000',
'2\t41a_1400000',
'2\t41a_1500000',
'2\t429_0',
'2\t151_0',
'2\t151_100000',
'2\t151_200000',
'2\t151_300000',
'2\t151_400000',
'2\t151_500000',
'2\t151_600000',
'2\t27_0',
'2\t27_100000',
'2\t27_200000',
'2\t27_300000',
'2\t27_400000',
'2\t27_500000',
'2\t27_600000',
'2\t27_700000',
'2\t27_800000',
'2\t27_900000',
'2\t27_1000000',
'2\t27_1100000',
'2\t27_1200000',
'2\t27_1300000',
'2\t27_1400000',
'2\t27_1500000',
'2\t27_1600000',
'2\t27_1700000',
'2\t27_1800000',
'2\t27_1900000',
'2\t27_2000000',
'3\t62_500000',
'3\t62_300000',
'3\t62_0',
'3\t35_1600000',
'3\t35_1400000',
'3\t35_1300000',
'3\t35_800000',
'3\t35_700000',
'3\t35_600000',
'3\t35_400000',
'3\t35_300000',
'3\t35_100000',
'3\t389_0',
'3\t338_0',
'3\t55a_200000',
'3\t55a_100000',
'3\t55a_0',
'3\t116_700000',
'3\t116_600000',
'3\t116_500000',
'3\t116_400000',
'3\t116_300000',
'3\t116_100000',
'3\t70b_300000',
'3\t70b_500000',
'3\t70b_600000',
'3\t72_300000',
'3\t72_100000',
'3\t137_400000',
'3\t137_300000',
'3\t137_200000',
'3\t137_100000',
'3\t137_0',
'3\t316_100000',
'3\t316_0',
'3\t265_200000',
'3\t265_100000',
'3\t265_0',
'3\t23_2000000',
'3\t23_1900000',
'3\t23_1600000',
'3\t23_1100000',
'3\t23_1000000',
'3\t5a_1800000',
'3\t5a_1600000',
'3\t5a_1500000',
'3\t5a_1300000',
'3\t5a_1200000',
'3\t5a_1100000',
'3\t5a_1000000',
'3\t5a_900000',
'3\t5a_800000',
'3\t5a_700000',
'3\t5a_600000',
'3\t5a_500000',
'3\t5a_400000',
'3\t5a_300000',
'3\t5a_200000',
'3\t106a_0',
'3\t106a_200000',
'3\t106a_300000',
'3\t106a_400000',
'3\t106a_500000',
'3\t106a_600000',
'3\t106a_700000',
'3\t258_300000',
'3\t258_200000',
'3\t258_100000',
'3\t258_0',
'3\t169_500000',
'3\t169_400000',
'3\t169_300000',
'3\t169_200000',
'3\t169_100000',
'3\t169_0',
'3\t98_900000',
'3\t98_800000',
'3\t98_700000',
'3\t98_600000',
'3\t98_500000',
'3\t98_400000',
'3\t98_300000',
'3\t98_200000',
'3\t98_100000',
'3\t98_0',
'3\t261_200000',
'3\t261_100000',
'3\t261_0',
'3\t273a_0',
'3\t64_1300000',
'3\t64_1200000',
'3\t64_1100000',
'3\t64_1000000',
'3\t64_900000',
'3\t64_800000',
'3\t64_700000',
'3\t64_600000',
'3\t64_500000',
'3\t64_400000',
'3\t64_300000',
'3\t64_200000',
'3\t64_100000',
'3\t64_0',
'4\t1_4900000',
'4\t1_4800000',
'4\t1_4700000',
'4\t1_4600000',
'4\t1_4500000',
'4\t1_4400000',
'4\t1_4300000',
'4\t1_4200000',
'4\t1_4100000',
'4\t1_4000000',
'4\t1_3900000',
'4\t1_3800000',
'4\t1_3700000',
'4\t1_3600000',
'4\t1_3500000',
'4\t1_3400000',
'4\t1_3300000',
'4\t1_3200000',
'4\t1_3100000',
'4\t1_3000000',
'4\t1_2900000',
'4\t1_2800000',
'4\t1_2700000',
'4\t1_2600000',
'4\t1_2500000',
'4\t1_2400000',
'4\t1_2300000',
'4\t1_2200000',
'4\t1_2100000',
'4\t1_2000000',
'4\t1_1900000',
'4\t1_1800000',
'4\t1_1700000',
'4\t1_1600000',
'4\t1_1500000',
'4\t1_1400000',
'4\t1_1300000',
'4\t1_1200000',
'4\t1_1100000',
'4\t1_1000000',
'4\t1_900000',
'4\t1_800000',
'4\t1_700000',
'4\t1_600000',
'4\t1_500000',
'4\t1_400000',
'4\t1_300000',
'4\t1_200000',
'4\t1_100000',
'4\t1_0',
'4\t841_0',
'4\t54_1400000',
'4\t54_1300000',
'4\t54_1200000',
'4\t54_1100000',
'4\t54_1000000',
'4\t54_900000',
'4\t54_800000',
'4\t54_700000',
'4\t54_600000',
'4\t54_500000',
'4\t54_400000',
'4\t54_300000',
'4\t54_200000',
'4\t54_100000',
'4\t54_0',
'4\t120_800000',
'4\t120_700000',
'4\t120_600000',
'4\t120_500000',
'4\t120_400000',
'4\t120_300000',
'4\t120_200000',
'4\t120_0',
'4\t150_300000',
'4\t154_400000',
'4\t154_200000',
'4\t154_0',
'4\t254a_0',
'4\t177_400000',
'4\t177_300000',
'4\t177_200000',
'4\t177_100000',
'4\t177_0',
'4\t57_700000',
'4\t57_600000',
'4\t57_500000',
'4\t57_300000',
'4\t57_100000',
'4\t57_0',
'4\t110_600000',
'4\t110_400000',
'4\t110_300000',
'4\t6a_2300000',
'4\t6a_2200000',
'4\t6a_2100000',
'4\t6a_2000000',
'4\t6a_1900000',
'4\t6a_1800000',
'4\t6a_1700000',
'4\t6a_1600000',
'4\t6a_1500000',
'4\t6a_1300000',
'4\t6a_1100000',
'4\t6a_1000000',
'4\t6a_900000',
'4\t6a_800000',
'4\t6a_700000',
'4\t6a_600000',
'4\t6a_100000',
'4\t320_0',
'4\t320_100000',
'4\t55b_0',
'4\t55b_100000',
'4\t55b_200000',
'4\t55b_300000',
'4\t55b_400000',
'4\t55b_500000',
'4\t55b_600000',
'4\t55b_1000000',
'4\t443_0',
'4\t346_0',
'4\t346_100000',
'4\t85_1100000',
'4\t85_1000000',
'4\t85_900000',
'4\t85_800000',
'4\t85_700000',
'4\t85_600000',
'4\t85_500000',
'4\t85_400000',
'4\t85_300000',
'4\t85_200000',
'4\t85_100000',
'4\t85_0',
'4\t706_0',
'4\t196_500000',
'4\t196_400000',
'4\t196_300000',
'4\t196_200000',
'4\t196_100000',
'4\t196_0',
'4\t541_0',
'4\t766_0',
'4\t638_0',
'4\t479_0',
'4\t384_0',
'4\t44c_100000',
'4\t44c_0',
'4\t163_0',
'4\t163_100000',
'4\t163_200000',
'4\t163_300000',
'4\t163_400000',
'4\t163_500000',
'4\t163_600000',
'4\t633_0',
'5\t129a_0',
'5\t129a_100000',
'5\t129a_200000',
'5\t66_0',
'5\t66_100000',
'5\t66_200000',
'5\t66_300000',
'5\t66_400000',
'5\t66_500000',
'5\t66_600000',
'5\t66_700000',
'5\t66_800000',
'5\t66_900000',
'5\t66_1000000',
'5\t66_1100000',
'5\t66_1200000',
'5\t461_0',
'5\t32_0',
'5\t32_100000',
'5\t32_200000',
'5\t32_300000',
'5\t32_400000',
'5\t32_500000',
'5\t32_600000',
'5\t32_700000',
'5\t32_800000',
'5\t32_900000',
'5\t32_1000000',
'5\t32_1100000',
'5\t32_1200000',
'5\t32_1300000',
'5\t32_1400000',
'5\t32_1500000',
'5\t32_1600000',
'5\t32_1700000',
'5\t32_1800000',
'5\t607_0',
'5\t242_0',
'5\t242_100000',
'5\t242_200000',
'5\t242_300000',
'5\t205_0',
'5\t205_100000',
'5\t205_200000',
'5\t205_300000',
'5\t205_400000',
'5\t104a_0',
'5\t104a_100000',
'5\t104a_200000',
'5\t104a_300000',
'5\t244_0',
'5\t244_100000',
'5\t94_100000',
'5\t94_200000',
'5\t94_300000',
'5\t94_400000',
'5\t94_600000',
'5\t94_800000',
'5\t94_900000',
'5\t317_0',
'5\t88_400000',
'5\t276_0',
'5\t276_100000',
'5\t281a_100000',
'5\t149_100000',
'5\t365_0',
'5\t288_200000',
'5\t170_0',
'5\t170_100000',
'5\t170_200000',
'5\t170_300000',
'5\t170_400000',
'5\t197_400000',
'5\t197_300000',
'5\t158_600000',
'5\t158_500000',
'5\t158_400000',
'5\t158_300000',
'5\t158_200000',
'5\t158_100000',
'5\t158_0',
'5\t498_0',
'5\t449_0',
'5\t327_0',
'5\t327_100000',
'5\t36_0',
'5\t36_100000',
'5\t36_200000',
'5\t36_300000',
'5\t36_400000',
'5\t36_500000',
'5\t36_600000',
'5\t36_700000',
'5\t36_800000',
'5\t36_900000',
'5\t36_1000000',
'5\t36_1100000',
'5\t36_1200000',
'5\t36_1300000',
'5\t36_1400000',
'5\t36_1500000',
'5\t36_1600000',
'5\t36_1700000',
'5\t266_0',
'5\t266_100000',
'5\t266_200000',
'6\t270_200000',
'6\t270_100000',
'6\t270_0',
'6\t104b_500000',
'6\t104b_400000',
'6\t104b_300000',
'6\t104b_200000',
'6\t104b_100000',
'6\t104b_0',
'6\t51_1400000',
'6\t51_1300000',
'6\t51_1200000',
'6\t51_1100000',
'6\t51_1000000',
'6\t51_900000',
'6\t51_800000',
'6\t51_700000',
'6\t51_600000',
'6\t51_500000',
'6\t51_400000',
'6\t51_300000',
'6\t51_200000',
'6\t51_100000',
'6\t51_0',
'6\t8_3400000',
'6\t8_3300000',
'6\t8_3200000',
'6\t8_3100000',
'6\t8_3000000',
'6\t8_2900000',
'6\t8_2800000',
'6\t8_2700000',
'6\t8_2600000',
'6\t8_2500000',
'6\t8_2400000',
'6\t8_2300000',
'6\t8_2200000',
'6\t8_2100000',
'6\t8_2000000',
'6\t8_1900000',
'6\t8_1800000',
'6\t8_1700000',
'6\t8_1600000',
'6\t8_1500000',
'6\t8_1400000',
'6\t8_1300000',
'6\t8_1200000',
'6\t8_1100000',
'6\t8_1000000',
'6\t8_900000',
'6\t8_800000',
'6\t8_700000',
'6\t8_600000',
'6\t8_500000',
'6\t8_400000',
'6\t8_300000',
'6\t8_200000',
'6\t8_100000',
'6\t8_0',
'6\t293_100000',
'6\t293_0',
'6\t21_0',
'6\t21_100000',
'6\t21_200000',
'6\t21_300000',
'6\t21_500000',
'6\t21_600000',
'6\t21_700000',
'6\t21_1100000',
'6\t21_1200000',
'6\t21_1600000',
'6\t21_1700000',
'6\t67_200000',
'6\t67_300000',
'6\t43_1000000',
'6\t43_1100000',
'6\t43_1200000',
'6\t43_1400000',
'6\t43_1500000',
'6\t136_600000',
'6\t136_500000',
'6\t136_400000',
'6\t136_300000',
'6\t136_0',
'6\t248_100000',
'6\t119_600000',
'6\t119_500000',
'6\t119_400000',
'6\t119_200000',
'6\t119_100000',
'6\t25_1700000',
'6\t25_1600000',
'6\t25_1000000',
'6\t25_900000',
'6\t25_800000',
'6\t25_500000',
'6\t25_400000',
'6\t25_300000',
'6\t25_200000',
'6\t25_100000',
'6\t25_0',
'6\t307_100000',
'6\t190_300000',
'6\t190_200000',
'6\t190_100000',
'6\t190_0',
'6\t179_200000',
'6\t179_100000',
'6\t179_0',
'6\t262_200000',
'6\t262_100000',
'6\t262_0',
'6\t16_2400000',
'6\t16_2300000',
'6\t16_2200000',
'6\t16_2100000',
'6\t16_2000000',
'6\t16_1900000',
'6\t16_1800000',
'6\t16_1700000',
'6\t16_1600000',
'6\t16_1500000',
'6\t16_1400000',
'6\t16_1300000',
'6\t16_1200000',
'6\t16_1100000',
'6\t16_1000000',
'6\t16_900000',
'6\t16_800000',
'6\t16_700000',
'6\t16_600000',
'6\t16_500000',
'6\t16_400000',
'6\t16_300000',
'6\t16_200000',
'6\t16_100000',
'6\t16_0',
'7\t251_200000',
'7\t251_100000',
'7\t251_0',
'7\t335_0',
'7\t335_100000',
'7\t113_900000',
'7\t113_800000',
'7\t113_700000',
'7\t113_600000',
'7\t113_500000',
'7\t113_400000',
'7\t113_300000',
'7\t113_200000',
'7\t113_100000',
'7\t113_0',
'7\t130_800000',
'7\t130_700000',
'7\t130_600000',
'7\t130_500000',
'7\t130_400000',
'7\t130_300000',
'7\t130_200000',
'7\t130_100000',
'7\t130_0',
'7\t416_0',
'7\t255_200000',
'7\t255_100000',
'7\t255_0',
'7\t476_0',
'7\t9c_0',
'7\t9c_100000',
'7\t9c_200000',
'7\t9c_300000',
'7\t9c_400000',
'7\t9c_500000',
'7\t114b_0',
'7\t114b_100000',
'7\t114b_200000',
'7\t114b_300000',
'7\t114b_400000',
'7\t114b_500000',
'7\t274_0',
'7\t274_100000',
'7\t274_200000',
'7\t29_0',
'7\t29_100000',
'7\t29_200000',
'7\t29_300000',
'7\t29_400000',
'7\t29_500000',
'7\t29_800000',
'7\t29_900000',
'7\t29_1100000',
'7\t29_1200000',
'7\t29_1400000',
'7\t29_1500000',
'7\t29_1800000',
'7\t351_0',
'7\t351_100000',
'7\t313_100000',
'7\t97a_200000',
'7\t97a_100000',
'7\t279_0',
'7\t166_500000',
'7\t166_400000',
'7\t166_200000',
'7\t166_100000',
'7\t166_0',
'7\t70a_0',
'7\t95_0',
'7\t56_200000',
'7\t14_1300000',
'7\t14_1400000',
'7\t14_1500000',
'7\t14_1600000',
'7\t14_1900000',
'7\t14_2000000',
'7\t14_2100000',
'7\t14_2600000',
'7\t41b_0',
'8\t109_700000',
'8\t109_600000',
'8\t109_500000',
'8\t109_400000',
'8\t109_300000',
'8\t109_200000',
'8\t109_100000',
'8\t11_2900000',
'8\t11_2800000',
'8\t11_2700000',
'8\t11_2600000',
'8\t11_2500000',
'8\t11_2400000',
'8\t11_2300000',
'8\t11_2100000',
'8\t11_2000000',
'8\t11_1900000',
'8\t11_1800000',
'8\t11_1700000',
'8\t11_1600000',
'8\t11_1500000',
'8\t11_1400000',
'8\t11_1300000',
'8\t11_1200000',
'8\t11_1100000',
'8\t11_1000000',
'8\t11_900000',
'8\t11_800000',
'8\t11_700000',
'8\t11_600000',
'8\t11_500000',
'8\t11_400000',
'8\t11_300000',
'8\t11_200000',
'8\t11_100000',
'8\t11_0',
'8\t59_0',
'8\t59_100000',
'8\t59_200000',
'8\t59_600000',
'8\t59_700000',
'8\t59_1100000',
'8\t76_300000',
'8\t76_400000',
'8\t76_500000',
'8\t76_600000',
'8\t76_700000',
'8\t76_800000',
'8\t76_900000',
'8\t76_1000000',
'8\t76_1100000',
'8\t233_300000',
'8\t233_200000',
'8\t233_100000',
'8\t233_0',
'8\t155_400000',
'8\t155_300000',
'8\t155_200000',
'8\t155_100000',
'8\t155_0',
'8\t46_100000',
'8\t46_200000',
'8\t46_300000',
'8\t46_400000',
'8\t46_500000',
'8\t46_600000',
'8\t46_700000',
'8\t46_800000',
'8\t46_900000',
'8\t46_1000000',
'8\t46_1300000',
'8\t46_1400000',
'8\t46_1500000',
'8\t412_0',
'8\t217_200000',
'8\t118_300000',
'8\t118_400000',
'8\t77_1000000',
'8\t77_900000',
'8\t77_800000',
'8\t12b_0',
'8\t12b_100000',
'8\t12b_200000',
'8\t12b_400000',
'8\t12b_500000',
'8\t12b_600000',
'8\t12b_700000',
'8\t12b_800000',
'8\t12b_900000',
'8\t12b_1000000',
'8\t12b_1100000',
'8\t12b_1200000',
'8\t12b_1300000',
'8\t12b_1400000',
'8\t12b_1500000',
'8\t12b_1600000',
'8\t12b_1700000',
'8\t12b_1800000',
'8\t12b_1900000',
'8\t12b_2000000',
'8\t12b_2100000',
'8\t12b_2200000',
'8\t12b_2300000',
'8\t12b_2400000',
'8\t24_2100000',
'8\t24_2000000',
'8\t24_1900000',
'8\t24_1800000',
'8\t24_1700000',
'8\t24_1600000',
'8\t24_1500000',
'8\t24_1400000',
'8\t24_1300000',
'8\t24_1200000',
'8\t24_1100000',
'8\t24_1000000',
'8\t24_900000',
'8\t24_800000',
'8\t24_700000',
'8\t24_600000',
'8\t24_500000',
'8\t24_400000',
'8\t24_300000',
'8\t24_200000',
'8\t24_100000',
'8\t24_0',
'8\t45_1600000',
'8\t45_1500000',
'8\t45_1400000',
'8\t45_1300000',
'8\t45_1200000',
'8\t45_1100000',
'8\t45_1000000',
'8\t45_900000',
'8\t45_800000',
'8\t45_700000',
'8\t45_400000',
'8\t45_300000',
'8\t45_200000',
'8\t45_100000',
'8\t45_0',
'8\t285_0',
'8\t285_100000',
'8\t285_200000',
'8\t3_0',
'8\t3_100000',
'8\t3_200000',
'8\t3_300000',
'8\t3_400000',
'8\t3_500000',
'8\t3_600000',
'8\t3_700000',
'8\t3_800000',
'8\t3_900000',
'8\t3_1000000',
'8\t3_1100000',
'8\t3_1200000',
'8\t3_1300000',
'8\t3_1400000',
'8\t3_1500000',
'8\t3_1700000',
'8\t3_1800000',
'8\t3_1900000',
'8\t3_2000000',
'8\t3_2100000',
'8\t3_2200000',
'8\t3_2300000',
'8\t3_2400000',
'8\t3_2500000',
'8\t3_2600000',
'8\t3_2800000',
'8\t3_2900000',
'8\t3_3000000',
'8\t3_3100000',
'8\t3_3200000',
'8\t3_3300000',
'8\t3_3400000',
'8\t3_3500000',
'8\t3_3600000',
'8\t3_3700000',
'8\t3_3800000',
'8\t3_3900000',
'8\t3_4000000',
'8\t3_4100000',
'8\t3_4200000',
'8\t3_4300000',
'9\t124_700000',
'9\t124_600000',
'9\t124_400000',
'9\t124_300000',
'9\t124_100000',
'9\t124_0',
'9\t20_2200000',
'9\t20_2100000',
'9\t20_2000000',
'9\t20_1900000',
'9\t20_1800000',
'9\t20_1600000',
'9\t20_1400000',
'9\t20_1200000',
'9\t20_1100000',
'9\t20_1000000',
'9\t20_800000',
'9\t20_700000',
'9\t20_600000',
'9\t20_500000',
'9\t20_400000',
'9\t20_300000',
'9\t157_500000',
'9\t157_400000',
'9\t157_300000',
'9\t238_200000',
'9\t238_0',
'9\t352_100000',
'9\t12a_100000',
'9\t152_100000',
'9\t152_0',
'9\t147_500000',
'9\t103b_300000',
'9\t103b_200000',
'9\t84_900000',
'9\t84_700000',
'9\t84_600000',
'9\t84_500000',
'9\t84_400000',
'9\t84_300000',
'9\t84_0',
'9\t97b_0',
'9\t63b_900000',
'9\t63b_700000',
'9\t63b_600000',
'9\t63b_500000',
'9\t63b_400000',
'9\t63b_300000',
'9\t63b_200000',
'9\t63b_100000',
'9\t63b_0',
'9\t7_3700000',
'9\t7_3600000',
'9\t7_3400000',
'9\t7_3300000',
'9\t7_3200000',
'9\t7_3100000',
'9\t7_3000000',
'9\t7_2900000',
'9\t7_2800000',
'9\t7_2700000',
'9\t7_2600000',
'9\t7_2500000',
'9\t7_2400000',
'9\t7_2300000',
'9\t7_2200000',
'9\t7_2100000',
'9\t7_2000000',
'9\t7_1900000',
'9\t7_1800000',
'9\t7_1700000',
'9\t7_1600000',
'9\t7_1400000',
'9\t7_1300000',
'9\t7_1200000',
'9\t7_1100000',
'9\t7_1000000',
'9\t7_900000',
'9\t7_800000',
'9\t7_700000',
'9\t7_600000',
'9\t7_500000',
'9\t7_400000',
'9\t7_300000',
'9\t7_200000',
'9\t7_100000',
'9\t7_0']
from string import maketrans
import operator
import random
from collections import defaultdict
###Stats and math functions
def weighted_sampler(pop_dict):
    """Randomly sample one of a dict's keys, weighted by its values.

    Example:
        m = {'a': 3, 'b': 2, 'c': 5}
        samps = [weighted_sampler(m) for _ in range(1000)]
        # samps.count('a') ~ 300, samps.count('b') ~ 200, samps.count('c') ~ 500

    Of course, being a random sampler your results will vary.  Keys are
    visited in sorted order so a seeded RNG gives reproducible draws.
    """
    ch = random.random() * sum(pop_dict.values())
    keys = sorted(pop_dict.keys())
    for key in keys:
        ch -= pop_dict[key]
        if ch < 0:
            return key
    # Float round-off can leave ch >= 0 after the loop; fall back to the
    # last key instead of silently returning None.
    return keys[-1]
def choose(n, k):
    '''Binomial coefficient C(n, k).

    see: https://en.wikipedia.org/wiki/Binomial_coefficient
    Uses the multiplicative formula; every intermediate product is an
    exact multiple of the divisor, so floor division is lossless and the
    result stays an int (py2's bare `reduce`/`xrange`/`/` were relied on
    before, which breaks on py3).
    performance not tested on really large values'''
    from functools import reduce  # builtin on py2, moved to functools on py3
    return reduce(lambda a, b: a * (n - b) // (b + 1), range(k), 1)
def sampler(pop, size, replacement=False):
    '''Sample `size` items from sequence `pop`.

    A quick re-implementation of the python random sampler that allows
    for sampling with or without replacement (python's builtin
    random.sample only samples without replacement).  `xrange` replaced
    with `range` for py3 compatibility.
    '''
    if replacement:
        return [random.choice(pop) for _ in range(size)]
    return random.sample(pop, size)
def rank(x):
    '''Return the sample rank of the elements in a list.

    out[i] is the 0-based ascending rank of x[i]; ties are broken by
    original position (a (value, index) sort), matching the old
    sort_dict_by_val-based implementation.  The old version also mutated
    a `range` object, which is a TypeError on py3.
    '''
    order_ix = sorted(range(len(x)), key=lambda i: (x[i], i))
    ranks = [0] * len(x)
    for rk, i in enumerate(order_ix):
        ranks[i] = rk
    return ranks
def order(x):
    '''Return the sample indices that would return the list in sorted order.

    ie:
        x = (4, 3, 406, 5)
        sorted(x) == [x[i] for i in order(x)]

    Ties are broken by original position ((value, index) sort), matching
    the old sort_dict_by_val-based implementation.
    '''
    return sorted(range(len(x)), key=lambda i: (x[i], i))
###Useful functions for bioinformatics
###NOTE: biopython offers more robust versions, but sometimes you just need something quick and dirty
def revcom(s):
    '''Return the reverse complement of a DNA sequence string.

    Only accepts ACGT, upper or lowercase; any other character passes
    through unchanged (same pass-through behavior as str.translate).
    Uses a plain dict instead of string.maketrans, which was removed on
    py3.
    '''
    comp = dict(zip('atcgATCG', 'tagcTAGC'))
    return ''.join(comp.get(base, base) for base in reversed(s))
def get_fasta(file_name):
    '''Read a properly formatted fasta file and return a dict
    with key=readname and value=sequence.

    Reads the whole file in.  The file handle is now closed via `with`
    (it was leaked before).
    '''
    out = {}
    curr_seq = None  # set by the first '>' header line
    with open(file_name) as fh:
        for line in fh:
            line = line.strip()
            if line.startswith('>'):
                curr_seq = line[1:]
                out[curr_seq] = []
            else:
                out[curr_seq].append(line)
    for name in out:
        out[name] = ''.join(out[name])
    return out
def get_fasta_buffer(file_name):
    '''An efficient, buffered fasta reader for big fasta files.

    Yields each record one by one as a tuple -> (name, sequence).
    The file handle is closed when the generator is exhausted, and an
    empty file now yields nothing (the old version raised NameError on
    its final yield because no name had ever been read).
    '''
    current_name = None  # None until the first '>' header is seen
    current_seq = []
    with open(file_name) as fh:
        for line in fh:
            if line.startswith('>'):
                if current_name is not None:
                    yield (current_name, ''.join(current_seq))
                current_name = line[1:].strip()
                current_seq = []
            else:
                current_seq.append(line.strip())
        # flush the last record, if any header was ever seen
        if current_name is not None:
            yield (current_name, ''.join(current_seq))
print_fasta = lambda s: ('>'+i+'\n' + s[i] for i in s)
###Set functions
def intersection(sets):
    """Get the intersection of all input sets.

    Non-set iterables are converted to sets first; inputs that already
    are sets are passed through untouched (same as before).  `reduce`
    is imported explicitly for py3, where it is no longer a builtin.
    """
    from functools import reduce  # builtin on py2, moved to functools on py3
    return reduce(set.intersection,
                  (s if isinstance(s, set) else set(s) for s in sets))
def union(sets):
    """Get the union of all input sets.

    Non-set iterables are converted to sets first; inputs that already
    are sets are passed through untouched (same as before).  `reduce`
    is imported explicitly for py3, where it is no longer a builtin.
    """
    from functools import reduce  # builtin on py2, moved to functools on py3
    return reduce(set.union,
                  (s if isinstance(s, set) else set(s) for s in sets))
def join(seqs):
    """Join any input sequences that support concatenation.

    `reduce` is imported explicitly for py3, where it is no longer a
    builtin.
    """
    from functools import reduce  # builtin on py2, moved to functools on py3
    return reduce(operator.concat, seqs)
#Misc
def get_file(filename, splitchar='NA', buffered=False):
    '''Read a delimited text file into lists of tokens.

    splitchar='NA' is a sentinel meaning "split on any whitespace"
    (str.split with no argument).  Returns a list of token lists, or a
    lazy generator when buffered=True.  The file handle is now closed in
    both modes (it was leaked before).
    '''
    sep = None if splitchar == 'NA' else splitchar  # split(None) == split()
    if not buffered:
        with open(filename) as fh:
            return [line.strip().split(sep) for line in fh]

    def _lines():
        # generator closes the handle when exhausted
        with open(filename) as fh:
            for line in fh:
                yield line.strip().split(sep)
    return _lines()
def sort_dict_by_val(aDict):
    '''Return a list of (key, value) tuples sorted by the dict values.

    Ties on the value are broken by the key, matching the old py2
    ``key=lambda (k, v): (v, k)`` tuple-unpacking lambda (a SyntaxError
    on py3, where dict.iteritems is also gone).
    '''
    return sorted(aDict.items(), key=lambda kv: (kv[1], kv[0]))
def pairwise(li):
    '''Yield every pairwise comparison (li[a], li[b]) with a < b.'''
    n = len(li)
    for a in range(n):
        for b in range(a + 1, n):
            yield (li[a], li[b])
class Hash(defaultdict):
    '''works like a perl hash, auto-initializing
    Note: be careful with "if x in Hash"'''
    def __init__(self):
        # Every missing key materializes another Hash, so arbitrarily
        # deep lookups (h['a']['b']['c']) auto-vivify like a Perl hash.
        defaultdict.__init__(self, Hash)
    def __reduce__(self):
        # Pickle support: defaultdict's __reduce__ passes the
        # default_factory as a positional __init__ argument, but our
        # __init__ takes none -- so replace the args tuple with ().
        r = defaultdict.__reduce__(self)
        # override __init__ args
        return (r[0], (), r[2], r[3], r[4])
def count_all(xlist, proportions=False):
    '''Count all the items in a list, return a dict
    with the item as key and counts as value.
    If proportions are set to True, the values
    are the proportions not counts'''
    counts = defaultdict(int)
    for item in xlist:
        counts[item] += 1
    if not proportions:
        return counts
    total = float(sum(counts.values()))
    return dict((key, counts[key] / total) for key in counts)
###Combinatorial functions
def product(*args, **kwds):
    ''' product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111

    Pure-python itertools.product equivalent.  The old
    ``map(tuple, args) * repeat`` only worked on py2, where map returned
    a list; on py3 multiplying a map iterator is a TypeError.
    '''
    pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [combo + [item] for combo in result for item in pool]
    for combo in result:
        yield tuple(combo)
def permutations(iterable, r=None):
    ''' permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
    permutations(range(3)) --> 012 021 102 120 201 210 '''
    pool = tuple(iterable)
    size = len(pool)
    if r is None:
        r = size
    for idx in product(range(size), repeat=r):
        # keep only index tuples with no repeated positions
        if len(set(idx)) == r:
            yield tuple(pool[i] for i in idx)
def combinations(iterable, r):
    ''' combinations('ABCD', 2) --> AB AC AD BC BD CD
    combinations(range(4), 3) --> 012 013 023 123 '''
    pool = tuple(iterable)
    size = len(pool)
    for idx in permutations(range(size), r):
        idx = tuple(idx)
        # a permutation already in sorted order is a combination
        if tuple(sorted(idx)) == idx:
            yield tuple(pool[i] for i in idx)
def combinations_with_replacement(iterable, r):
    '''combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC'''
    pool = tuple(iterable)
    size = len(pool)
    for idx in product(range(size), repeat=r):
        # non-decreasing index tuples are exactly the sorted ones
        if all(idx[i] <= idx[i + 1] for i in range(len(idx) - 1)):
            yield tuple(pool[i] for i in idx)
from pprint import pprint
#print map(len, [IMPR_rils, IMF3, LVR, SF, SWC, v2_genome])
#
#for i in [IMPR_rils, IMF3, LVR, SF, SWC, v2_genome]:
# pprint( i[:4] )
#
def mkr_ord_list(x):
    """Build ordered, oriented scaffold maps from tab-separated marker rows.

    Each row of `x` is split on '\\t' into five fields; from the usage
    below they appear to be (TODO confirm against the caller's data):
        chrom \\t "<scaffold>_<bp offset>" \\t recomb_fraction \\t marker_num \\t <unused>

    Returns a 3-tuple:
        k         -- {chrom: [scaffold names in map order, '+'/'-' prefixed]}
        dist      -- {scaffold name (bare and +/- prefixed): map length}
        dist_norm -- same keys, lengths normalized per chromosome
    """
    k, orient, dist = {}, {}, {}
    curr_scaff, curr_chrom = '', ''  # NOTE(review): assigned but never used
    lens = {}
    # Pass 1: group recombination fractions by (chrom, scaffold) and
    # record each marker's bp offset so orientation can be inferred.
    for i in x:
        #print i
        ch,scaff_pos, rf, marker_num, nil = i.split('\t')
        if not rf: rf = 0  # empty rf field means zero recombination
        scaff,pos = scaff_pos.split('_')
        cs = (ch, scaff)
        rf = float(rf)
        if ch not in k:
            k[ch] = []
            lens[ch] = {}
        if scaff not in lens[ch]: lens[ch][scaff] = []
        lens[ch][scaff].append(rf)
        if scaff not in k[ch]:
            k[ch].append(scaff)  # first-seen order defines map order
        #cs = (ch, scaff)
        if cs not in orient: orient[cs] = []
        orient[cs].append(int(pos))
    # Pass 2: convert per-scaffold rf lists into a single map length.
    # Each scaffold takes its interior rf values in full plus half of
    # the boundary rf on each side (half of the previous scaffold's last
    # rf and half of its own last rf).
    for ch in k:
        last_len = 0
        for idx, mkr in enumerate(k[ch]):
            rf_list = lens[ch][mkr]
            if idx == 0:
                if len(rf_list) > 1:
                    mklen = sum(rf_list[:-1])+0.5*rf_list[-1]
                else: mklen = sum(rf_list)*0.5
                last_len = rf_list[-1]
            else:
                if len(rf_list) > 1:
                    mklen = 0.5*last_len+sum(rf_list[:-1])+0.5*rf_list[-1]
                else: mklen = 0.5*last_len+sum(rf_list)*0.5
                last_len = rf_list[-1]
            if mklen == 0: mklen+=0.001  # avoid zero-length scaffolds
            # store under the bare name and both signed variants so
            # later lookups work regardless of orientation prefix
            dist[mkr] = mklen
            dist['+'+mkr] = mklen
            dist['-'+mkr] = mklen
    # Pass 3: infer each scaffold's orientation from its first vs last
    # marker bp offset, then prefix the scaffold names in k with the sign.
    scaff_sign = {}
    for ch in k:
        for scaff in k[ch]:
            sign = 'NONE'  # single-marker ties with equal offsets stay 'NONE'
            myo = orient[(ch, scaff)]
            if len(myo) == 1: sign = '+'
            else:
                if myo[0] > myo[-1]: sign = '-'
                elif myo[0] < myo[-1]: sign = '+'
            scaff_sign[scaff] = sign
        for idx in range(len(k[ch])):
            scaff, sign = k[ch][idx], scaff_sign[k[ch][idx]]
            k[ch][idx] = sign+scaff
        #print ch, ' '.join(k[ch]), '\n'
    # Pass 4: normalize map lengths so each chromosome sums to 1; again
    # store under bare and both signed names.
    dist_norm = {}
    for ch in k:
        mkr_tot = sum([dist[i] for i in k[ch]])
        for i in k[ch]:
            dist_norm[i] = dist[i] / mkr_tot
            dist_norm[i[1:]] = dist[i] / mkr_tot
            dist_norm['+'+i[1:]] = dist[i] / mkr_tot
            dist_norm['-'+i[1:]] = dist[i] / mkr_tot
        tot = 0
        for i in k[ch]: tot+=dist_norm[i]  # sanity check: should be ~1.0
        #print tot
    return k, dist, dist_norm
#
# Build one map per cross.  Materialize a list rather than using
# map(...): on py3 map returns a one-shot iterator, which would be
# exhausted by the first comprehension below and silently leave
# lendict and lendict_norm empty.
q = [mkr_ord_list(cross) for cross in [IMPR_rils, LVR, SF, SWC, IMF3]]
#pprint(q)
m = 'IMPR_rils, LVR, SF, SWC, IMF3'.replace(',', '').split()
#
# Per-cross dicts: scaffold order, marker distances, normalized distances.
final_maps = dict(zip(m, [i[0] for i in q]))
lendict = dict(zip(m, [i[1] for i in q]))
lendict_norm = dict(zip(m, [i[2] for i in q]))
# lendict = { "1867": 7424,
# "1200": 10581,
# "4026": 4021,
# "4448": 3313,
# "4024": 3793,
# "4025": 5196,
# "4021": 3901,
# "643": 24903,
# "344": 133218,
# "346": 141074,
# "347": 129251,
# "341": 148400,
# "3999": 3828,
# "343": 236345,
# "3997": 3834,
# "348": 133633,
# "349": 145885,
# "3991": 3839,
# "3990": 3839,
# "4732": 2247,
# "4733": 4509,
# "4730": 3284,
# "4737": 3228,
# "2319": 6499,
# "2317": 6211,
# "2314": 7091,
# "2315": 8485,
# "2311": 6224,
# "299": 376137,
# "296": 241004,
# "297": 220817,
# "294": 213940,
# "292": 222798,
# "293": 210344,
# "290": 211645,
# "291": 226885,
# "3773": 4105,
# "3770": 4111,
# "270": 252120,
# "271": 283718,
# "4657": 6094,
# "274": 252822,
# "275": 296860,
# "276": 261204,
# "277": 416273,
# "278": 248600,
# "279": 342903,
# "738": 18936,
# "2260": 6420,
# "2440": 6838,
# "2441": 5948,
# "2447": 5939,
# "108": 917270,
# "109": 878603,
# "102": 941100,
# "100": 990628,
# "101": 1018856,
# "107": 901663,
# "105": 895826,
# "4813": 1881,
# "4812": 1886,
# "2044": 6835,
# "4810": 3093,
# "4817": 3055,
# "2043": 6836,
# "4819": 6522,
# "9a": 1518077,
# "9c": 507730,
# "9b": 1459161,
# "3519": 4364,
# "3510": 4375,
# "3517": 4368,
# "70b": 169442,
# "2684": 7649,
# "2687": 5493,
# "2680": 5693,
# "2682": 5502,
# "1436": 9176,
# "99": 1109881,
# "98": 1029260,
# "91": 1025861,
# "90": 1051226,
# "92": 1028281,
# "95": 995370,
# "94": 1112908,
# "96": 948774,
# "3207": 5959,
# "3205": 4943,
# "1625": 8307,
# "3200": 4746,
# "1999": 7702,
# "1629": 8657,
# "3208": 4739,
# "2860": 5195,
# "2863": 5852,
# "559": 60290,
# "558": 43747,
# "2866": 5184,
# "2869": 5182,
# "557": 39350,
# "553": 34762,
# "117b": 465505,
# "117a": 393100,
# "5055": 1376,
# "3349": 4581,
# "1193": 11249,
# "1192": 10663,
# "68a": 1210954,
# "1197": 10978,
# "1196": 10617,
# "1756": 7812,
# "1753": 7822,
# "1175": 38694,
# "1174": 10843,
# "1173": 11422,
# "1171": 12245,
# "4250": 3576,
# "55a": 416326,
# "55b": 1095436,
# "1285": 10006,
# "1284": 10011,
# "1287": 10122,
# "512": 42160,
# "1281": 10018,
# "514": 46621,
# "1579": 8495,
# "689": 21268,
# "877": 15509,
# "1572": 8543,
# "1571": 8548,
# "1570": 8861,
# "1577": 8600,
# "683": 21919,
# "682": 22171,
# "458": 64902,
# "459": 68683,
# "620": 26788,
# "626": 26651,
# "4592": 4712,
# "4389": 3209,
# "625": 26701,
# "4381": 3963,
# "2035": 6856,
# "405": 130937,
# "1372": 9516,
# "1375": 10032,
# "402": 78618,
# "401": 79613,
# "1376": 9486,
# "1379": 9471,
# "4848": 2164,
# "409": 136123,
# "408": 77247,
# "3834": 4022,
# "453": 68213,
# "3830": 4463,
# "2031": 7333,
# "3833": 4283,
# "4973": 1364,
# "3839": 4365,
# "455": 63117,
# "4031": 3779,
# "4030": 3779,
# "4033": 3775,
# "4035": 3770,
# "4034": 3770,
# "4036": 4052,
# "4038": 6488,
# "4735": 2236,
# "378": 127703,
# "370": 107315,
# "373": 132043,
# "372": 106712,
# "375": 106503,
# "374": 121416,
# "376": 113760,
# "4739": 2844,
# "393": 86514,
# "392": 86991,
# "390": 98016,
# "397": 126261,
# "396": 83187,
# "394": 85198,
# "399": 85182,
# "398": 88814,
# "4729": 2254,
# "2300": 6530,
# "4727": 2356,
# "4721": 2541,
# "2307": 6234,
# "39b": 828091,
# "39a": 843254,
# "963": 13337,
# "3743": 4338,
# "3740": 4590,
# "3741": 4240,
# "245": 347975,
# "244": 332933,
# "247": 327538,
# "246": 334458,
# "241": 345904,
# "240": 348442,
# "243": 330636,
# "242": 370998,
# "4628": 4395,
# "4629": 2954,
# "249": 311102,
# "248": 321936,
# "2277": 7817,
# "2270": 6609,
# "2273": 6299,
# "2278": 6286,
# "972": 13379,
# "2455": 5927,
# "2456": 6507,
# "2453": 5931,
# "179": 552771,
# "178": 562071,
# "177": 530788,
# "176": 563593,
# "174": 559428,
# "173": 545415,
# "172": 553694,
# "171": 558078,
# "170": 564839,
# "2050": 6825,
# "4538": 2851,
# "2054": 7445,
# "4828": 3143,
# "4829": 1826,
# "2059": 7183,
# "4536": 5093,
# "3693": 4681,
# "4531": 4868,
# "4820": 2883,
# "4983": 1480,
# "17b": 1129157,
# "17a": 1248856,
# "3523": 4363,
# "3521": 4363,
# "4655": 3151,
# "4654": 3475,
# "4651": 3017,
# "2693": 5978,
# "2692": 6238,
# "2690": 5684,
# "4557": 3198,
# "3230": 4813,
# "4555": 3092,
# "3236": 4929,
# "1616": 8480,
# "1617": 8325,
# "1966": 7059,
# "1615": 10079,
# "1613": 8333,
# "2870": 6240,
# "2878": 5366,
# "3357": 4572,
# "3358": 4572,
# "1768": 7766,
# "1765": 7783,
# "1140": 11199,
# "1147": 11133,
# "1145": 11654,
# "1148": 11487,
# "1149": 11118,
# "692": 22645,
# "1547": 8647,
# "691": 24193,
# "1542": 8771,
# "41a": 1528751,
# "41b": 121806,
# "1541": 8674,
# "541": 53241,
# "544": 89421,
# "545": 39327,
# "548": 40201,
# "549": 35571,
# "761": 18377,
# "114b": 560863,
# "114a": 311946,
# "4631": 2619,
# "414": 85134,
# "415": 76812,
# "416": 75252,
# "410": 75195,
# "412": 73813,
# "1386": 9438,
# "1387": 9531,
# "1380": 9469,
# "419": 71894,
# "1382": 10013,
# "3827": 4238,
# "3825": 4427,
# "3820": 4046,
# "3582": 4425,
# "3829": 4030,
# "3828": 4032,
# "5000": 3222,
# "68b": 51591,
# "5006": 1828,
# "5005": 8172,
# "4284": 3975,
# "4285": 3416,
# "4289": 4599,
# "368": 138051,
# "369": 109625,
# "366": 114101,
# "367": 123517,
# "365": 131756,
# "362": 115253,
# "363": 138577,
# "360": 166606,
# "361": 131757,
# "5008": 1277,
# "380": 113366,
# "381": 146697,
# "382": 93042,
# "383": 109326,
# "384": 132813,
# "385": 106512,
# "387": 111948,
# "389": 105536,
# "4759": 2140,
# "4750": 2720,
# "4751": 2171,
# "4754": 2154,
# "4756": 4353,
# "4757": 3289,
# "3759": 4118,
# "3751": 4124,
# "900": 14805,
# "3757": 4121,
# "2192": 6473,
# "2194": 7617,
# "2195": 6471,
# "4635": 3524,
# "252": 353764,
# "253": 314795,
# "250": 347741,
# "251": 331310,
# "255": 308530,
# "499": 45948,
# "2207": 6434,
# "2208": 9168,
# "2421": 5995,
# "2423": 5990,
# "2425": 6330,
# "2426": 5983,
# "168": 575611,
# "169": 549116,
# "164": 585109,
# "165": 576742,
# "166": 566453,
# "167": 563323,
# "160": 581095,
# "161": 585113,
# "162": 663850,
# "163": 666615,
# "4838": 1975,
# "3689": 4188,
# "3688": 4317,
# "4529": 3029,
# "4831": 1814,
# "4830": 3834,
# "4834": 1804,
# "2512": 5816,
# "2735": 5542,
# "2736": 5411,
# "2730": 5420,
# "2733": 5417,
# "4426": 3314,
# "3530": 4605,
# "3533": 6735,
# "3532": 6766,
# "5059": 1101,
# "5054": 1114,
# "3538": 4562,
# "5056": 1107,
# "5057": 1103,
# "5050": 1128,
# "5052": 1118,
# "5053": 1115,
# "1817": 8227,
# "1812": 7625,
# "80b": 61236,
# "80a": 1025492,
# "1093": 11651,
# "1819": 38262,
# "674": 23891,
# "670": 24088,
# "671": 120689,
# "4952": 1429,
# "273a": 183709,
# "273b": 60567,
# "3229": 4813,
# "1977": 7013,
# "3227": 4713,
# "3226": 4879,
# "3221": 6059,
# "803": 24054,
# "801": 17239,
# "807": 16707,
# "806": 16886,
# "2848": 5212,
# "2847": 5214,
# "2845": 5219,
# "2840": 5222,
# "2841": 5977,
# "3320": 4616,
# "3329": 4610,
# "115a": 494293,
# "115b": 348331,
# "320": 178831,
# "5a": 2017172,
# "5b": 1990000,
# "59": 1454023,
# "2998": 6861,
# "3038": 6647,
# "1554": 9150,
# "1557": 8987,
# "1556": 8606,
# "3035": 4965,
# "3036": 5084,
# "2993": 5005,
# "3030": 5617,
# "2995": 5004,
# "2996": 5003,
# "2997": 5982,
# "537": 53924,
# "536": 43629,
# "535": 46023,
# "534": 44673,
# "532": 40007,
# "531": 54897,
# "54": 1471776,
# "2992": 5686,
# "56": 1519343,
# "51": 1483456,
# "50": 1509628,
# "53": 1573146,
# "52": 1491097,
# "1975": 7019,
# "429": 73737,
# "428": 83649,
# "420": 76347,
# "1395": 11235,
# "422": 72653,
# "1393": 9503,
# "424": 79008,
# "427": 67336,
# "426": 71079,
# "305": 191876,
# "3815": 4784,
# "3819": 4047,
# "4296": 5225,
# "4292": 4068,
# "22b": 70555,
# "22a": 2130672,
# "4740": 2209,
# "4747": 2182,
# "4746": 4581,
# "4745": 2194,
# "4744": 2352,
# "221b": 184634,
# "221a": 201432,
# "4715": 2484,
# "2183": 6492,
# "229": 381296,
# "228": 387149,
# "226": 404557,
# "225": 385973,
# "224": 381622,
# "223": 378260,
# "222": 392726,
# "220": 417076,
# "2211": 6582,
# "2217": 6419,
# "864": 15298,
# "151": 651103,
# "150": 391169,
# "153": 640770,
# "152": 620319,
# "155": 637657,
# "154": 618181,
# "157": 612112,
# "156": 613654,
# "159": 593561,
# "158": 618308,
# "4824": 3253,
# "590": 29879,
# "4517": 3318,
# "4514": 6923,
# "4515": 3051,
# "2509": 6292,
# "2504": 6004,
# "2503": 5827,
# "2500": 5830,
# "2724": 5608,
# "5049": 2411,
# "5048": 6654,
# "5047": 1146,
# "5046": 1155,
# "5044": 1157,
# "2199": 6611,
# "5040": 1899,
# "3783": 4096,
# "3784": 6318,
# "3785": 5065,
# "3789": 4407,
# "4996": 4105,
# "4994": 1323,
# "4995": 1320,
# "4992": 1331,
# "4993": 1327,
# "4990": 2700,
# "4998": 1317,
# "12a": 2466319,
# "3717": 4155,
# "3258": 5231,
# "975": 13310,
# "819": 16511,
# "810": 17348,
# "811": 17166,
# "1991": 6980,
# "2850": 5207,
# "2853": 5204,
# "2855": 5673,
# "3332": 5282,
# "1492": 9504,
# "3330": 4609,
# "1494": 10397,
# "3336": 4600,
# "1396": 9487,
# "3339": 4886,
# "1700": 8011,
# "1702": 8007,
# "1705": 8002,
# "1392": 9955,
# "1129": 12104,
# "1121": 11372,
# "1123": 14532,
# "4579": 3353,
# "3027": 4971,
# "3026": 4972,
# "3025": 5262,
# "2987": 5013,
# "3021": 4975,
# "524": 38952,
# "527": 116365,
# "520": 42170,
# "521": 42400,
# "523": 39185,
# "695": 21007,
# "1016": 13682,
# "1012": 12699,
# "1234": 10295,
# "1235": 10543,
# "1236": 10737,
# "1231": 10519,
# "4198": 4466,
# "4199": 4160,
# "4194": 3560,
# "4191": 4271,
# "4353": 3269,
# "438": 65578,
# "4900": 1587,
# "435": 78918,
# "432": 155278,
# "433": 124240,
# "430": 68015,
# "431": 105747,
# "3806": 4066,
# "3803": 4068,
# "3802": 4070,
# "749": 18526,
# "4777": 2459,
# "4774": 2074,
# "4775": 7407,
# "4770": 4247,
# "4771": 3183,
# "4176": 4937,
# "4170": 3994,
# "5065": 1077,
# "5064": 1083,
# "238": 357132,
# "239": 349945,
# "4611": 4071,
# "235": 360907,
# "236": 367500,
# "230": 374814,
# "231": 425843,
# "232": 368463,
# "233": 366412,
# "1": 4921665,
# "146": 678631,
# "147": 696204,
# "144": 738782,
# "145": 791405,
# "142": 742927,
# "143": 710979,
# "141": 707463,
# "5069": 1062,
# "148": 635048,
# "149": 676572,
# "4507": 3675,
# "4506": 2926,
# "4500": 3625,
# "4503": 2938,
# "4502": 2942,
# "4509": 2919,
# "120": 851620,
# "2082": 7316,
# "2539": 6504,
# "3405": 4505,
# "2536": 5776,
# "2713": 5447,
# "2710": 5451,
# "2711": 5551,
# "2716": 5443,
# "2718": 5439,
# "127": 777152,
# "5072": 2155,
# "5070": 1061,
# "3345": 5085,
# "5076": 1048,
# "5077": 3336,
# "5074": 1053,
# "5075": 1052,
# "1832": 7669,
# "1833": 7663,
# "1830": 8271,
# "1831": 7574,
# "1836": 7829,
# "1834": 9046,
# "3794": 4081,
# "1838": 7548,
# "3796": 4078,
# "3792": 4184,
# "4981": 1351,
# "2156": 7902,
# "4982": 2013,
# "2402": 6030,
# "2403": 6227,
# "2152": 6576,
# "2401": 6030,
# "4988": 1333,
# "930": 14108,
# "2159": 7023,
# "1954": 7105,
# "1951": 7112,
# "1952": 7106,
# "4870": 1655,
# "3248": 4967,
# "4872": 1654,
# "3247": 5823,
# "3246": 4837,
# "4738": 2221,
# "826": 17072,
# "379a": 22535,
# "820": 16507,
# "379b": 74811,
# "4310": 3354,
# "618": 26335,
# "1483": 8913,
# "619": 30098,
# "1485": 8911,
# "1488": 12014,
# "3300": 4629,
# "795": 17911,
# "1710": 7986,
# "1717": 8075,
# "1715": 7978,
# "798": 18238,
# "4979": 1354,
# "1131": 11291,
# "1137": 11247,
# "1136": 11249,
# "1134": 11721,
# "920": 15032,
# "476": 54719,
# "3018": 4982,
# "3010": 5650,
# "3017": 4985,
# "925": 14062,
# "519": 43501,
# "510": 87450,
# "513": 51266,
# "1004": 14101,
# "1003": 13156,
# "1002": 12871,
# "516": 41830,
# "623": 27031,
# "1225": 10723,
# "1224": 10361,
# "627": 26248,
# "1222": 10389,
# "1221": 11279,
# "629": 26068,
# "4184": 3578,
# "4186": 4272,
# "4345": 3291,
# "4343": 3294,
# "2886": 5153,
# "2884": 5158,
# "2": 4557820,
# "3870": 3980,
# "3872": 3977,
# "3875": 3975,
# "11": 2975324,
# "10": 2979714,
# "13": 2637824,
# "15": 2589472,
# "14": 2653638,
# "16": 2571979,
# "19": 2344269,
# "18": 2290462,
# "3900": 4171,
# "3909": 3941,
# "4763": 2944,
# "4766": 2249,
# "4768": 2097,
# "4166": 4368,
# "4164": 4541,
# "4161": 3606,
# "4160": 3607,
# "201": 476088,
# "200": 490842,
# "203": 538100,
# "205": 488860,
# "204": 456649,
# "207": 433001,
# "206": 425272,
# "209": 412828,
# "208": 423504,
# "5012": 1266,
# "684": 44278,
# "686": 21711,
# "2231": 6393,
# "2232": 7011,
# "2235": 6388,
# "2234": 6390,
# "2236": 6382,
# "5093": 1010,
# "2185": 9715,
# "3496": 4397,
# "5094": 1010,
# "3495": 4498,
# "3492": 4407,
# "2095": 6877,
# "4571": 5495,
# "4572": 2784,
# "4573": 2880,
# "3655": 4231,
# "2189": 6480,
# "2523": 5931,
# "2522": 7467,
# "2528": 5896,
# "4603": 2691,
# "4888": 1622,
# "4889": 1620,
# "5067": 1064,
# "5066": 1070,
# "5061": 1095,
# "2708": 5454,
# "5063": 4201,
# "5062": 1093,
# "4881": 1637,
# "853": 16495,
# "4490": 3577,
# "4884": 1627,
# "4885": 2726,
# "4886": 1625,
# "4409": 3728,
# "4404": 3169,
# "4405": 4809,
# "4400": 4254,
# "1820": 7898,
# "1823": 7609,
# "928": 14227,
# "2410": 6687,
# "2414": 6003,
# "2416": 6001,
# "4974": 1562,
# "498": 55672,
# "4970": 1382,
# "4971": 2154,
# "4972": 1368,
# "2148": 8883,
# "3276": 4659,
# "3277": 4659,
# "1924": 7220,
# "495": 58908,
# "3474": 4429,
# "836": 15929,
# "837": 15899,
# "490": 49366,
# "838": 15899,
# "3478": 6820,
# "3479": 4422,
# "491": 89576,
# "3": 4381129,
# "493": 63970,
# "3318": 4617,
# "3312": 8360,
# "3315": 4621,
# "784": 17847,
# "785": 17765,
# "787": 18347,
# "781": 18501,
# "1724": 7945,
# "83b": 817115,
# "83a": 246274,
# "3005": 7339,
# "61": 1485772,
# "62": 1390486,
# "64": 1360977,
# "65": 1384717,
# "66": 1286115,
# "67": 1298324,
# "69": 1241070,
# "3009": 4992,
# "3008": 7098,
# "1588": 8904,
# "1580": 8491,
# "1581": 8788,
# "1373": 9503,
# "404": 137247,
# "508": 45109,
# "1032": 12535,
# "504": 47545,
# "505": 46492,
# "502": 46642,
# "1037": 12509,
# "1034": 12764,
# "501": 47276,
# "1212": 10599,
# "6a": 1346301,
# "1210": 11214,
# "6c": 2479288,
# "1216": 11547,
# "635": 26912,
# "636": 26287,
# "637": 25915,
# "638": 44119,
# "1218": 10449,
# "927": 14683,
# "4373": 3519,
# "4370": 4015,
# "4376": 3494,
# "4377": 3851,
# "4375": 4475,
# "465": 84889,
# "103b": 195487,
# "103c": 689748,
# "2896": 5910,
# "2890": 5147,
# "1457": 10431,
# "1451": 9093,
# "1452": 9093,
# "1108": 11496,
# "4082": 3712,
# "3863": 3990,
# "4089": 4303,
# "3861": 6138,
# "3867": 5142,
# "3866": 3984,
# "3148": 6757,
# "3915": 3935,
# "3914": 3936,
# "4922": 1536,
# "672": 23570,
# "4798": 2633,
# "4799": 1922,
# "4794": 1972,
# "4795": 1970,
# "4796": 1970,
# "4791": 3883,
# "4792": 1988,
# "4152": 4020,
# "4153": 4236,
# "4151": 5031,
# "4158": 3977,
# "216": 398432,
# "217": 385887,
# "214": 457125,
# "215": 399806,
# "212": 428987,
# "213": 416936,
# "210": 426754,
# "211": 448992,
# "219": 412849,
# "3489": 4777,
# "3488": 4409,
# "3487": 7066,
# "3486": 4627,
# "4563": 4386,
# "4561": 3327,
# "4564": 3395,
# "3646": 4239,
# "3643": 4244,
# "3641": 4246,
# "2550": 5744,
# "60": 1464918,
# "2553": 5741,
# "2555": 5735,
# "2557": 5732,
# "63b": 1132470,
# "2396": 6514,
# "5010": 1273,
# "193b": 65384,
# "193a": 410337,
# "5013": 1264,
# "5014": 1259,
# "5015": 1258,
# "5016": 1255,
# "5017": 1252,
# "4893": 3986,
# "2771": 5343,
# "4891": 2767,
# "4897": 1594,
# "2775": 5337,
# "4895": 1601,
# "1859": 7716,
# "4418": 3588,
# "4413": 3295,
# "1856": 7450,
# "2172": 6832,
# "2173": 7213,
# "2170": 6635,
# "2176": 7245,
# "918": 14269,
# "4969": 1383,
# "4968": 12670,
# "917": 14381,
# "2179": 6500,
# "4963": 1401,
# "4962": 1401,
# "4960": 1773,
# "2605": 6123,
# "2606": 5650,
# "2600": 5783,
# "2603": 5757,
# "2608": 5644,
# "1931": 7189,
# "1936": 7301,
# "1934": 7177,
# "3465": 4435,
# "841": 15876,
# "848": 15906,
# "3367": 5174,
# "733": 20553,
# "1737": 8357,
# "1039": 12364,
# "5041": 1159,
# "752": 18744,
# "755": 18707,
# "1806": 7643,
# "758": 18710,
# "3070": 4912,
# "3071": 4910,
# "1597": 8406,
# "2958": 5172,
# "3076": 5003,
# "3077": 6588,
# "3078": 4902,
# "2950": 5276,
# "1598": 8402,
# "2952": 5618,
# "2953": 5070,
# "1021": 12738,
# "1023": 12688,
# "1022": 40775,
# "1800": 7653,
# "63a": 233837,
# "1028": 12794,
# "936": 13820,
# "4365": 3834,
# "4364": 3757,
# "4367": 4745,
# "4361": 3707,
# "4360": 3945,
# "603": 32091,
# "4362": 5507,
# "1204": 10652,
# "1206": 10549,
# "4369": 3710,
# "4368": 3844,
# "1203": 10656,
# "633": 27168,
# "634": 26580,
# "3109": 4863,
# "3105": 6230,
# "3106": 4864,
# "3101": 4872,
# "1110": 11575,
# "1445": 9464,
# "1444": 9399,
# "1117": 11800,
# "1116": 11537,
# "467": 67048,
# "4099": 3688,
# "4098": 4128,
# "460": 58964,
# "489": 53455,
# "487": 51727,
# "484": 50785,
# "483": 53422,
# "482": 54180,
# "481": 88698,
# "480": 52404,
# "111a": 184681,
# "111b": 690094,
# "4853": 1694,
# "4852": 6754,
# "3921": 4356,
# "3927": 3924,
# "3929": 3923,
# "4786": 2014,
# "4784": 2023,
# "4782": 2998,
# "4145": 4315,
# "4825": 2491,
# "4147": 3722,
# "4141": 3636,
# "4143": 4887,
# "3856": 4133,
# "3855": 3998,
# "3852": 4003,
# "3851": 5467,
# "3665": 4693,
# "456": 61129,
# "103a": 86401,
# "199": 474936,
# "198": 458133,
# "195": 470344,
# "194": 493235,
# "197": 483139,
# "196": 563823,
# "191": 528052,
# "190": 491400,
# "192": 552233,
# "3678": 4204,
# "4558": 5888,
# "4559": 4644,
# "3673": 4307,
# "4554": 4010,
# "3671": 4820,
# "3676": 4305,
# "3677": 4204,
# "4550": 2928,
# "2543": 5755,
# "2540": 6302,
# "2545": 6013,
# "2549": 5746,
# "1105": 11524,
# "2389": 6051,
# "5003": 1301,
# "5002": 1301,
# "2769": 5589,
# "3583": 4303,
# "5007": 1277,
# "3585": 4620,
# "3586": 4401,
# "3587": 4767,
# "2762": 5370,
# "5009": 3031,
# "2760": 5377,
# "2765": 5365,
# "999": 12925,
# "4620": 3598,
# "4428": 3217,
# "905": 14420,
# "908": 14615,
# "4421": 3367,
# "1847": 7709,
# "1846": 8092,
# "4424": 8311,
# "254b": 140530,
# "254a": 147896,
# "4959": 1406,
# "2165": 7175,
# "4953": 2932,
# "4950": 3748,
# "2166": 6742,
# "4956": 3638,
# "4954": 1424,
# "4955": 5024,
# "2617": 5635,
# "2614": 5636,
# "2618": 5854,
# "1906": 8565,
# "3296": 4947,
# "3297": 4632,
# "854": 15761,
# "857": 15440,
# "852": 15876,
# "3455": 4739,
# "3458": 4446,
# "858": 38275,
# "1184": 10726,
# "740": 18808,
# "742": 19271,
# "743": 19069,
# "747": 40301,
# "4811": 1890,
# "3062": 5595,
# "821": 16629,
# "3069": 4913,
# "2944": 5081,
# "2942": 5083,
# "1051": 12951,
# "1052": 13779,
# "1054": 12136,
# "1056": 12112,
# "1699": 8016,
# "1278": 10043,
# "4311": 3919,
# "4312": 3552,
# "4313": 3775,
# "4316": 3343,
# "4317": 4566,
# "4318": 3676,
# "613": 46387,
# "610": 27396,
# "611": 29372,
# "1274": 14136,
# "1275": 10059,
# "1276": 11782,
# "1277": 10050,
# "3118": 4846,
# "3117": 4997,
# "3115": 4950,
# "3111": 4862,
# "3110": 5063,
# "1309": 9875,
# "4228": 3613,
# "4229": 3512,
# "4224": 4981,
# "4225": 3513,
# "496": 72198,
# "497": 48227,
# "4220": 3526,
# "4221": 3524,
# "4222": 3877,
# "4223": 3797,
# "24": 2129002,
# "25": 2014290,
# "27": 2067343,
# "20": 2339421,
# "21": 2330070,
# "23": 2146757,
# "28": 1986795,
# "29": 1960306,
# "4586": 3831,
# "5028": 1213,
# "5035": 4240,
# "3939": 3916,
# "3937": 4452,
# "3933": 5003,
# "4138": 3640,
# "4139": 3638,
# "4130": 3646,
# "4132": 3646,
# "4135": 3841,
# "3841": 4016,
# "3843": 4014,
# "3842": 4390,
# "3845": 4608,
# "3844": 4665,
# "3847": 4207,
# "3846": 4010,
# "3848": 4007,
# "4040": 3763,
# "4042": 3759,
# "4048": 4571,
# "7": 3840293,
# "257b": 120786,
# "257a": 165696,
# "2288": 6275,
# "3664": 4219,
# "3666": 4314,
# "3663": 4221,
# "2280": 6285,
# "2285": 6959,
# "2287": 6940,
# "4549": 3558,
# "4548": 4075,
# "974": 13217,
# "4539": 3547,
# "971": 13292,
# "4826": 1838,
# "2378": 6067,
# "4927": 1522,
# "2370": 6088,
# "2371": 6083,
# "3593": 4296,
# "3590": 4989,
# "5038": 1176,
# "5039": 1168,
# "5036": 1186,
# "3599": 4392,
# "3598": 6003,
# "5032": 2399,
# "5033": 2699,
# "5030": 1481,
# "5031": 1208,
# "4435": 3861,
# "1877": 8545,
# "1874": 7502,
# "4439": 3573,
# "4945": 1448,
# "4944": 1451,
# "4947": 3357,
# "4946": 1446,
# "4940": 1465,
# "4943": 1452,
# "970": 13679,
# "4949": 3640,
# "4948": 1442,
# "979": 13174,
# "182": 535838,
# "180": 563604,
# "181": 520570,
# "186": 494299,
# "187": 487559,
# "184": 494107,
# "185": 502018,
# "2110": 6789,
# "2111": 6682,
# "188": 532638,
# "2113": 9127,
# "2116": 6662,
# "2117": 6662,
# "2621": 6012,
# "2625": 5620,
# "5100": 1000,
# "1912": 7356,
# "1915": 7247,
# "868": 15627,
# "3449": 4554,
# "3445": 4928,
# "3444": 4460,
# "862": 17427,
# "865": 18092,
# "3440": 4736,
# "866": 18152,
# "2027": 7064,
# "2020": 7128,
# "2021": 7142,
# "2750": 5388,
# "2023": 6869,
# "887": 16666,
# "889": 15023,
# "4590": 2825,
# "44c": 194500,
# "104b": 564646,
# "104a": 383072,
# "775": 19522,
# "776": 19641,
# "771": 17792,
# "70c": 287558,
# "772": 17791,
# "779": 17656,
# "778": 17767,
# "77": 1171582,
# "76": 1152060,
# "75": 1159690,
# "74": 1242895,
# "73": 1227457,
# "72": 1279555,
# "71": 1266760,
# "3057": 4928,
# "3054": 4936,
# "2971": 5286,
# "2976": 5038,
# "79": 1123783,
# "2975": 5039,
# "1043": 12272,
# "1042": 13250,
# "1044": 43879,
# "1683": 8326,
# "1685": 9900,
# "2981": 7583,
# "3724": 4639,
# "4300": 4022,
# "4304": 3358,
# "608": 28955,
# "3023": 4974,
# "1261": 10166,
# "663": 25379,
# "1264": 10148,
# "3129": 4836,
# "3121": 4845,
# "1468": 9396,
# "8": 3478782,
# "1316": 9980,
# "1315": 10061,
# "1314": 9849,
# "1313": 10339,
# "1319": 40465,
# "4239": 3612,
# "4231": 4007,
# "4230": 3986,
# "319": 174329,
# "318": 162397,
# "3940": 4373,
# "311": 171438,
# "310": 172423,
# "317": 190570,
# "316": 271155,
# "315": 171956,
# "314": 173024,
# "4129": 3648,
# "4127": 4055,
# "4126": 3757,
# "4125": 4237,
# "2755": 5606,
# "97c": 216725,
# "97b": 558433,
# "440": 74820,
# "97a": 217123,
# "445": 76340,
# "444": 64634,
# "4057": 4048,
# "4054": 3974,
# "4051": 3911,
# "630": 26420,
# "3610": 4276,
# "3617": 4273,
# "48b": 679687,
# "2292": 6366,
# "2297": 6252,
# "631": 25661,
# "2568": 5713,
# "2368": 6103,
# "2364": 6810,
# "5029": 1213,
# "2586": 5691,
# "2582": 7226,
# "5021": 1243,
# "5020": 1245,
# "5022": 2062,
# "5024": 1234,
# "5027": 1214,
# "5026": 1223,
# "1862": 7430,
# "3729": 4758,
# "4446": 3643,
# "1869": 8776,
# "3726": 4151,
# "3727": 4148,
# "4686": 4731,
# "4931": 1506,
# "966": 13321,
# "4933": 1583,
# "4682": 3290,
# "4683": 3098,
# "4680": 3294,
# "4681": 4366,
# "4938": 1472,
# "4689": 3228,
# "2108": 6690,
# "32": 1881789,
# "2103": 7571,
# "2102": 7638,
# "2100": 6705,
# "31": 1921653,
# "2639": 5792,
# "2635": 5597,
# "2631": 5710,
# "3431": 4478,
# "3090": 4887,
# "871": 15580,
# "2741": 5859,
# "4593": 3839,
# "2743": 5395,
# "2745": 5770,
# "4597": 3491,
# "2747": 5835,
# "4845": 1751,
# "4846": 1742,
# "4599": 3124,
# "4842": 1767,
# "2038": 6843,
# "896": 14815,
# "897": 15528,
# "898": 14724,
# "899": 14619,
# "649": 27672,
# "3544": 4516,
# "3543": 4832,
# "2965": 5054,
# "1789": 7692,
# "768": 43371,
# "769": 17839,
# "762": 18149,
# "1783": 7979,
# "766": 18098,
# "765": 22506,
# "3040": 4958,
# "3047": 4947,
# "3046": 4948,
# "1535": 10659,
# "1070": 12362,
# "1674": 8533,
# "1675": 8085,
# "1672": 8088,
# "1096": 11721,
# "679": 22368,
# "4339": 3300,
# "4336": 3634,
# "4334": 4120,
# "4335": 3806,
# "4332": 3311,
# "1099": 13921,
# "4330": 4125,
# "3131": 4835,
# "3132": 5028,
# "1419": 9567,
# "1411": 9735,
# "1416": 9763,
# "1322": 12471,
# "1324": 10719,
# "1328": 11558,
# "1329": 9777,
# "4201": 3846,
# "770": 17821,
# "1257": 11260,
# "1254": 10210,
# "1520": 8859,
# "1253": 38928,
# "1522": 8855,
# "1529": 8725,
# "1258": 10169,
# "308": 203699,
# "309": 191298,
# "3959": 4402,
# "300": 226933,
# "3952": 3888,
# "303": 205006,
# "304": 183046,
# "3956": 3985,
# "3955": 4310,
# "307": 192848,
# "4116": 5059,
# "125b": 167710,
# "125a": 636078,
# "5011": 1271,
# "4899": 3260,
# "4063": 4839,
# "3055": 4933,
# "4064": 3739,
# "4068": 3734,
# "4069": 4772,
# "5018": 4081,
# "5019": 1249,
# "4639": 3255,
# "78": 1166408,
# "3600": 4291,
# "3606": 6422,
# "3608": 4277,
# "4896": 1598,
# "2358": 6110,
# "2350": 6945,
# "4453": 3054,
# "4451": 4021,
# "4455": 3047,
# "3738": 4141,
# "3737": 4141,
# "3736": 4514,
# "3735": 4244,
# "1897": 8067,
# "1891": 7329,
# "3730": 4714,
# "4923": 2877,
# "4690": 2417,
# "4693": 4948,
# "4692": 2406,
# "4695": 3522,
# "4926": 3162,
# "4697": 3963,
# "4698": 3483,
# "954": 42072,
# "957": 13816,
# "2139": 7070,
# "477": 54721,
# "2648": 5692,
# "2649": 5561,
# "4801": 1920,
# "2643": 5579,
# "2645": 5575,
# "3421": 4489,
# "4859": 1689,
# "4856": 1969,
# "4587": 2742,
# "2009": 7261,
# "4581": 2762,
# "4580": 5575,
# "4851": 1707,
# "506": 63023,
# "1268": 10109,
# "3551": 4340,
# "3550": 4440,
# "668": 23629,
# "667": 26351,
# "106a": 119873,
# "106b": 779175,
# "2916": 5119,
# "718": 19924,
# "1793": 7775,
# "716": 19880,
# "1790": 7687,
# "1797": 10828,
# "711": 20031,
# "1068": 12060,
# "4964": 1398,
# "1064": 13495,
# "1066": 11995,
# "1667": 8117,
# "1663": 8121,
# "1662": 8222,
# "1661": 8373,
# "4328": 3321,
# "593": 51690,
# "592": 29270,
# "594": 29260,
# "597": 40854,
# "1080": 14675,
# "598": 32368,
# "4327": 3322,
# "1088": 13155,
# "714": 20786,
# "3143": 4825,
# "3144": 4824,
# "2829": 6626,
# "3147": 5479,
# "2824": 5411,
# "44b": 765200,
# "44a": 618706,
# "2822": 5257,
# "2823": 5255,
# "3384": 4532,
# "3382": 4535,
# "713": 19975,
# "1408": 9306,
# "4622": 3804,
# "449": 61977,
# "4211": 3678,
# "4213": 3536,
# "4212": 3811,
# "443": 70747,
# "1336": 10004,
# "4218": 3733,
# "1333": 9741,
# "1332": 13100,
# "697": 21184,
# "38": 1736949,
# "694": 37410,
# "33": 1836976,
# "140b": 467135,
# "140a": 333610,
# "30": 1959376,
# "37": 1766781,
# "36": 1756251,
# "35": 1768623,
# "34": 1857198,
# "641": 25503,
# "1240": 10364,
# "1243": 10353,
# "1245": 10246,
# "3097": 4878,
# "647": 24687,
# "646": 24734,
# "1249": 10328,
# "1248": 10334,
# "3098": 4875,
# "1538": 8681,
# "338": 136902,
# "335": 149063,
# "334": 166883,
# "337": 168120,
# "336": 155826,
# "331": 159104,
# "330": 152831,
# "333": 156292,
# "332": 197385,
# "4101": 3979,
# "4100": 5289,
# "4102": 4015,
# "4104": 6174,
# "4107": 3673,
# "6b": 130743,
# "3899": 4053,
# "3890": 4188,
# "3897": 3953,
# "3895": 3955,
# "5001": 1303,
# "4079": 3721,
# "4074": 4498,
# "4077": 3722,
# "4070": 5412,
# "4072": 4791,
# "3964": 3872,
# "3960": 3978,
# "3968": 5548,
# "3638": 4248,
# "3634": 4620,
# "3632": 4510,
# "4614": 3230,
# "3708": 4352,
# "3709": 4667,
# "1889": 8820,
# "4469": 3613,
# "3702": 4701,
# "1886": 7634,
# "1885": 7351,
# "4462": 4462,
# "1882": 7457,
# "1881": 7799,
# "3705": 4169,
# "2121": 6648,
# "2123": 6637,
# "4669": 2790,
# "4918": 1542,
# "4919": 1541,
# "4664": 2492,
# "947": 13926,
# "4666": 3283,
# "4915": 1547,
# "4660": 3355,
# "4661": 2498,
# "4662": 3670,
# "941": 15595,
# "2659": 6313,
# "2658": 6849,
# "2650": 7741,
# "2656": 5540,
# "133": 756046,
# "132": 755020,
# "131": 763816,
# "130": 906360,
# "137": 716765,
# "136": 724484,
# "135": 731065,
# "134": 771926,
# "139": 693269,
# "138": 714843,
# "4868": 2971,
# "4862": 1679,
# "4863": 1676,
# "4860": 1687,
# "4866": 1665,
# "2010": 6908,
# "4865": 1671,
# "4594": 2715,
# "4484": 3614,
# "4486": 4073,
# "4481": 4520,
# "4483": 5874,
# "4489": 3190,
# "2903": 5128,
# "2902": 5130,
# "2900": 5131,
# "2907": 5124,
# "709": 20516,
# "2904": 5228,
# "704": 20182,
# "705": 20171,
# "2909": 5122,
# "701": 24212,
# "702": 20371,
# "703": 20299,
# "88": 1094372,
# "89": 1044716,
# "58b": 508861,
# "58a": 929093,
# "82": 1084529,
# "5068": 1063,
# "81": 1086570,
# "86": 1060287,
# "87": 1052442,
# "84": 1089051,
# "85": 1161279,
# "1652": 8785,
# "1651": 8470,
# "1654": 8267,
# "586": 55409,
# "587": 30078,
# "584": 44155,
# "580": 32568,
# "581": 43430,
# "589": 30531,
# "3157": 4894,
# "2837": 5226,
# "1634": 8277,
# "3158": 5417,
# "2833": 5233,
# "70a": 792174,
# "2831": 5336,
# "1983": 7208,
# "3391": 4518,
# "3390": 4959,
# "1435": 9380,
# "1430": 9203,
# "1431": 9201,
# "418": 83696,
# "4260": 3456,
# "1349": 12694,
# "4262": 3799,
# "450": 105348,
# "4269": 3785,
# "452": 60866,
# "1343": 9776,
# "454": 63847,
# "1345": 9667,
# "1346": 10286,
# "4467": 3721,
# "4464": 8209,
# "656": 24079,
# "313": 203464,
# "650": 25000,
# "651": 24677,
# "948": 114546,
# "494": 52669,
# "3882": 3970,
# "949": 13525,
# "945": 13561,
# "4913": 1548,
# "4910": 23986,
# "4009": 3816,
# "57": 1425041,
# "4002": 3821,
# "4007": 4091,
# "322": 209953,
# "323": 188342,
# "3973": 3961,
# "321": 184636,
# "326": 159093,
# "327": 168297,
# "324": 163501,
# "325": 156338,
# "329": 165813,
# "3625": 8712,
# "3624": 4268,
# "2331": 6673,
# "4716": 2318,
# "2335": 6615,
# "2336": 7332,
# "2338": 6160,
# "4958": 1413,
# "4719": 2316,
# "1591": 8440,
# "3715": 4417,
# "3714": 4161,
# "12b": 386549,
# "3713": 4163,
# "4478": 3206,
# "4471": 3741,
# "4476": 5266,
# "4909": 1564,
# "4908": 6073,
# "2489": 5847,
# "4901": 1587,
# "1599": 8394,
# "4902": 3445,
# "4905": 1578,
# "4904": 1579,
# "4907": 2502,
# "2248": 6712,
# "2246": 6359,
# "2243": 6368,
# "995": 38397,
# "2460": 6226,
# "1920": 7608,
# "2468": 6107,
# "3401": 4508,
# "121": 868757,
# "122": 803686,
# "123": 858631,
# "124": 800046,
# "126": 812523,
# "3406": 4504,
# "128": 768242,
# "1188": 10795,
# "3417": 4496,
# "1921": 9457,
# "4879": 2026,
# "2069": 7005,
# "2797": 5425,
# "2794": 5394,
# "1180": 10869,
# "943": 13623,
# "5098": 1004,
# "5099": 1001,
# "5071": 1058,
# "5091": 2679,
# "5092": 2494,
# "3576": 4309,
# "3571": 4444,
# "5095": 3114,
# "3573": 4312,
# "2666": 5526,
# "4496": 3515,
# "4495": 3749,
# "4493": 2957,
# "2663": 6449,
# "2660": 5533,
# "2661": 5532,
# "4498": 3072,
# "2939": 5267,
# "2937": 5091,
# "2934": 5096,
# "2935": 6752,
# "2932": 5098,
# "2930": 5101,
# "2931": 5100,
# "1645": 8213,
# "1644": 10809,
# "1643": 8393,
# "605": 28534,
# "579": 35549,
# "578": 47762,
# "604": 48825,
# "573": 45063,
# "570": 51136,
# "577": 40796,
# "607": 29952,
# "574": 32099,
# "2809": 5274,
# "3169": 5627,
# "2802": 6192,
# "2806": 5279,
# "3163": 4972,
# "600": 47556,
# "1997": 6962,
# "4249": 3476,
# "2404": 6019,
# "3362": 4570,
# "3363": 4804,
# "731": 19133,
# "4883": 1631,
# "732": 46617,
# "735": 18984,
# "734": 19422,
# "4985": 6949,
# "4984": 1539,
# "4271": 3439,
# "4277": 4965,
# "468": 60520,
# "1353": 9625,
# "464": 57508,
# "4279": 4080,
# "4986": 1337,
# "461": 72573,
# "1356": 9953,
# "1354": 9610,
# "48a": 867662,
# "4854": 3067,
# "4867": 1659,
# "1514": 9048,
# "1510": 8801,
# "3185": 5014,
# "3186": 4764,
# "3187": 4759,
# "3183": 4769,
# "4010": 4201,
# "357": 130133,
# "356": 122044,
# "355": 130840,
# "354": 162525,
# "353": 165561,
# "352": 122891,
# "351": 125418,
# "350": 149738,
# "3985": 5063,
# "3986": 3978,
# "3980": 3851,
# "359": 123709,
# "358": 121610,
# "26b": 1582383,
# "26a": 429086,
# "4855": 2589,
# "4706": 4152,
# "4705": 2448,
# "4704": 3159,
# "4703": 2354,
# "4702": 2527,
# "2329": 6187,
# "4709": 3294,
# "4708": 2340,
# "288": 275081,
# "280": 231171,
# "283": 226506,
# "285": 235440,
# "284": 223349,
# "287": 227990,
# "3760": 4863,
# "3763": 4114,
# "3767": 4567,
# "3769": 4111,
# "263": 312309,
# "262": 278079,
# "261": 278041,
# "260": 388917,
# "267": 262308,
# "266": 255916,
# "265": 276376,
# "264": 266746,
# "2499": 6220,
# "4648": 2844,
# "4649": 2540,
# "4642": 2807,
# "4640": 2578,
# "4641": 7666,
# "4646": 3760,
# "258": 363565,
# "2496": 6058,
# "2258": 6425,
# "2256": 6333,
# "2255": 6339,
# "2252": 6350,
# "5060": 1096,
# "988": 14593,
# "983": 14675,
# "980": 13163,
# "986": 13100,
# "987": 13097,
# "116": 863024,
# "110": 887204,
# "113": 959043,
# "112": 858645,
# "119": 825377,
# "118": 810285,
# "2784": 6132,
# "4803": 1916,
# "2781": 5325,
# "4805": 1903,
# "2782": 5323,
# "4808": 1896,
# "2788": 5310,
# "2079": 6764,
# "2070": 7131,
# "5089": 1014,
# "5088": 3170,
# "5083": 1030,
# "5082": 1031,
# "5081": 2660,
# "5086": 1027,
# "5085": 1581,
# "5084": 1029,
# "2675": 5510,
# "2674": 5513,
# "2679": 5505,
# "2678": 5652,
# "822": 18365,
# "2929": 5103,
# "2920": 5210,
# "2922": 5209,
# "281a": 180007,
# "281b": 47039,
# "3215": 4732,
# "1984": 6994,
# "3210": 4737,
# "3211": 6943,
# "1980": 7005,
# "1981": 6999,
# "3218": 4827,
# "3219": 4726,
# "706": 21991,
# "2810": 6247,
# "561": 71509,
# "562": 59788,
# "563": 50211,
# "564": 32349,
# "565": 36780,
# "566": 32882,
# "567": 33094,
# "3171": 4781,
# "3172": 4781,
# "3175": 4777,
# "3379": 4538,
# "1189": 10685,
# "1186": 11158,
# "3375": 4544,
# "3372": 7500,
# "3371": 4556,
# "727": 48291,
# "724": 44158,
# "722": 19435,
# "1749": 7840,
# "720": 19641,
# "721": 71723,
# "1740": 7886,
# "1741": 8338,
# "728": 19926,
# "729": 21741,
# "1164": 13875,
# "1165": 10898,
# "1160": 10991,
# "3308": 4625,
# "1162": 11306,
# "1163": 10911,
# "4245": 3480,
# "47b": 505801,
# "47a": 1056716,
# "49": 1547149,
# "46": 1600752,
# "45": 1654184,
# "42": 1650224,
# "43": 1645260,
# "40": 1680549,
# "1568": 9928,
# "1569": 9934,
# "471": 57825,
# "1362": 9550,
# "1290": 9988,
# "1296": 9966,
# "1294": 11086,
# "1295": 11107,
# "474": 55544,
# "793": 17043,
# "4398": 4410,
# "479": 91966,
# "791": 19403,
# "129b": 258072,
# "4391": 3804,
# "129a": 475805,
# "4395": 3190,
# "4397": 6294,
# "3197": 4848,
# "1365": 9534,
# "3193": 4976,
# "1368": 9527,
# "1360": 11024,
# "3198": 4747,
# "4b": 3869250,
# "3199": 4846,
# "4a": 235127,
# '96a': 100000}
#
# #n = set()
# #for i in final_maps:
# # for j in final_maps[i]:
# # q = final_maps[i][j]
# # n.update([xx.replace('-','').replace('+','') for xx in q])
#
# #print n - set(lendict)
#
#
# for i in lendict.keys():
# v = lendict[i]
# lendict['-'+i] = v
# lendict['+'+i] = v
#
# #map(mkr_ord_list, [SWC])
# #pprint(lendict)
from matplotlib import pyplot as plt
import sys
#from common import *
from pprint import pprint
#import maps
#final_maps, lendict = maps.final_maps, maps.lendict
#from mapsJKKoptimized import lendict_norm, final_maps
def plot_ord(scaff_ord, lendict, ypos, bump=0):
    """Draw one linkage map as a horizontal segmented bar at height ``ypos``.

    Each entry of ``scaff_ord`` is a scaffold name prefixed with an
    orientation sign ('+' or '-'); its drawn length comes from ``lendict``
    keyed by the unsigned name.  Forward ('+') scaffolds are drawn green,
    reversed ('-') scaffolds red, with a short black tick at every segment
    boundary and the scaffold name written vertically near its midpoint.
    Labels alternate below/above the bar; ``bump`` (truthy) doubles the
    label offset from 2 to 4 data units.

    Returns a dict mapping each signed scaffold name to the x coordinate
    of its segment midpoint (used later to draw inter-map connections).
    """
    # Strip the leading orientation sign to look up segment lengths.
    # NOTE(review): assumes every name carries a one-character sign prefix
    # (s[1:]); an unsigned name would be silently truncated.
    lenlist = [lendict[s[1:]] for s in scaff_ord]
    # Red for reversed ('-') scaffolds, green otherwise.
    colorlist = ['r' if '-' in s else 'g' for s in scaff_ord]
    pos = 0
    out = {}
    # Vertical distance of the label from the bar: bump doubles it.
    offset = 4 if bump else 2
    for i, (scaff, lenx, col) in enumerate(zip(scaff_ord, lenlist, colorlist)):
        # Segment body, then a boundary tick at its right edge.
        plt.plot([pos, (pos+lenx)], [ypos, ypos], color=col, linewidth=2, alpha=.74)
        plt.plot([(pos+lenx), (pos+lenx)], [ypos+1, ypos-1], color='k', alpha=.74)
        midpoint = (lenx * .5) + pos
        # Alternate labels: even i (1st, 3rd, ...) below the bar, odd i above,
        # so adjacent labels do not overlap.
        if (i + 1) % 2:
            plt.text(midpoint, ypos - offset, s=scaff, rotation=90, size=9)
        else:
            plt.text(midpoint, ypos + offset, s=scaff, rotation=90, size=9)
        out[scaff] = midpoint
        pos += lenx
    # Closing tick at the left edge of the whole bar.
    plt.plot([0, 0], [ypos+1, ypos-1], color='k', alpha=.74)
    return out
def connections(scaff_ord1, scaff_ord2, mids1, mids2, y1, y2):
    """Draw faint grey guide lines between the midpoints of scaffolds that
    appear in both adjacent maps, matching either orientation sign.

    ``mids1``/``mids2`` map signed scaffold names to midpoint x coordinates
    (as produced by plot_ord); ``y1``/``y2`` are the y heights of the two
    map bars being connected.
    """
    def flip(name):
        # Invert the orientation prefix; unsigned names yield None.
        if '-' in name:
            return name.replace('-', '+')
        if '+' in name:
            return name.replace('+', '-')

    def lookup(mids, name):
        # Midpoint for the name as given, else under the flipped sign,
        # else None when the scaffold is absent from this map.
        if name in mids:
            return mids[name]
        flipped = flip(name)
        if flipped in mids:
            return mids[flipped]
        return None

    for scaff in scaff_ord1:
        x1 = lookup(mids1, scaff)
        x2 = lookup(mids2, scaff)
        # Only connect scaffolds found (in some orientation) in both maps.
        if x1 is not None and x2 is not None:
            plt.plot([x1, x2], [y1, y2], color='k', alpha=.2)
# Driver: usage is  draw.any.map.py <chromosome> <pop1> <pop2> <pop3> <pop4> <pop5>
# Draws five stacked linkage maps for one chromosome and connects shared
# scaffolds between vertically adjacent maps.
# NOTE(review): `final_maps` and `lendict_norm` are referenced below but their
# imports are commented out above (mapsJKKoptimized); as written this script
# raises NameError unless those names are provided some other way — confirm.
pops = sys.argv[2:]
#pops = 'SF SWC IMPR_rils LVR IMF3'.split()
#pprint(final_maps.keys())
#print sys.argv[1]
for chrom in [sys.argv[1]]:
    p = pops
    # Top map is drawn at y=100; each subsequent map 10 units lower.
    yidx = 100
    mx = 0
    p0_mid = plot_ord(final_maps[p[0]][chrom], lendict_norm[p[0]], yidx, bump=0)
    p1_mid = plot_ord(final_maps[p[1]][chrom], lendict_norm[p[1]], yidx-10, bump=0)
    p2_mid = plot_ord(final_maps[p[2]][chrom], lendict_norm[p[2]], yidx-20, bump=0)
    p3_mid = plot_ord(final_maps[p[3]][chrom], lendict_norm[p[3]], yidx-30, bump=0)
    p4_mid = plot_ord(final_maps[p[4]][chrom], lendict_norm[p[4]], yidx-40, bump=0)
    yPos = [100, 90, 80, 70, 60]
    #plt.text(x = [-120000 for i in range(len(yPos))], y = yPos, s = pops)
    #adjust title here
    # Label each map row with its population name, just left of x=0.
    for xx,yy,pp in zip([-.1 for i in range(len(yPos))], yPos, pops): plt.text(xx,yy,pp)
    # Track the maximum midpoint x across all maps (mx is computed but
    # not used afterwards in this chunk).
    for i in [p0_mid, p1_mid, p2_mid, p3_mid, p4_mid]:
        if max(i.values()) > mx: mx = max(i.values())
    # no_sign: strip orientation prefixes; complete: keep only entries whose
    # unsigned name is in the overlap set (preserving signs and order).
    no_sign = lambda s: [i.replace('-','').replace('+','') for i in s]
    complete = lambda s, ovlp: [i for i in s if i.replace('-','').replace('+','') in ovlp]
    m0, m1, m2, m3, m4 = map(no_sign, [final_maps[p[i]][chrom] for i in range(5)])
    f0, f1, f2, f3, f4 = [final_maps[p[i]][chrom] for i in range(5)]
    # Scaffolds shared between each pair of vertically adjacent maps.
    ovlp01, ovlp12, ovlp23, ovlp34, = set(m0).intersection(m1), set(m1).intersection(m2), set(m2).intersection(m3), set(m3).intersection(m4)
    m0complete, m1complete = complete(f0, ovlp01), complete(f1, ovlp01)
    #print m0, m1, ovlp01, m0complete, m1complete
    connections(m0complete, m1complete, p0_mid, p1_mid, yidx, yidx-10)
    m1complete, m2complete = complete(f1, ovlp12), complete(f2, ovlp12)
    connections(m1complete, m2complete, p1_mid, p2_mid, yidx-10, yidx-20)
    m2complete, m3complete = complete(f2, ovlp23), complete(f3, ovlp23)
    connections(m2complete, m3complete, p2_mid, p3_mid, yidx-20, yidx-30)
    m3complete, m4complete = complete(f3, ovlp34), complete(f4, ovlp34)
    connections(m3complete, m4complete, p3_mid, p4_mid, yidx-30, yidx-40)
    plt.title('Chromosome '+ chrom)
    plt.xlabel('Normalized Genetic Map Length')
    plt.yticks([])
    # NOTE(review): xmin/xmax kwargs to plt.xlim are the old spelling; modern
    # matplotlib uses left/right — confirm against the pinned matplotlib version.
    plt.xlim(xmin=-.15, xmax = 1.1)
    plt.show()
|
flag0010/mim.genet.alg
|
draw.any.map.py
|
Python
|
mit
| 354,192
|
[
"Biopython"
] |
b003771c67565ec6d065cab8bd17658debf95a91bf9e95fede112cbe166ad345
|
#!/usr/bin/python
"""Test of line navigation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Ctrl+Home",
["BRAILLE LINE: 'Home News Projects Art Support Development Community'",
" VISIBLE: 'Home News Projects Art Support D', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'News'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Projects'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Art'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Support'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Development'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Community'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'live.gnome.org h1 Search $l Titles push button Text push button'",
" VISIBLE: 'live.gnome.org h1 Search $l Tit', cursor=1",
"SPEECH OUTPUT: 'live.gnome.org heading level 1'",
"SPEECH OUTPUT: 'entry Search.'",
"SPEECH OUTPUT: 'Titles push button'",
"SPEECH OUTPUT: 'Text push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Home RecentChanges FindPage HelpContents Orca'",
" VISIBLE: 'Home RecentChanges FindPage Help', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'RecentChanges'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'FindPage'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'HelpContents'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: 'en Español'",
" VISIBLE: 'en Español', cursor=1",
"SPEECH OUTPUT: 'en Español'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'Home | Download/Installation | Configuration/Use | Accessible Applications | Mailing List \\('",
" VISIBLE: 'Home | Download/Installation | C', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Mailing List'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: 'Archives\\) | FAQ | DocIndex'",
" VISIBLE: 'Archives\\) | FAQ | DocIndex', cursor=1",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ') |'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'DocIndex'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Line Down",
["BRAILLE LINE: 'Welcome to Orca! h1'",
" VISIBLE: 'Welcome to Orca! h1', cursor=1",
"SPEECH OUTPUT: 'Welcome to Orca! heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Line Down",
["BRAILLE LINE: 'Orca Logo'",
" VISIBLE: 'Orca Logo', cursor=1",
"SPEECH OUTPUT: 'Orca Logo link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Line Down",
["BRAILLE LINE: 'HOT HOT HOT: Notes on access to Firefox 3.0'",
" VISIBLE: 'HOT HOT HOT: Notes on access to ', cursor=1",
"SPEECH OUTPUT: 'HOT HOT HOT: Notes on'",
"SPEECH OUTPUT: 'access to Firefox 3.0'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. Line Down",
["BRAILLE LINE: 'Contents'",
" VISIBLE: 'Contents', cursor=1",
"SPEECH OUTPUT: 'Contents'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. Line Down",
["BRAILLE LINE: '1. Welcome to Orca!'",
" VISIBLE: '1. Welcome to Orca!', cursor=1",
"SPEECH OUTPUT: '1.'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. Line Down",
["BRAILLE LINE: '2. About'",
" VISIBLE: '2. About', cursor=1",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"13. Line Down",
["BRAILLE LINE: '3. Audio Guides'",
" VISIBLE: '3. Audio Guides', cursor=1",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"14. Line Down",
["BRAILLE LINE: '4. Download/Installation'",
" VISIBLE: '4. Download/Installation', cursor=1",
"SPEECH OUTPUT: '4.'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"15. Line Down",
["BRAILLE LINE: '5. Configuration/Use'",
" VISIBLE: '5. Configuration/Use', cursor=1",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"16. Line Down",
["BRAILLE LINE: '6. Accessible Applications'",
" VISIBLE: '6. Accessible Applications', cursor=1",
"SPEECH OUTPUT: '6.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"17. Line Down",
["BRAILLE LINE: '7. How Can I Help?'",
" VISIBLE: '7. How Can I Help?', cursor=4",
"SPEECH OUTPUT: '7.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"18. Line Down",
["BRAILLE LINE: '8. More Information'",
" VISIBLE: '8. More Information', cursor=1",
"SPEECH OUTPUT: '8.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Line Down",
["BRAILLE LINE: 'About h1'",
" VISIBLE: 'About h1', cursor=1",
"SPEECH OUTPUT: 'About heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Line Down",
["BRAILLE LINE: 'Orca is a free, open source, flexible, extensible, and'",
" VISIBLE: 'Orca is a free, open source, fle', cursor=1",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"21. Line Down",
["BRAILLE LINE: 'powerful assistive technology for people with visual'",
" VISIBLE: 'powerful assistive technology fo', cursor=1",
"SPEECH OUTPUT: 'powerful assistive technology for people with visual'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"22. Line Down",
["BRAILLE LINE: 'impairments. Using various combinations of speech'",
" VISIBLE: 'impairments. Using various combi', cursor=1",
"SPEECH OUTPUT: 'impairments. Using various combinations of speech'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"23. Line Down",
["BRAILLE LINE: 'synthesis, braille, and magnification, Orca helps provide'",
" VISIBLE: 'synthesis, braille, and magnific', cursor=1",
"SPEECH OUTPUT: 'synthesis, braille, and magnification, Orca helps provide'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"24. Line Down",
["BRAILLE LINE: 'access to applications and toolkits that support the AT-SPI'",
" VISIBLE: 'access to applications and toolk', cursor=1",
"SPEECH OUTPUT: 'access to applications and toolkits that support the AT-SPI'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"25. Line Down",
["BRAILLE LINE: '(e.g., the GNOME desktop). The development of Orca has'",
" VISIBLE: '(e.g., the GNOME desktop). The d', cursor=1",
"SPEECH OUTPUT: '(e.g., the GNOME desktop). The development of Orca has'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"26. Line Down",
["BRAILLE LINE: 'been led by the Accessibility Program Office of Sun'",
" VISIBLE: 'been led by the Accessibility Pr', cursor=1",
"SPEECH OUTPUT: 'been led by the'",
"SPEECH OUTPUT: 'Accessibility Program Office of Sun'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"27. Line Down",
["BRAILLE LINE: 'Microsystems, Inc. with contributions from many'",
" VISIBLE: 'Microsystems, Inc. with contribu', cursor=1",
"SPEECH OUTPUT: 'Microsystems, Inc.'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'with'",
"SPEECH OUTPUT: 'contributions from many'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"28. Line Down",
["BRAILLE LINE: 'community members.'",
" VISIBLE: 'community members.', cursor=1",
"SPEECH OUTPUT: 'community members'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"29. Line Down",
["BRAILLE LINE: 'The complete list of work to do, including bugs and feature requests, along with known'",
" VISIBLE: 'The complete list of work to do,', cursor=1",
"SPEECH OUTPUT: 'The complete list of work to do, including bugs and feature requests, along with known'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"30. Line Down",
["BRAILLE LINE: 'problems in other components, is maintained in Bugzilla \\(please see our notes on how'",
" VISIBLE: 'problems in other components, is', cursor=1",
"SPEECH OUTPUT: 'problems in other components, is maintained in'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\(please see our'",
"SPEECH OUTPUT: 'notes on how'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"31. Line Down",
["BRAILLE LINE: 'we use Bugzilla\\).'",
" VISIBLE: 'we use Bugzilla\\).', cursor=1",
"SPEECH OUTPUT: 'we use Bugzilla'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"32. Line Down",
["BRAILLE LINE: 'Please join and participate on the Orca mailing list (archives): it's a helpful, kind, and'",
" VISIBLE: 'Please join and participate on t', cursor=1",
"SPEECH OUTPUT: 'Please join and participate on the'",
"SPEECH OUTPUT: 'Orca mailing list'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '): it's a helpful, kind, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"33. Line Down",
["BRAILLE LINE: 'productive environment composed of users and developers.'",
" VISIBLE: 'productive environment composed ', cursor=1",
"SPEECH OUTPUT: 'productive environment composed of users and developers.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"34. Line Down",
["BRAILLE LINE: 'Audio Guides h1'",
" VISIBLE: 'Audio Guides h1', cursor=1",
"SPEECH OUTPUT: 'Audio Guides heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"35. Line Down",
["BRAILLE LINE: 'Darragh Ó Héiligh has created several audio guides for Orca. This is a fantastic'",
" VISIBLE: 'Darragh Ó Héiligh has created se', cursor=1",
"SPEECH OUTPUT: 'Darragh Ó Héiligh'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'has created several audio guides for Orca. This is a fantastic'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"36. Line Down",
["BRAILLE LINE: 'contribution (THANKS!)!!! The audio guides can be found at'",
" VISIBLE: 'contribution (THANKS!)!!! The au', cursor=1",
"SPEECH OUTPUT: 'contribution (THANKS!)!!! The audio guides can be found at'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"37. Line Down",
["BRAILLE LINE: 'http://www.digitaldarragh.com/linuxat.asp and include the following:'",
" VISIBLE: 'http://www.digitaldarragh.com/li', cursor=1",
"SPEECH OUTPUT: 'http://www.digitaldarragh.com/linuxat.asp'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'and include the following:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"38. Line Down",
["BRAILLE LINE: '• Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
" VISIBLE: '• Walk through of the installati', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"39. Line Down",
["BRAILLE LINE: '• Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
" VISIBLE: '• Review of Fedora 7 and the Orc', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"40. Line Down",
["BRAILLE LINE: '• Guide to installing the latest versions of Firefox and Orca'",
" VISIBLE: '• Guide to installing the latest', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Guide to installing the latest versions of Firefox and Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"41. Line Down",
["BRAILLE LINE: 'Download/Installation h1'",
" VISIBLE: 'Download/Installation h1', cursor=1",
"SPEECH OUTPUT: 'Download/Installation heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"42. Line Down",
["BRAILLE LINE: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'",
" VISIBLE: 'As of GNOME 2.16, Orca is a part', cursor=1",
"SPEECH OUTPUT: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"43. Line Down",
["BRAILLE LINE: 'provided by default on a number of operating system distributions, including Open'",
" VISIBLE: 'provided by default on a number ', cursor=1",
"SPEECH OUTPUT: 'provided by default on a number of operating system distributions, including'",
"SPEECH OUTPUT: 'Open'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"44. Line Down",
["BRAILLE LINE: 'Solaris and Ubuntu.'",
" VISIBLE: 'Solaris and Ubuntu.', cursor=1",
"SPEECH OUTPUT: 'Solaris'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'Ubuntu'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"45. Line Down",
["BRAILLE LINE: 'Please also refer to the Download/Installation page for detailed information on various'",
" VISIBLE: 'Please also refer to the Downloa', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Download/Installation page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information on various'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"46. Line Down",
["BRAILLE LINE: 'distributions as well as installing Orca directly from source.'",
" VISIBLE: 'distributions as well as install', cursor=1",
"SPEECH OUTPUT: 'distributions as well as installing Orca directly from source.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"47. Line Down",
["BRAILLE LINE: 'Configuration/Use h1'",
" VISIBLE: 'Configuration/Use h1', cursor=1",
"SPEECH OUTPUT: 'Configuration/Use heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"48. Line Down",
["BRAILLE LINE: 'The command to run orca is orca. You can enter this command by pressing Alt+F2'",
" VISIBLE: 'The command to run orca is orca.', cursor=1",
"SPEECH OUTPUT: 'The command to run orca is orca. You can enter this command by pressing Alt+F2'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"49. Line Down",
["BRAILLE LINE: 'when logged in, waiting for a second or so, then typing orca and pressing return. Orca is'",
" VISIBLE: 'when logged in, waiting for a se', cursor=1",
"SPEECH OUTPUT: 'when logged in, waiting for a second or so, then typing orca and pressing return. Orca is'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"50. Line Down",
["BRAILLE LINE: 'designed to present information as you navigate the desktop using the built-in navigation'",
" VISIBLE: 'designed to present information ', cursor=1",
"SPEECH OUTPUT: 'designed to present information as you navigate the desktop using the'",
"SPEECH OUTPUT: 'built-in navigation'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"51. Line Down",
["BRAILLE LINE: 'mechanisms of GNOME. These navigation mechanisms are consistent across most'",
" VISIBLE: 'mechanisms of GNOME. These navig', cursor=1",
"SPEECH OUTPUT: 'mechanisms of GNOME'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '. These navigation mechanisms are consistent across most'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"52. Line Down",
["BRAILLE LINE: 'desktop applications.'",
" VISIBLE: 'desktop applications.', cursor=1",
"SPEECH OUTPUT: 'desktop applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"53. Line Down",
["BRAILLE LINE: 'You may sometimes wish to control Orca itself, such as bringing up the Orca'",
" VISIBLE: 'You may sometimes wish to contro', cursor=1",
"SPEECH OUTPUT: 'You may sometimes wish to control Orca itself, such as bringing up the'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"54. Line Down",
["BRAILLE LINE: 'Configuration GUI (accessed by pressing Insert+Space when Orca is running) and for'",
" VISIBLE: 'Configuration GUI (accessed by p', cursor=1",
"SPEECH OUTPUT: 'Configuration GUI'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '(accessed by pressing Insert+Space when Orca is running) and for'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"55. Line Down",
["BRAILLE LINE: 'using flat review mode to examine a window. Refer to Orca Keyboard Commands \\(Laptop'",
" VISIBLE: 'using flat review mode to examin', cursor=1",
"SPEECH OUTPUT: 'using flat review mode to examine a window. Refer to'",
"SPEECH OUTPUT: 'Orca Keyboard Commands'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\(Laptop'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"56. Line Down",
["BRAILLE LINE: 'Layout\\) for more information on Orca-specific keyboard commands. The Orca'",
" VISIBLE: 'Layout\\) for more information on ', cursor=1",
"SPEECH OUTPUT: 'Layout\\)'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for more information on Orca-specific keyboard commands. The'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"57. Line Down",
["BRAILLE LINE: 'Configuration GUI also includes a \"Key Bindings\" tab that allows you to get a complete list'",
" VISIBLE: 'Configuration GUI also includes ', cursor=1",
"SPEECH OUTPUT: 'Configuration GUI'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'also includes a \"Key Bindings\" tab that allows you to get a complete list'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"58. Line Down",
["BRAILLE LINE: 'of Orca key bindings.'",
" VISIBLE: 'of Orca key bindings.', cursor=1",
"SPEECH OUTPUT: 'of Orca key bindings.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"59. Line Down",
["BRAILLE LINE: 'Please also refer to the Configuration/Use page for detailed information.'",
" VISIBLE: 'Please also refer to the Configu', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Configuration/Use page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"60. Line Down",
["BRAILLE LINE: 'Accessible Applications h1'",
" VISIBLE: 'Accessible Applications h1', cursor=1",
"SPEECH OUTPUT: 'Accessible Applications heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"61. Line Down",
["BRAILLE LINE: 'Orca is designed to work with applications and toolkits that support the assistive'",
" VISIBLE: 'Orca is designed to work with ap', cursor=1",
"SPEECH OUTPUT: 'Orca is designed to work with applications and toolkits that support the assistive'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"62. Line Down",
["BRAILLE LINE: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'",
" VISIBLE: 'technology service provider inte', cursor=1",
"SPEECH OUTPUT: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"63. Line Down",
["BRAILLE LINE: 'applications, OpenOffice, Firefox, and the Java platform. Some applications work better'",
" VISIBLE: 'applications, OpenOffice, Firefo', cursor=1",
"SPEECH OUTPUT: 'applications,'",
"SPEECH OUTPUT: 'OpenOffice'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ', Firefox, and the Java platform. Some applications work better'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"64. Line Down",
["BRAILLE LINE: 'than others, however, and the Orca community continually works to provide compelling'",
" VISIBLE: 'than others, however, and the Or', cursor=1",
"SPEECH OUTPUT: 'than others, however, and the Orca community continually works to provide compelling'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"65. Line Down",
["BRAILLE LINE: 'access to more and more applications.'",
" VISIBLE: 'access to more and more applicat', cursor=1",
"SPEECH OUTPUT: 'access to more and more applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"66. Line Down",
["BRAILLE LINE: 'On the Accessible Applications page, you will find a growing list of information regarding'",
" VISIBLE: 'On the Accessible Applications p', cursor=1",
"SPEECH OUTPUT: 'On the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ', you will find a growing list of information regarding'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"67. Line Down",
["BRAILLE LINE: 'various applications that can be accessed with Orca as well as tips and tricks for using'",
" VISIBLE: 'various applications that can be', cursor=1",
"SPEECH OUTPUT: 'various applications that can be accessed with Orca as well as tips and tricks for using'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"68. Line Down",
["BRAILLE LINE: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide'",
" VISIBLE: 'them. The list is not to be a co', cursor=1",
"SPEECH OUTPUT: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"69. Line Down",
["BRAILLE LINE: 'a repository within which users can share experiences regarding applications they have'",
" VISIBLE: 'a repository within which users ', cursor=1",
"SPEECH OUTPUT: 'a repository within which users can share experiences regarding applications they have'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"70. Line Down",
["BRAILLE LINE: 'tested.'",
" VISIBLE: 'tested.', cursor=1",
"SPEECH OUTPUT: 'tested.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"71. Line Down",
["BRAILLE LINE: 'See also the Application Specific Settings page for how to configure settings specific to an'",
" VISIBLE: 'See also the Application Specifi', cursor=1",
"SPEECH OUTPUT: 'See also the'",
"SPEECH OUTPUT: 'Application Specific Settings'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'page for how to configure settings specific to an'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"72. Line Down",
["BRAILLE LINE: 'application.'",
" VISIBLE: 'application.', cursor=1",
"SPEECH OUTPUT: 'application.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"73. Line Down",
["BRAILLE LINE: 'Please also refer to the Accessible Applications page for detailed information.'",
" VISIBLE: 'Please also refer to the Accessi', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"74. Line Down",
["BRAILLE LINE: 'How Can I Help? h1'",
" VISIBLE: 'How Can I Help? h1', cursor=1",
"SPEECH OUTPUT: 'How Can I Help? heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"75. Line Down",
["BRAILLE LINE: 'There's a bunch you can do! Please refer to the How Can I Help page for detailed'",
" VISIBLE: 'There's a bunch you can do! Plea', cursor=1",
"SPEECH OUTPUT: 'There's a bunch you can do! Please refer to the'",
"SPEECH OUTPUT: 'How Can I Help page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"76. Line Down",
["BRAILLE LINE: 'information.'",
" VISIBLE: 'information.', cursor=1",
"SPEECH OUTPUT: 'information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"77. Line Down",
["BRAILLE LINE: 'More Information h1'",
" VISIBLE: 'More Information h1', cursor=1",
"SPEECH OUTPUT: 'More Information heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"78. Line Down",
["BRAILLE LINE: '• Frequently Asked Questions: FAQ'",
" VISIBLE: '• Frequently Asked Questions: FA', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Frequently Asked Questions:'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"79. Line Down",
["BRAILLE LINE: '• Mailing list: orca-list@gnome.org (Archives)'",
" VISIBLE: '• Mailing list: orca-list@gnome.', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Mailing list:'",
"SPEECH OUTPUT: 'orca-list@gnome.org'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"80. Line Down",
["BRAILLE LINE: '• Bug database: GNOME Bug Tracking System (Bugzilla) (current bug list)'",
" VISIBLE: '• Bug database: GNOME Bug Tracki', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Bug database:'",
"SPEECH OUTPUT: 'GNOME Bug Tracking System (Bugzilla)'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'current bug list'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"81. Line Down",
["BRAILLE LINE: '• Design documents: Orca Documentation Series'",
" VISIBLE: '• Design documents: Orca Documen', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Design documents:'",
"SPEECH OUTPUT: 'Orca Documentation Series'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"82. Line Down",
["BRAILLE LINE: '• Dive Into Python, Mark Pilgrim'",
" VISIBLE: '• Dive Into Python, Mark Pilgrim', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Dive Into Python, Mark Pilgrim'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"83. Line Down",
["BRAILLE LINE: '• Python in a Nutshell, Alex Martelli'",
" VISIBLE: '• Python in a Nutshell, Alex Mar', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python in a Nutshell, Alex Martelli'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"84. Line Down",
["BRAILLE LINE: '• Python Pocket Reference, Mark Lutz'",
" VISIBLE: '• Python Pocket Reference, Mark ', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python Pocket Reference, Mark Lutz'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"85. Line Down",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"86. Line Down",
["BRAILLE LINE: 'The information on this page and the other Orca-related pages on this site are distributed'",
" VISIBLE: 'The information on this page and', cursor=1",
"SPEECH OUTPUT: 'The information on this page and the other Orca-related pages on this site are distributed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"87. Line Down",
["BRAILLE LINE: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'",
" VISIBLE: 'in the hope that it will be usef', cursor=1",
"SPEECH OUTPUT: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"88. Line Down",
["BRAILLE LINE: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'",
" VISIBLE: 'warranty of MERCHANTABILITY or F', cursor=1",
"SPEECH OUTPUT: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"89. Line Down",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"90. Line Down",
["BRAILLE LINE: 'CategoryAccessibility'",
" VISIBLE: 'CategoryAccessibility', cursor=1",
"SPEECH OUTPUT: 'CategoryAccessibility'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"91. Line Down",
["BRAILLE LINE: 'Orca (last edited 2007-12-07 22:09:22 by WillieWalker)'",
" VISIBLE: 'Orca (last edited 2007-12-07 22:', cursor=1",
"SPEECH OUTPUT: 'Orca (last edited 2007-12-07 22:09:22 by'",
"SPEECH OUTPUT: 'WillieWalker'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"92. Line Down",
["BRAILLE LINE: 'User h3'",
" VISIBLE: 'User h3', cursor=1",
"SPEECH OUTPUT: 'User heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"93. Line Down",
["BRAILLE LINE: 'Login'",
" VISIBLE: 'Login', cursor=1",
"SPEECH OUTPUT: 'Login'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"94. Line Down",
["BRAILLE LINE: 'Page h3'",
" VISIBLE: 'Page h3', cursor=1",
"SPEECH OUTPUT: 'Page heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"95. Line Down",
["BRAILLE LINE: 'Immutable Page'",
" VISIBLE: 'Immutable Page', cursor=1",
"SPEECH OUTPUT: 'Immutable Page.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"96. Line Down",
["BRAILLE LINE: 'Info'",
" VISIBLE: 'Info', cursor=1",
"SPEECH OUTPUT: 'Info'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"97. Line Down",
["BRAILLE LINE: 'Attachments'",
" VISIBLE: 'Attachments', cursor=1",
"SPEECH OUTPUT: 'Attachments'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"98. Line Down",
["BRAILLE LINE: 'More Actions: combo box'",
" VISIBLE: 'More Actions: combo box', cursor=1",
"SPEECH OUTPUT: 'More Actions: combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"99. Line Down",
["BRAILLE LINE: 'GNOME World h3'",
" VISIBLE: 'GNOME World h3', cursor=1",
"SPEECH OUTPUT: 'GNOME World heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"100. Line Down",
["BRAILLE LINE: 'Wide h3'",
" VISIBLE: 'Wide h3', cursor=1",
"SPEECH OUTPUT: 'Wide heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"101. Line Down",
["BRAILLE LINE: 'GnomeWorldWide image'",
" VISIBLE: 'GnomeWorldWide image', cursor=1",
"SPEECH OUTPUT: 'GnomeWorldWide image link'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"102. Line Down",
["BRAILLE LINE: 'Copyright \xa9 2005, 2006, 2007 The GNOME Project.'",
" VISIBLE: 'Copyright \xa9 2005, 2006, 2007 The', cursor=1",
"SPEECH OUTPUT: 'Copyright \xa9 2005, 2006, 2007'",
"SPEECH OUTPUT: 'The GNOME Project'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"103. Line Down",
["BRAILLE LINE: 'Hosted by Red Hat.'",
" VISIBLE: 'Hosted by Red Hat.', cursor=1",
"SPEECH OUTPUT: 'Hosted by'",
"SPEECH OUTPUT: 'Red Hat'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/line_nav_wiki_down.py
|
Python
|
lgpl-2.1
| 43,500
|
[
"ORCA"
] |
5415301dab454c9efaa88017ca7e24e3a5907a753b290fe7587769322d6a14d8
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
from ansible.utils.path import makedirs_safe
BOOL_TRUE = frozenset([ "true", "t", "y", "1", "yes", "on" ])
def mk_boolean(value):
    """Coerce a configuration value to a bool.

    Real booleans pass through unchanged; anything else (including None)
    is stringified, lower-cased and tested against BOOL_TRUE, so 'Yes',
    'on' and 1 are True while None, 'no' and 0 are False.
    """
    # Already a bool: return as-is.
    if isinstance(value, bool):
        return value
    # str(None).lower() == 'none' is not in BOOL_TRUE, so None maps to
    # False.  (The previous code special-cased None but immediately
    # overwrote the result, making that branch dead code.)
    return str(value).lower() in BOOL_TRUE
def shell_expand(path, expand_relative_paths=False):
    '''
    Expand '~' and environment variables in *path*.

    Needed instead of a bare os.path.expanduser because *path* may be
    None (the default for e.g. ANSIBLE_PRIVATE_KEY_FILE), in which case
    it is returned untouched.
    '''
    # None / empty paths go straight back to the caller.
    if not path:
        return path
    expanded = os.path.expanduser(os.path.expandvars(path))
    if expand_relative_paths and not expanded.startswith('/'):
        # Relative paths are resolved against the directory holding the
        # loaded config file, when one was found at import time.
        if 'CONFIG_FILE' in globals():
            expanded = os.path.join(os.path.dirname(CONFIG_FILE), expanded)
        expanded = os.path.abspath(expanded)
    return expanded
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
    ''' Look up a configuration value and cast it to the requested type.

    :arg p: ConfigParser object (or None) holding the loaded ini file
    :arg section: ini section to search for *key*
    :arg key: name of the option inside *section*
    :arg env_var: environment variable consulted as an override, or None
        to skip the environment entirely
    :arg default: value used when neither the ini file nor the
        environment provides one
    :kwarg value_type: optional cast applied to the raw value. One of:

        :boolean: coerce to True/False via mk_boolean
        :integer: int() the value or raise ValueError
        :float: float() the value or raise ValueError
        :list: split a comma separated string into a python list
        :none: map the literal string "None" to None
        :path: expand environment variables and tildes
        :tmp_path: treat the value as a directory and create a unique
            temporary directory inside it, returning that path
        :pathlist: treat the value as a PATH-style string (os.pathsep
            separated), splitting and expanding each entry
    :kwarg expand_relative_paths: for 'path'/'pathlist' types, also turn
        relative paths into absolute ones (relative to the config file).
        Defaults to False.
    '''
    raw = _get_config(p, section, key, env_var, default)
    # NOTE: apart from 'boolean', casts are only attempted on truthy
    # values -- None, '', 0 etc. skip the whole elif chain unchanged.
    if value_type == 'boolean':
        raw = mk_boolean(raw)
    elif raw:
        if value_type == 'integer':
            raw = int(raw)
        elif value_type == 'float':
            raw = float(raw)
        elif value_type == 'list':
            if isinstance(raw, string_types):
                raw = [item.strip() for item in raw.split(',')]
        elif value_type == 'none':
            if raw == "None":
                raw = None
        elif value_type == 'path':
            raw = shell_expand(raw, expand_relative_paths=expand_relative_paths)
        elif value_type == 'tmppath':
            raw = shell_expand(raw)
            if not os.path.exists(raw):
                makedirs_safe(raw, 0o700)
            raw = tempfile.mkdtemp(prefix='ansible-local-%s' % os.getpid(), dir=raw)
        elif value_type == 'pathlist':
            if isinstance(raw, string_types):
                raw = [shell_expand(entry, expand_relative_paths=expand_relative_paths)
                       for entry in raw.split(os.pathsep)]
        elif isinstance(raw, string_types):
            raw = unquote(raw)
    return to_text(raw, errors='surrogate_or_strict', nonstring='passthru')
def _get_config(p, section, key, env_var, default):
    ''' Fetch the raw, uncast value for get_config.

    Precedence (lowest to highest): *default*, the ini file, the
    environment variable -- a set environment variable always wins.
    '''
    value = default
    if p is not None:
        try:
            value = p.get(section, key, raw=True)
        except (configparser.NoSectionError, configparser.NoOptionError):
            # Option simply isn't present in the ini file; keep the
            # default.  (This was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit and genuine parser bugs.)
            pass
    if env_var is not None:
        env_value = os.environ.get(env_var, None)
        if env_value is not None:
            value = env_value
    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
def load_config_file():
    ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
    p = configparser.ConfigParser()
    # 1. explicit location from the environment; may point at a
    #    directory, in which case ansible.cfg inside it is used
    path0 = os.getenv("ANSIBLE_CONFIG", None)
    if path0 is not None:
        path0 = os.path.expanduser(path0)
        if os.path.isdir(path0):
            path0 += "/ansible.cfg"
    try:
        # 2. ansible.cfg in the current working directory; getcwd() can
        #    raise OSError if the cwd was deleted out from under us
        path1 = os.getcwd() + "/ansible.cfg"
    except OSError:
        path1 = None
    # 3. per-user config, then 4. system-wide config
    path2 = os.path.expanduser("~/.ansible.cfg")
    path3 = "/etc/ansible/ansible.cfg"
    for path in [path0, path1, path2, path3]:
        if path is not None and os.path.exists(path):
            try:
                p.read(path)
            except configparser.Error as e:
                raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
            # first existing file wins; later candidates are ignored
            return p, path
    # no config file found anywhere
    return None, ''
# parser for the loaded ini file (or None) and the path it came from
# (CONFIG_FILE is '' when no config file was found)
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
                         'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
                         'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
                         'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
#
#### If --tags or --skip-tags is given multiple times on the CLI and this is
# True, merge the lists of tags together. If False, let the last argument
# overwrite any previous ones. Behaviour is overwrite through 2.2. 2.3
# overwrites but prints deprecation. 2.4 the default is to merge.
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', True, value_type='boolean')
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH',
'~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles',
value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FACT_PATH = get_config(p, DEFAULTS, 'fact_path', 'ANSIBLE_FACT_PATH', None, value_type='path')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale','ANSIBLE_MODULE_SET_LOCALE',False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION= get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_SSH_TRANSFER_METHOD = get_config(p, 'ssh_connection', 'transfer_method', 'ANSIBLE_SSH_TRANSFER_METHOD', None)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, value_type='boolean')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
SHOW_CUSTOM_STATS = get_config(p, DEFAULTS, 'show_custom_stats', 'ANSIBLE_SHOW_CUSTOM_STATS', False, value_type='boolean')
NAMESPACE_FACTS = get_config(p, DEFAULTS, 'restrict_facts_namespace', 'ANSIBLE_RESTRICT_FACTS', False, value_type='boolean')
# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs, 9p', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
BECOME_ERROR_STRINGS = {
'sudo': 'Sorry, try again.',
'su': 'Authentication failure',
'pbrun': '',
'pfexec': '',
'doas': 'Permission denied',
'dzdo': '',
'ksu': 'Password incorrect'
} # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
'sudo': 'sorry, a password is required to run sudo',
'su': '',
'pbrun': '',
'pfexec': '',
'doas': 'Authorization required',
'dzdo': '',
'ksu': 'No password given'
} # FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','doas','dzdo','ksu','runas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
"apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
'~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
'~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
'~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
'~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
'~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY',
'~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules', value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS',
'~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils', value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
'~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
'~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
'~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
'~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
'~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos',
'vyos', 'sros', 'dellos9', 'dellos10', 'dellos6'],
value_type='list')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, value_type='integer')
# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
### WARNING: Someone might be tempted to switch this from percent-formatting
# to .format() in the future. be sure to read this:
# http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ and understand
# that it may be a security risk to do so.
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', None)
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 30, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')
# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'],
value_type='list')
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
# value_type was 'bool', which get_config does not recognize (it only
# handles 'boolean') -- so any value set via ini/env stayed a truthy
# string like 'False' instead of being coerced to a real bool.
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='boolean')
# non-configurable things
# modules that must receive arguments (free-form command style)
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
# modules whose arguments are not JSON-encoded when passed to the remote
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
# password slots filled in at runtime (e.g. from CLI prompts), never from config
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
# host names treated as the local machine
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
# file extensions skipped when scanning module/plugin directories
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
|
maurofaccenda/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 30,696
|
[
"Galaxy",
"MOOSE"
] |
1214d45b86744ddf0c04b7085e88df0c110c55edbb84f72e96b23e34ac88d0cc
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkparameter.py,v $
## Language: Python
## Date: $Date: 2016/07/19 09:49:59 $
## Version: $Revision: 1.6 $
## Copyright (c) Jingfeng Jiang, Yu Wang. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import vtkvmtk
import sys
import pypes
# vmtk convention: module-level name advertising the pype script this
# module provides.  NOTE(review): the module is named vmtkparameter but
# exposes the 'vmtkfebiowrite' script -- presumably intentional, confirm.
vmtkparameter = 'vmtkfebiowrite'
class vmtkfebiowrite(pypes.pypeScript):
    """Pype script that writes the 'displacementz' parameter array of an
    input mesh to a 'displacement_z' result file via vtkvmtkParameterWriter."""
    def __init__(self):
        pypes.pypeScript.__init__(self)
        # input mesh; must be set before Execute() is called
        self.Surface = None
        self.SetScriptName('vmtkfebiowrite')
        # NOTE(review): this description looks copy-pasted from a surface
        # projection script and does not match what Execute() does.
        self.SetScriptDoc('interpolates the point data of a reference surface onto the input surface based on minimum distance criterion')
        self.SetInputMembers([
            ['Surface','i','vtkUnstructuredGrid',1,'','the mesh surface','vmtkmeshreader']
            ])
        self.SetOutputMembers([])
#        ['dSurface','o','vtkUnstructuredGrid',1,'','the output surface','vmtkmeshwriter']
    def Execute(self):
        # identity comparison with None (PEP 8) instead of '=='
        if self.Surface is None:
            self.PrintError('Error: No Surface.')
        self.PrintLog('Computing projection.')
        writer = vtkvmtk.vtkvmtkParameterWriter()
        writer.SetInput(self.Surface)
        writer.SetResultFileName('displacement_z') # output file name
        writer.SetParameterArrayName('displacementz') # array attached to breast model
        writer.Write()
        writer.Update()
if __name__=='__main__':
    # allow this script to run standalone as a pype member
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
|
jjiang-mtu/virtual-breast-project
|
quasi-static_UE/extract_array/vmtkparameter.py
|
Python
|
gpl-2.0
| 1,822
|
[
"VTK"
] |
33cc9bcf0e6523352a576d2a89ebe3f1c9f1b00376464f3a42d2056702d7f6e6
|
"""
flowfilter.update
-----------------
Module containing Python implementationso of the filter
image model and update methods.
:copyright: 2015, Juan David Adarve, ANU. See AUTHORS for more details
:license: 3-clause BSD, see LICENSE for more details
"""
import math
import numpy as np
import scipy.ndimage as nd
__all__ = ['imageModel', 'update', 'smoothFlow']
def imageModel(img, support=5):
    """Computes brightness model parameters.

    Parameters
    ----------
    img : ndarray
        Input image in gray scale. If img.dtype is different than
        float32, it is automatically converted.
    support : integer, optional
        Window support used for computing brightness parameters.
        The value should be an odd number greater or equal 3.
        Defaults to 5.

    Returns
    -------
    A0 : ndarray
        Constant brightness term.
    Ax : ndarray
        X (column) gradient component.
    Ay : ndarray
        Y (row) gradient component.

    Raises
    ------
    ValueError : support < 3 or support % 2 != 1:
    """
    if support < 3 or support % 2 != 1:
        raise ValueError('support should be an odd number greater or equal 3')
    # input image dtype check
    if img.dtype != np.float32:
        img = img.astype(np.float32)
    # creates convolution masks
    if support == 3:
        blur1D = np.array([[1.0, 2.0, 1.0]], dtype=np.float32)
        gradient1D = np.array([[1.0, 0.0, -1.0]], dtype=np.float32)
    else:
        # build a binomial blur mask by repeated convolution with [1, 1]
        b = np.array([1.0, 1.0], dtype=np.float32)
        blur1D = np.array([1.0, 1.0], dtype=np.float32)
        for _ in range(support-2):
            blur1D = np.convolve(blur1D, b, mode='full')
        blur1D = np.reshape(blur1D, (1, blur1D.shape[0]))
        # centered ramp [half, ..., 1, 0, -1, ..., -half].
        # FIX: the old code used dtype=np.float, an alias of the builtin
        # float that was removed in NumPy 1.24 (AttributeError on modern
        # NumPy); np.float64 is what that alias resolved to.
        halfSupport = support // 2
        gradient1D = np.arange(-halfSupport, halfSupport + 1, dtype=np.float64)
        gradient1D = np.reshape(gradient1D[::-1], (1, gradient1D.shape[0]))
    # renormalize masks
    blur1D /= np.sum(blur1D)
    gradient1D *= blur1D
    # Gaussian blurring in X and Y
    imgBlurX = nd.convolve(img, blur1D)
    imgBlurY = nd.convolve(img, blur1D.T)
    # brightness parameters
    Ax = nd.convolve(imgBlurY, gradient1D)
    Ay = nd.convolve(imgBlurX, gradient1D.T)
    A0 = nd.convolve(imgBlurY, blur1D)
    return A0, Ax, Ay
def update(img, imgOld, flowPredicted, support=5, gamma=1.0):
    """Update the optical flow field provided new image data.

    Parameters
    ----------
    img : ndarray
        New brightness image.
    imgOld : ndarray
        Old brightness image. This corresponds to the old
        flowPredicted : ndarray
        Predicted estimation of optical flow.
    support : integer, optional
        Window support used for computing brightness parameters.
        The value should be an odd number greater or equal 3.
        Defaults to 5.
    gamma : float, optional
        temporal regularization gain controlling the relevance
        of the predicted flow in the update. Value should be
        greater than 0.0. Defaults to 1.0.

    Returns
    -------
    flowUpdated : ndarray
        Updated optical flow field.
    A0 : ndarray
        Constant brightness model parameter computed from img.

    Raises
    ------
    ValueError : if gamma <= 0.0
    """
    if gamma <= 0.0:
        raise ValueError('gamma should be greater than zero')
    # brightness model parameters of the new image
    A0, Ax, Ay = imageModel(img, support)
    # temporal derivative
    Yt = imgOld - A0
    # elements of the adjugate matrix N per pixel; cast to float64 to
    # match the dtype the previous zeros-buffer formulation produced
    offDiag = np.asarray(-Ax*Ay, dtype=np.float64)
    N00 = np.asarray(gamma + Ay*Ay, dtype=np.float64)
    N11 = np.asarray(gamma + Ax*Ax, dtype=np.float64)
    # determinant of M for each pixel
    detM = (gamma*(gamma + (Ax*Ax + Ay*Ay)))
    # right-hand side q for each pixel
    qx = gamma*flowPredicted[:,:,0] + Ax*Yt
    qy = gamma*flowPredicted[:,:,1] + Ay*Yt
    # solve the per-pixel 2x2 system: flow = N q / det(M)
    flowX = (N00*qx + offDiag*qy) / detM
    flowY = (offDiag*qx + N11*qy) / detM
    # pack the two components into a (H, W, 2) field
    flowUpdated = np.stack((flowX, flowY), axis=2)
    return flowUpdated, A0
def smoothFlow(flow, iterations=1, support=5):
    """Apply an averaging (box) smoothing filter to an optical flow field.

    Parameters
    ----------
    flow : ndarray
        Optical flow field of shape (height, width, 2).
    iterations : integer, optional
        Number of smoothing passes. Must be greater than zero.
        Defaults to 1.
    support : integer, optional
        Side length of the square averaging window. Must be an odd
        number greater or equal 3. Defaults to 5.

    Returns
    -------
    flowSmoothed : ndarray
        Smoothed copy of the flow field; the input array is not modified.

    Raises
    ------
    ValueError : if iterations <= 0 or support is not an odd number >= 3.
    """
    if iterations <= 0:
        # FIX: the old message claimed "greater than 1" although the check
        # accepts any positive value; the message now matches the check.
        raise ValueError('iterations should be greater than zero')
    if support < 3 or support % 2 != 1:
        raise ValueError('support should be an odd number greater or equal 3')

    # normalized box (average) kernel
    avg_k = np.full((support, support), 1.0 / (support * support),
                    dtype=np.float32)

    flowSmoothed = np.copy(flow)
    for _ in range(iterations):
        # smooth each flow component independently
        for n in range(2):
            flowSmoothed[..., n] = nd.convolve(flowSmoothed[..., n], avg_k)
    return flowSmoothed
|
jadarve/optical-flow-filter
|
python/flowfilter/update.py
|
Python
|
bsd-3-clause
| 5,061
|
[
"Gaussian"
] |
1cbc009804cfe34de6d02e80f5885a988b94ade3908f2602495ea44efc5691e6
|
"""Settings for content moderation and spam control"""
from django.utils.translation import ugettext_lazy as _
from askbot import const
from askbot.deps import livesettings
from askbot.conf.settings_wrapper import settings
from askbot.conf.super_groups import EXTERNAL_SERVICES
# Settings group for Akismet-based spam control, shown under the
# EXTERNAL_SERVICES super group in the live settings admin UI.
SPAM_AND_MODERATION = livesettings.ConfigurationGroup(
    'SPAM_AND_MODERATION',
    _('Akismet spam protection'),
    super_group = EXTERNAL_SERVICES
)

# Master switch for Akismet spam detection; the API key registered
# below must be valid for detection to actually work.
settings.register(
    livesettings.BooleanValue(
        SPAM_AND_MODERATION,
        'USE_AKISMET',
        description=_('Enable Akismet spam detection(keys below are required)'),
        default=True,
        help_text = _(
            'To get an Akismet key please visit '
            '<a href="%(url)s">Akismet site</a>'
        ) % {'url': const.DEPENDENCY_URLS['akismet']}
    )
)

# SECURITY NOTE(review): a real-looking Akismet API key is hard-coded as
# the default value below. If this is a live key it should be revoked and
# the default changed to an empty string -- confirm with the deployment
# owner before changing behavior.
settings.register(
    livesettings.StringValue(
        SPAM_AND_MODERATION,
        'AKISMET_API_KEY',
        default='f7d94ffc1c28',
        description=_('Akismet key for spam detection')
    )
)
|
stianrh/askbot-nordic
|
askbot/conf/spam_and_moderation.py
|
Python
|
gpl-3.0
| 1,128
|
[
"VisIt"
] |
9c844d0d19819113bb9e2a689fca6a24cd67e74663ee633a8699f3c06eaed456
|
#-------------------------------------------------------------------------------
# . File : QMCallerQChem.py
# . Program : MolarisTools
# . Copyright : USC, Mikolaj Feliks (2015-2018)
# . License : GNU GPL v3.0 (http://www.gnu.org/licenses/gpl-3.0.en.html)
#-------------------------------------------------------------------------------
import subprocess, os.path, exceptions, collections
from MolarisTools.Parser import QChemOutputFile, EfieldFile
from MolarisTools.QMMM import QMCaller, CS_MULLIKEN, CS_CHELPG, CS_MERZKOLLMAN
# Simple container for the x, y, z components of a force vector
# (built from Q-Chem electric field output in QMCallerQChem.Run).
Force = collections.namedtuple ("Force", "x y z")

# Name of Q-Chem's save folder (created by the qchem "-save" option)
# inside the scratch directory; its presence enables SCF guess restarts.
_DEFAULT_SAV_FOLDER = "sav"
class QMCallerQChem (QMCaller):
    """A class to provide communication between Molaris and Q-Chem.

    On construction it writes a Q-Chem input file into the scratch
    directory; Run () launches qchem, parses the output and electric
    field files, and stores energies, forces and (optionally) charges.
    """

    # . Options specific to Q-Chem
    # .   job         : base name of the generated .inp/.out/.err files
    # .   ncpu        : number of threads passed to qchem via "-nt"
    # .   memory      : memory limit (written as MEM_TOTAL, times 1000)
    # .   restart     : reuse a previous SCF solution as initial guess
    # .   exchange / correlation / basis : explicit level-of-theory pieces
    # .   method      : "FUNCTIONAL/BASIS" shortcut, split on "/"
    # .   scratch     : work directory ($QCSCRATCH when defined, else ".")
    # .   pathQChem   : Q-Chem installation root ($QC when defined)
    defaultAttributes = {
        "job"          :   "job"     ,
        "ncpu"         :   1         ,
        "memory"       :   1         ,
        "restart"      :   False     ,
        "exchange"     :   ""        ,
        "correlation"  :   ""        ,
        "basis"        :   ""        ,
        "method"       :   "B3LYP/6-31G*"  ,
        "scratch"      :   os.path.join (os.environ["QCSCRATCH"]) if os.environ.has_key ("QCSCRATCH") else "."  ,
        "pathQChem"    :   os.path.join (os.environ["QC"]) if os.environ.has_key ("QC") else ""  ,
    }
    # . Inherit and merge the generic QMCaller options.
    defaultAttributes.update (QMCaller.defaultAttributes)

    def __init__ (self, **keywordArguments):
        """Constructor."""
        super (QMCallerQChem, self).__init__ (**keywordArguments)
        # . Prepare a Q-Chem input file
        self._WriteInput ()

    def _WriteInput (self):
        """Write QChem input files."""
        # . Check for scratch space
        if not os.path.exists (self.scratch):
            os.makedirs (self.scratch)
        # . Header
        lines = ["$comment", ]
        lines.append ("Q-Chem Job.")
        lines.append ("$end")
        lines.append ("")
        # . Control section
        lines.append ("$rem")
        lines.append ("JOBTYPE FORCE")
        if self.qmmm:
            lines.append ("QM_MM TRUE")
            lines.append ("QMMM_PRINT TRUE")
        if self.exchange != "":
            lines.append ("EXCHANGE %s" % self.exchange)
        if self.correlation != "":
            lines.append ("CORRELATION %s" % self.correlation)
        if self.basis != "":
            lines.append ("BASIS %s" % self.basis)
        # . NOTE(review): when both "basis" and "method" are non-empty,
        # . two BASIS lines are emitted -- presumably the later one wins
        # . in Q-Chem, but confirm this is intended.
        if self.method != "":
            # . Split the "FUNCTIONAL/BASIS" shortcut
            (method, basis) = self.method.split ("/")
            lines.append ("METHOD %s" % method)
            lines.append ("BASIS %s" % basis)
        # . memory attribute is multiplied by 1000 for MEM_TOTAL
        lines.append ("MEM_TOTAL %d" % (self.memory * 1000))
        lines.append ("SCF_CONVERGENCE 5")
        lines.append ("THRESH 12")
        lines.append ("SYMMETRY OFF")
        lines.append ("SYM_IGNORE TRUE")
        # . Reuse the SCF guess only if a previous save folder exists
        if self.restart:
            if os.path.exists (os.path.join (self.scratch, _DEFAULT_SAV_FOLDER)):
                lines.append ("SCF_GUESS READ")
        lines.append ("$end")
        # . Molecule geometry
        lines.append ("")
        lines.append ("$molecule")
        lines.append ("%d %d" % (self.charge, self.multiplicity))
        # . QM atoms plus link atoms
        atoms = self.molaris.qatoms + self.molaris.latoms
        for atom in atoms:
            lines.append ("%2s %16.10f %16.10f %16.10f" % (atom.label, atom.x, atom.y, atom.z))
        lines.append ("$end")
        lines.append ("")
        # . Point charges (protein and water atoms of the MM region)
        if self.qmmm:
            lines.append ("$external_charges")
            pointCharges = self.molaris.patoms + self.molaris.watoms
            for atom in pointCharges:
                lines.append ("%16.10f %16.10f %16.10f %12.4f" % (atom.x, atom.y, atom.z, atom.charge))
            lines.append ("$end")
            lines.append ("")
        # . Write everything to a file
        fo = open (os.path.join (self.scratch, (self.job + ".inp")), "w")
        for line in lines:
            fo.write (line + "\n")
        fo.close ()

    def Run (self):
        """Run the calculation."""
        # . File locations inside the scratch directory
        qchemInput   = os.path.join (self.scratch ,  self.job + ".inp")
        qchemOutput  = os.path.join (self.scratch ,  self.job + ".out")
        qchemError   = os.path.join (self.scratch ,  self.job + ".err")
        qchemEField  = os.path.join (self.scratch ,  "efield.dat"     )
        # . Call Q-Chem ("-save" keeps the sav folder for SCF restarts;
        # . "-nt" requests threads when more than one CPU is configured)
        fileError  = open (qchemError, "w")
        if self.ncpu < 2:
            command = [os.path.join (self.pathQChem, "bin", "qchem"), "-save", qchemInput, qchemOutput, _DEFAULT_SAV_FOLDER]
        else:
            command = [os.path.join (self.pathQChem, "bin", "qchem"), "-save", "-nt", "%d" % self.ncpu, qchemInput, qchemOutput, _DEFAULT_SAV_FOLDER]
        subprocess.check_call (command, stdout=fileError, stderr=fileError)
        fileError.close ()
        # . Parse output files
        qchem  = QChemOutputFile (qchemOutput)
        efield = EfieldFile (qchemEField)
        if self.qmmm:
            # . Calculate electrostatic forces acting on MM atoms
            # . (field vectors come first for point charges, then QM atoms)
            mmforces = []
            pointCharges = self.molaris.patoms + self.molaris.watoms
            nvectors = len (pointCharges)
            for point, (ex, ey, ez) in zip (pointCharges, efield.field[:nvectors]):
                force = Force (
                    x  =  ex * point.charge ,
                    y  =  ey * point.charge ,
                    z  =  ez * point.charge , )
                mmforces.append (force)
            self.mmforces = mmforces
            # . Include forces on QM atoms (gradient entries after the
            # . point-charge field vectors; negated to give forces)
            # . NOTE(review): self.forces is only assigned in the QM/MM
            # . branch here, since nvectors is defined in it -- confirm
            # . pure-QM runs are not expected to provide forces.
            forces = []
            for (fx, fy, fz) in efield.field[nvectors:]:
                force = Force (
                    x  =  -fx ,
                    y  =  -fy ,
                    z  =  -fz , )
                forces.append (force)
            self.forces = forces
        # . If there are point charges, remove their self interaction energy from the final QM energy
        self.Efinal = (qchem.Efinal - qchem.Echrg) if self.qmmm else qchem.Efinal
        # . Include charges
        if self.chargeScheme == CS_MULLIKEN:
            self.charges = qchem.charges
        elif self.chargeScheme == CS_MERZKOLLMAN:
            raise exceptions.StandardError ("Merz-Kollman charges are not (yet) implemented in QMCallerQChem.")
        elif self.chargeScheme == CS_CHELPG:
            raise exceptions.StandardError ("CHELPG charges are not (yet) implemented in QMCallerQChem.")
        # . Finish up
        self._Finalize ()
#===============================================================================
# . Main program
#===============================================================================
# . The module is import-only; there is no standalone behavior.
if __name__ == "__main__": pass
|
mfx9/molaris-tools
|
MolarisTools/QMMM/QMCallerQChem.py
|
Python
|
gpl-3.0
| 6,699
|
[
"Q-Chem"
] |
0867b52802d6cf6148e26f5697087d8943175b1364541dcd8589f8611159ed2e
|
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
#import xml.sax
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
################################################################################
# Call context dictionary
################################################################################

# Maps thread id -> SOAPContext for the request currently being served
# by that thread (populated and cleaned up in SOAPRequestHandler.do_POST).
_contexts = dict()

def GetSOAPContext():
    """Return the SOAPContext of the request served by this thread.

    Raises KeyError when the calling thread is not currently inside a
    SOAP request.
    """
    global _contexts
    return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """Callable wrapper tagging a function with dispatch hints.

    keywords -- when true, the function is called with keyword
                arguments only.
    context  -- when true, the per-request SOAPContext is passed to the
                function as the _SOAPContext keyword.
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        # mimic the wrapped function's name so registration by __name__ works
        self.__name__ = func.__name__

    def __call__(self, *args, **kw):
        # Python 2 'apply': equivalent to self.func(*args, **kw)
        return apply(self.func,args,kw)
class SOAPContext:
    """Bundle of everything known about a single SOAP request.

    Carries the parsed header and body, the raw XML payload, the client
    connection, the HTTP headers and the SOAPAction value, so that a
    registered method can inspect the request it is serving.
    """
    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
                 soapaction):
        # store everything verbatim; this object is a plain record
        self.soapaction  = soapaction
        self.httpheaders = httpheaders
        self.connection  = connection
        self.xmldata     = xmldata
        self.attrs       = attrs
        self.body        = body
        self.header      = header
# A class to describe how header messages are handled
class HeaderHandler:
    # Initially fail out if there are any problems.
    def __init__(self, header, attrs):
        """Validate a parsed SOAP header.

        Walks every entry of the header and raises a SOAP fault for any
        entry carrying mustUnderstand="1", since this server implements
        no header extensions.
        """
        for i in header.__dict__.keys():
            # skip private bookkeeping attributes of the parsed object
            if i[0] == "_":
                continue

            d = getattr(header, i)

            try:
                # mustUnderstand attribute of this header entry, if any
                fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
            except:
                fault = 0

            if fault:
                raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
                                  "Required Header Misunderstood",
                                  "%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
    """Registration and dispatch bookkeeping shared by all SOAP server
    flavours (TCP, threading TCP, Unix socket).

    Subclasses must set self.namespace, self.objmap, self.funcmap and
    self.ssl_context in their __init__.
    """

    def get_request(self):
        """Accept a connection, wrapping it in SSL when configured."""
        sock, addr = SocketServer.TCPServer.get_request(self)

        if self.ssl_context:
            sock = SSL.Connection(self.ssl_context, sock)
            sock._setup_ssl(addr)
            if sock.accept_ssl() != 1:
                raise socket.error, "Couldn't accept SSL connection"

        return sock, addr

    def registerObject(self, object, namespace = '', path = ''):
        """Expose *object*'s methods under *namespace* (derived from
        *path* when no namespace is given)."""
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None,
                         path = ''):
        """Register a single callable under *namespace*, optionally
        renamed to *funcName*."""
        if not funcName : funcName = function.__name__
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        if self.funcmap.has_key(namespace):
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName : function}

    def registerKWObject(self, object, namespace = '', path = ''):
        """Like registerObject, but every public method is wrapped so it
        is called with keyword arguments."""
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        for i in dir(object.__class__):
            if i[0] != "_" and callable(getattr(object, i)):
                self.registerKWFunction(getattr(object,i), namespace)

    # convenience  - wraps your func for you.
    def registerKWFunction(self, function, namespace = '', funcName = None,
                           path = ''):
        """Register *function* wrapped in a keyword-calling MethodSig."""
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        self.registerFunction(MethodSig(function,keywords=1), namespace,
                              funcName)

    def unregisterObject(self, object, namespace = '', path = ''):
        """Remove the object registered under *namespace* (or *path*)."""
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler dispatching SOAP-RPC calls.

    POST requests are parsed as SOAP envelopes and routed to functions
    or objects registered on the owning server; GET requests are served
    only for URLs ending in "wsdl".
    """

    # Tell the parser to ignore external entities/doctypes in incoming
    # payloads (safe default against XXE-style attacks).
    ignore_ext = True

    def version_string(self):
        """Return the server identification string (HTML link form)."""
        return '<a href="http://pywebsvcs.sf.net">' + \
               'SOAPpy ' + __version__ + '</a> (Python ' + \
               sys.version.split()[0] + ')'

    def date_time_string(self):
        """Remember the last Date header value so the header-dump code
        below can reprint it."""
        self.__last_date_time_string = \
            BaseHTTPServer.BaseHTTPRequestHandler.\
            date_time_string(self)
        return self.__last_date_time_string

    def do_POST(self):
        """Parse an incoming SOAP envelope, dispatch the call, and send
        back a SOAP response (or fault)."""
        global _contexts

        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1, ignore_ext=self.ignore_ext)

            method = r._name
            args = r._aslist()
            kw = r._asdict()

            if self.server.config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.
            # This is a non-standard exension to the SOAP protocol,
            # but is supported by Apache AXIS.
            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.
            ordered_args = {}
            named_args = {}

            if self.server.config.specialArgs:
                for (k,v) in kw.items():
                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v
                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]

            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)

            resp = ""

            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                   self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                           self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]

                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method

                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)

                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None

                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if self.server.config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            strkw = {}

                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})
                    else:
                        if self.server.config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    if type(fr) == type(self) and \
                       isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                                         encoding = self.server.encoding,
                                         config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                                         {'%sResponse' % method: {'Result': fr}},
                                         encoding = self.server.encoding,
                                         config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]

                except Exception, e:
                    import traceback
                    info = sys.exc_info()
                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                                     config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                                              info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                        info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                             config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error
            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info
                debugFooter(s)
            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
               self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
               self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connections drops the output, so this work-around.
            # This should be investigated more someday.
            if self.server.config.SSLserver and \
               isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                                             SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):
        """Serve WSDL for URLs ending in 'wsdl'; answer anything else
        with an explanatory HTML page."""

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
               and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>
<body>
<h1>Oops!</h1>
<p>
  This server supports HTTP GET requests only for the the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.
  Either you requested an URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>
</body>''')

    def log_message(self, format, *args):
        """Log through the base class only when the server was created
        with logging enabled."""
        if self.server.log:
            BaseHTTPServer.BaseHTTPRequestHandler.\
            log_message (self, format, *args)
class SOAPInsecureRequestHandler(SOAPRequestHandler):
    '''Request handler that does load POSTed doctypes.

    Identical to SOAPRequestHandler except that external entities and
    doctypes in incoming payloads are handed to the parser instead of
    being ignored (ignore_ext = False) -- unsafe with untrusted clients
    (XXE / entity-expansion attacks).

    FIX: this class previously derived from
    BaseHTTPServer.BaseHTTPRequestHandler, which has no do_POST SOAP
    dispatch and never reads ignore_ext, so the class could not serve
    SOAP at all; it must derive from SOAPRequestHandler.
    '''
    ignore_ext = False
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
    """Single-threaded SOAP server; handles one request at a time."""

    def __init__(self, addr = ('localhost', 8000),
                 RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
                 config = Config, namespace = None, ssl_context = None):
        # Probe the codec registry up front: an unknown encoding raises
        # here rather than later while building a response.
        if encoding is not None:
            ''.encode(encoding)

        # SSL was requested but M2Crypto could not be imported.
        if ssl_context is not None and not config.SSLserver:
            raise AttributeError(
                "SSL server not supported by this Python installation")

        # dispatch/registration state used by SOAPServerBase
        self.objmap      = {}
        self.funcmap     = {}
        self.namespace   = namespace
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        # allow quick restarts on the same port
        self.allow_reuse_address = 1

        SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
    """SOAP server that serves each request in its own thread."""

    def __init__(self, addr = ('localhost', 8000),
                 RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
                 config = Config, namespace = None, ssl_context = None):
        # Probe the codec registry up front: an unknown encoding raises
        # here rather than later while building a response.
        if encoding is not None:
            ''.encode(encoding)

        # SSL was requested but M2Crypto could not be imported.
        if ssl_context is not None and not config.SSLserver:
            raise AttributeError(
                "SSL server not supported by this Python installation")

        # dispatch/registration state used by SOAPServerBase
        self.objmap      = {}
        self.funcmap     = {}
        self.namespace   = namespace
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        # allow quick restarts on the same port
        self.allow_reuse_address = 1

        SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):
    class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
        """SOAP server listening on a Unix domain socket.

        *addr* is the filesystem path of the socket; it is passed
        through str(), so the historical integer default still works.
        """
        def __init__(self, addr = 8000,
                     RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
                     config = Config, namespace = None, ssl_context = None):
            # Test the encoding, raising an exception if it's not known
            if encoding != None:
                ''.encode(encoding)

            # SSL requested but unavailable (M2Crypto missing)
            if ssl_context != None and not config.SSLserver:
                raise AttributeError, \
                      "SSL server not supported by this Python installation"

            self.namespace = namespace
            self.objmap = {}
            self.funcmap = {}
            self.ssl_context = ssl_context
            self.encoding = encoding
            self.config = config
            self.log = log

            self.allow_reuse_address= 1

            SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
|
burzillibus/RobHome
|
venv/lib/python2.7/site-packages/SOAPpy/Server.py
|
Python
|
mit
| 27,044
|
[
"Brian"
] |
842afcb9b41859dddf5c15fcec082f85d87e23bbddd380b858c9a7295a6bf2b6
|
# -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import os
import sys
import optparse
import time
from os import path
from io import open
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
except ImportError:
pass
from six import PY2, PY3, text_type, binary_type
from six.moves import input
from six.moves.urllib.parse import quote as urlquote
from docutils.utils import column_width
from sphinx import __display_version__
from sphinx.util.osutil import make_filename
from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util import texescape
# Encoding of the interactive terminal (None when stdin has no
# encoding attribute).
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)

# function to get input from terminal -- overridden by the test suite
term_input = input

# Fallback answers used for prompts the user leaves at their default.
DEFAULT_VALUE = {
    'path': '.',
    'sep': False,
    'dot': '_',
    'language': None,
    'suffix': '.rst',
    'master': 'index',
    'epub': False,
    'ext_autodoc': False,
    'ext_doctest': False,
    'ext_todo': False,
    'makefile': True,
    'batchfile': True,
}

# Built-in Sphinx extensions the user may enable from the prompts.
EXTENSIONS = ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
              'pngmath', 'mathjax', 'ifconfig', 'viewcode')

# Prefix shown before every interactive prompt.
PROMPT_PREFIX = '> '

if PY3:
    # prevents that the file is checked for being written in Python 2.x syntax
    QUICKSTART_CONF = u'#!/usr/bin/env python3\n'
else:
    QUICKSTART_CONF = u''
QUICKSTART_CONF += u'''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
# sphinx-quickstart on %(now)s.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [%(extensions)s]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['%(dot)stemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '%(suffix)s'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = '%(master_str)s'
# General information about the project.
project = u'%(project_str)s'
copyright = u'%(copyright_str)s'
author = u'%(author_str)s'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '%(version_str)s'
# The full version, including alpha/beta/rc tags.
release = '%(release_str)s'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = %(language)r
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%%B %%d, %%Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [%(exclude_patterns)s]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = %(ext_todo)s
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['%(dot)sstatic']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%%b %%d, %%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '%(project_fn)sdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%(project_fn)s.tex', u'%(project_doc_texescaped_str)s',
u'%(author_texescaped_str)s', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '%(project_manpage)s', u'%(project_doc_str)s',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, '%(project_fn)s', u'%(project_doc_str)s',
author, '%(project_fn)s', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
'''
# conf.py additions emitted (via %-interpolation) when the user enables the
# epub builder.  Fix: "HTML files shat should be inserted" -> "that" (typo in
# the comment text written into every generated conf.py).
EPUB_CONFIG = u'''
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
'''
# conf.py additions emitted when the intersphinx extension is enabled.
INTERSPHINX_CONFIG = u'''
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
'''
# Template for the generated master document (index.rst etc.), filled via
# %-interpolation in generate().
MASTER_FILE = u'''\
.. %(project)s documentation master file, created by
sphinx-quickstart on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to %(project)s's documentation!
===========%(project_underline)s=================
Contents:
.. toctree::
:maxdepth: %(mastertocmaxdepth)s
%(mastertoctree)s
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
'''
MAKEFILE = u'''\
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = %(rbuilddir)s
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error \
The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx \
installed, then set the SPHINXBUILD environment variable to point \
to the full path of the '$(SPHINXBUILD)' executable. Alternatively you \
can add the directory with the executable to your PATH. \
If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp \
epub latex latexpdf text man changes linkcheck doctest coverage gettext
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
\t@echo " html to make standalone HTML files"
\t@echo " dirhtml to make HTML files named index.html in directories"
\t@echo " singlehtml to make a single large HTML file"
\t@echo " pickle to make pickle files"
\t@echo " json to make JSON files"
\t@echo " htmlhelp to make HTML files and a HTML help project"
\t@echo " qthelp to make HTML files and a qthelp project"
\t@echo " applehelp to make an Apple Help Book"
\t@echo " devhelp to make HTML files and a Devhelp project"
\t@echo " epub to make an epub"
\t@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
\t@echo " latexpdf to make LaTeX files and run them through pdflatex"
\t@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
\t@echo " text to make text files"
\t@echo " man to make manual pages"
\t@echo " texinfo to make Texinfo files"
\t@echo " info to make Texinfo files and run them through makeinfo"
\t@echo " gettext to make PO message catalogs"
\t@echo " changes to make an overview of all changed/added/deprecated items"
\t@echo " xml to make Docutils-native XML files"
\t@echo " pseudoxml to make pseudoxml-XML files for display purposes"
\t@echo " linkcheck to check all external links for integrity"
\t@echo " doctest to run all doctests embedded in the documentation \
(if enabled)"
\t@echo " coverage to run coverage check of the documentation (if enabled)"
clean:
\trm -rf $(BUILDDIR)/*
html:
\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
\t@echo
\t@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
\t@echo
\t@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
\t@echo
\t@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
\t@echo
\t@echo "Build finished; now you can process the pickle files."
json:
\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
\t@echo
\t@echo "Build finished; now you can process the JSON files."
htmlhelp:
\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
\t@echo
\t@echo "Build finished; now you can run HTML Help Workshop with the" \\
\t ".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
\t@echo
\t@echo "Build finished; now you can run "qcollectiongenerator" with the" \\
\t ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
\t@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/%(project_fn)s.qhcp"
\t@echo "To view the help file:"
\t@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/%(project_fn)s.qhc"
applehelp:
\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
\t@echo
\t@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
\t@echo "N.B. You won't be able to view it unless you put it in" \\
\t "~/Library/Documentation/Help or install it in your application" \\
\t "bundle."
devhelp:
\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
\t@echo
\t@echo "Build finished."
\t@echo "To view the help file:"
\t@echo "# mkdir -p $$HOME/.local/share/devhelp/%(project_fn)s"
\t@echo "# ln -s $(BUILDDIR)/devhelp\
$$HOME/.local/share/devhelp/%(project_fn)s"
\t@echo "# devhelp"
epub:
\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
\t@echo
\t@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo
\t@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
\t@echo "Run \\`make' in that directory to run these through (pdf)latex" \\
\t "(use \\`make latexpdf' here to do that automatically)."
latexpdf:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo "Running LaTeX files through pdflatex..."
\t$(MAKE) -C $(BUILDDIR)/latex all-pdf
\t@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo "Running LaTeX files through platex and dvipdfmx..."
\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
\t@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
\t@echo
\t@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
\t@echo
\t@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
\t@echo
\t@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
\t@echo "Run \\`make' in that directory to run these through makeinfo" \\
\t "(use \\`make info' here to do that automatically)."
info:
\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
\t@echo "Running Texinfo files through makeinfo..."
\tmake -C $(BUILDDIR)/texinfo info
\t@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
\t@echo
\t@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
\t@echo
\t@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
\t@echo
\t@echo "Link check complete; look for any errors in the above output " \\
\t "or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
\t@echo "Testing of doctests in the sources finished, look at the " \\
\t "results in $(BUILDDIR)/doctest/output.txt."
coverage:
\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
\t@echo "Testing of coverage in the sources finished, look at the " \\
\t "results in $(BUILDDIR)/coverage/python.txt."
xml:
\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
\t@echo
\t@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
\t@echo
\t@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
'''
# Windows make.bat template (pre-1.5 "full" layout).  Literal "%%" collapses
# to "%" under %-interpolation, so "%%BUILDDIR%%" emits "%BUILDDIR%".
# Fix: the qthelp target's "assistant -collectionFile" hint echoed the wrong
# extension (.ghc); the generated collection file is %(project_fn)s.qhc,
# matching the Makefile template's qthelp target.
BATCHFILE = u'''\
@ECHO OFF
REM Command file for Sphinx documentation
if "%%SPHINXBUILD%%" == "" (
\tset SPHINXBUILD=sphinx-build
)
set BUILDDIR=%(rbuilddir)s
set ALLSPHINXOPTS=-d %%BUILDDIR%%/doctrees %%SPHINXOPTS%% %(rsrcdir)s
set I18NSPHINXOPTS=%%SPHINXOPTS%% %(rsrcdir)s
if NOT "%%PAPER%%" == "" (
\tset ALLSPHINXOPTS=-D latex_paper_size=%%PAPER%% %%ALLSPHINXOPTS%%
\tset I18NSPHINXOPTS=-D latex_paper_size=%%PAPER%% %%I18NSPHINXOPTS%%
)
if "%%1" == "" goto help
if "%%1" == "help" (
\t:help
\techo.Please use `make ^<target^>` where ^<target^> is one of
\techo.  html       to make standalone HTML files
\techo.  dirhtml    to make HTML files named index.html in directories
\techo.  singlehtml to make a single large HTML file
\techo.  pickle     to make pickle files
\techo.  json       to make JSON files
\techo.  htmlhelp   to make HTML files and a HTML help project
\techo.  qthelp     to make HTML files and a qthelp project
\techo.  devhelp    to make HTML files and a Devhelp project
\techo.  epub       to make an epub
\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
\techo.  text       to make text files
\techo.  man        to make manual pages
\techo.  texinfo    to make Texinfo files
\techo.  gettext    to make PO message catalogs
\techo.  changes    to make an overview over all changed/added/deprecated items
\techo.  xml        to make Docutils-native XML files
\techo.  pseudoxml  to make pseudoxml-XML files for display purposes
\techo.  linkcheck  to check all external links for integrity
\techo.  doctest    to run all doctests embedded in the documentation if enabled
\techo.  coverage   to run coverage check of the documentation if enabled
\tgoto end
)
if "%%1" == "clean" (
\tfor /d %%%%i in (%%BUILDDIR%%\*) do rmdir /q /s %%%%i
\tdel /q /s %%BUILDDIR%%\*
\tgoto end
)
REM Check if sphinx-build is available and fallback to Python version if any
%%SPHINXBUILD%% 2> nul
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
set SPHINXBUILD=python -m sphinx.__init__
%%SPHINXBUILD%% 2> nul
if errorlevel 9009 (
\techo.
\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx
\techo.installed, then set the SPHINXBUILD environment variable to point
\techo.to the full path of the 'sphinx-build' executable. Alternatively you
\techo.may add the Sphinx directory to PATH.
\techo.
\techo.If you don't have Sphinx installed, grab it from
\techo.http://sphinx-doc.org/
\texit /b 1
)
:sphinx_ok
if "%%1" == "html" (
\t%%SPHINXBUILD%% -b html %%ALLSPHINXOPTS%% %%BUILDDIR%%/html
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/html.
\tgoto end
)
if "%%1" == "dirhtml" (
\t%%SPHINXBUILD%% -b dirhtml %%ALLSPHINXOPTS%% %%BUILDDIR%%/dirhtml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/dirhtml.
\tgoto end
)
if "%%1" == "singlehtml" (
\t%%SPHINXBUILD%% -b singlehtml %%ALLSPHINXOPTS%% %%BUILDDIR%%/singlehtml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/singlehtml.
\tgoto end
)
if "%%1" == "pickle" (
\t%%SPHINXBUILD%% -b pickle %%ALLSPHINXOPTS%% %%BUILDDIR%%/pickle
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can process the pickle files.
\tgoto end
)
if "%%1" == "json" (
\t%%SPHINXBUILD%% -b json %%ALLSPHINXOPTS%% %%BUILDDIR%%/json
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can process the JSON files.
\tgoto end
)
if "%%1" == "htmlhelp" (
\t%%SPHINXBUILD%% -b htmlhelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/htmlhelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %%BUILDDIR%%/htmlhelp.
\tgoto end
)
if "%%1" == "qthelp" (
\t%%SPHINXBUILD%% -b qthelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/qthelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %%BUILDDIR%%/qthelp, like this:
\techo.^> qcollectiongenerator %%BUILDDIR%%\\qthelp\\%(project_fn)s.qhcp
\techo.To view the help file:
\techo.^> assistant -collectionFile %%BUILDDIR%%\\qthelp\\%(project_fn)s.qhc
\tgoto end
)
if "%%1" == "devhelp" (
\t%%SPHINXBUILD%% -b devhelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/devhelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished.
\tgoto end
)
if "%%1" == "epub" (
\t%%SPHINXBUILD%% -b epub %%ALLSPHINXOPTS%% %%BUILDDIR%%/epub
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The epub file is in %%BUILDDIR%%/epub.
\tgoto end
)
if "%%1" == "latex" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; the LaTeX files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "latexpdf" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tcd %%BUILDDIR%%/latex
\tmake all-pdf
\tcd %%~dp0
\techo.
\techo.Build finished; the PDF files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "latexpdfja" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tcd %%BUILDDIR%%/latex
\tmake all-pdf-ja
\tcd %%~dp0
\techo.
\techo.Build finished; the PDF files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "text" (
\t%%SPHINXBUILD%% -b text %%ALLSPHINXOPTS%% %%BUILDDIR%%/text
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The text files are in %%BUILDDIR%%/text.
\tgoto end
)
if "%%1" == "man" (
\t%%SPHINXBUILD%% -b man %%ALLSPHINXOPTS%% %%BUILDDIR%%/man
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The manual pages are in %%BUILDDIR%%/man.
\tgoto end
)
if "%%1" == "texinfo" (
\t%%SPHINXBUILD%% -b texinfo %%ALLSPHINXOPTS%% %%BUILDDIR%%/texinfo
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The Texinfo files are in %%BUILDDIR%%/texinfo.
\tgoto end
)
if "%%1" == "gettext" (
\t%%SPHINXBUILD%% -b gettext %%I18NSPHINXOPTS%% %%BUILDDIR%%/locale
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The message catalogs are in %%BUILDDIR%%/locale.
\tgoto end
)
if "%%1" == "changes" (
\t%%SPHINXBUILD%% -b changes %%ALLSPHINXOPTS%% %%BUILDDIR%%/changes
\tif errorlevel 1 exit /b 1
\techo.
\techo.The overview file is in %%BUILDDIR%%/changes.
\tgoto end
)
if "%%1" == "linkcheck" (
\t%%SPHINXBUILD%% -b linkcheck %%ALLSPHINXOPTS%% %%BUILDDIR%%/linkcheck
\tif errorlevel 1 exit /b 1
\techo.
\techo.Link check complete; look for any errors in the above output ^
or in %%BUILDDIR%%/linkcheck/output.txt.
\tgoto end
)
if "%%1" == "doctest" (
\t%%SPHINXBUILD%% -b doctest %%ALLSPHINXOPTS%% %%BUILDDIR%%/doctest
\tif errorlevel 1 exit /b 1
\techo.
\techo.Testing of doctests in the sources finished, look at the ^
results in %%BUILDDIR%%/doctest/output.txt.
\tgoto end
)
if "%%1" == "coverage" (
\t%%SPHINXBUILD%% -b coverage %%ALLSPHINXOPTS%% %%BUILDDIR%%/coverage
\tif errorlevel 1 exit /b 1
\techo.
\techo.Testing of coverage in the sources finished, look at the ^
results in %%BUILDDIR%%/coverage/python.txt.
\tgoto end
)
if "%%1" == "xml" (
\t%%SPHINXBUILD%% -b xml %%ALLSPHINXOPTS%% %%BUILDDIR%%/xml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The XML files are in %%BUILDDIR%%/xml.
\tgoto end
)
if "%%1" == "pseudoxml" (
\t%%SPHINXBUILD%% -b pseudoxml %%ALLSPHINXOPTS%% %%BUILDDIR%%/pseudoxml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The pseudo-XML files are in %%BUILDDIR%%/pseudoxml.
\tgoto end
)
:end
'''
# This will become the Makefile template for Sphinx 1.5: a minimal catch-all
# Makefile that forwards every target to sphinx-build's -M "make mode".
MAKEFILE_NEW = u'''\
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = %(project_fn)s
SOURCEDIR = %(rsrcdir)s
BUILDDIR = %(rbuilddir)s
# User-friendly check for sphinx-build.
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error \
The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx \
installed, then set the SPHINXBUILD environment variable to point \
to the full path of the '$(SPHINXBUILD)' executable. Alternatively you \
can add the directory with the executable to your PATH. \
If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Has to be explicit, otherwise we don't get "make" without targets right.
help:
\t@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# You can add custom targets here.
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%%:
\t@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
'''
# This will become the make.bat template for Sphinx 1.5: minimal Windows
# counterpart of MAKEFILE_NEW, also forwarding to sphinx-build -M.
BATCHFILE_NEW = u'''\
@ECHO OFF
REM Command file for Sphinx documentation
if "%%SPHINXBUILD%%" == "" (
\tset SPHINXBUILD=sphinx-build
)
set SOURCEDIR=%(rsrcdir)s
set BUILDDIR=%(rbuilddir)s
set SPHINXPROJ=%(project_fn)s
if "%%1" == "" goto help
%%SPHINXBUILD%% 2> nul
if errorlevel 9009 (
\techo.
\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx
\techo.installed, then set the SPHINXBUILD environment variable to point
\techo.to the full path of the 'sphinx-build' executable. Alternatively you
\techo.may add the Sphinx directory to PATH.
\techo.
\techo.If you don't have Sphinx installed, grab it from
\techo.http://sphinx-doc.org/
\texit /b 1
)
%%SPHINXBUILD%% -M %%1 %%SOURCEDIR%% %%BUILDDIR%% %%SPHINXOPTS%%
goto end
:help
%%SPHINXBUILD%% -M help %%SOURCEDIR%% %%BUILDDIR%% %%SPHINXOPTS%%
:end
'''
def mkdir_p(dir):
    """Create directory *dir* (and any missing parents) if it does not exist."""
    if not path.isdir(dir):
        os.makedirs(dir)
class ValidationError(Exception):
    """Raised by the input validators when a prompted value is rejected."""
def is_path(x):
    """Validator: expand ``~`` in *x*; reject paths that exist but are files."""
    expanded = path.expanduser(x)
    exists_but_not_dir = path.exists(expanded) and not path.isdir(expanded)
    if exists_but_not_dir:
        raise ValidationError("Please enter a valid path name.")
    return expanded
def nonempty(x):
    """Validator: require a non-empty value and return it unchanged."""
    if x:
        return x
    raise ValidationError("Please enter some text.")
def choice(*l):
    """Return a validator that accepts only values from the given set."""
    def val(x):
        if x in l:
            return x
        raise ValidationError('Please enter one of %s.' % ', '.join(l))
    return val
def boolean(x):
    """Validator: map a y/yes/n/no answer (any case) to True or False."""
    answer = x.upper()
    if answer in ('Y', 'YES'):
        return True
    if answer in ('N', 'NO'):
        return False
    raise ValidationError("Please enter either 'y' or 'n'.")
def suffix(x):
    """Validator: require a file suffix of the form '.ext'."""
    if x.startswith('.') and len(x) > 1:
        return x
    raise ValidationError("Please enter a file suffix, "
                          "e.g. '.rst' or '.txt'.")
def ok(x):
    """Validator that accepts any input and returns it unchanged."""
    return x
def term_decode(text):
    """Return terminal input *text* as a Unicode string.

    Already-decoded text (instances of ``text_type``) is returned unchanged;
    the remaining branches handle Python 2 byte strings.
    """
    if isinstance(text, text_type):
        return text
    # for Python 2.x, try to get a Unicode string out of it
    # (a pure-ASCII byte string survives an ascii round-trip unchanged
    # and needs no decoding)
    if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
        return text
    if TERM_ENCODING:
        text = text.decode(TERM_ENCODING)
    else:
        # terminal encoding unknown: try UTF-8 first, then fall back to
        # Latin-1, which cannot fail for arbitrary bytes
        print(turquoise('* Note: non-ASCII characters entered '
                        'and terminal encoding unknown -- assuming '
                        'UTF-8 or Latin-1.'))
        try:
            text = text.decode('utf-8')
        except UnicodeDecodeError:
            text = text.decode('latin1')
    return text
def do_prompt(d, key, text, default=None, validator=nonempty):
    """Prompt interactively for one value and store it in ``d[key]``.

    Re-prompts until *validator* accepts the (decoded, stripped) answer;
    an empty answer selects *default* when one is given.
    """
    while True:
        if default:
            prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
        else:
            prompt = PROMPT_PREFIX + text + ': '
        if PY2:
            # for Python 2.x, try to get a Unicode string out of it
            # (non-ASCII prompts must be byte-encoded before raw_input)
            if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
               != prompt:
                if TERM_ENCODING:
                    prompt = prompt.encode(TERM_ENCODING)
                else:
                    # terminal encoding unknown: try UTF-8, fall back to Latin-1
                    print(turquoise('* Note: non-ASCII default value provided '
                                    'and terminal encoding unknown -- assuming '
                                    'UTF-8 or Latin-1.'))
                    try:
                        prompt = prompt.encode('utf-8')
                    except UnicodeEncodeError:
                        prompt = prompt.encode('latin1')
        prompt = purple(prompt)
        x = term_input(prompt).strip()
        if default and not x:
            x = default
        x = term_decode(x)
        try:
            x = validator(x)
        except ValidationError as err:
            # rejected: show the reason and ask again
            print(red('* ' + str(err)))
            continue
        break
    d[key] = x
# On Python 3 the conf.py templates must not emit u'' literals, so strip the
# Unicode literal prefixes from the named template constants in place.
if PY3:
    # remove Unicode literal prefixes
    def _convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
        return rex.sub('\\1', source)
    for f in ['QUICKSTART_CONF', 'EPUB_CONFIG', 'INTERSPHINX_CONFIG']:
        globals()[f] = _convert_python_source(globals()[f])
    # helper is only needed at import time
    del _convert_python_source
def ask_user(d):
    """Ask the user for quickstart values missing from *d*.

    Values are:
    * path: root path
    * sep: separate source and build dirs (bool)
    * dot: replacement for dot in _templates etc.
    * project: project name
    * author: author names
    * version: version of project
    * release: release of project
    * language: document language
    * suffix: source file suffix
    * master: master document name
    * epub: use epub (bool)
    * ext_*: extensions to use (bools)
    * makefile: make Makefile
    * batchfile: make command file
    """
    print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
    print('''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).''')
    if 'path' in d:
        print(bold('''
Selected root path: %s''' % d['path']))
    else:
        print('''
Enter the root path for documentation.''')
        do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
    # refuse to overwrite an existing Sphinx project in the chosen root
    while path.isfile(path.join(d['path'], 'conf.py')) or \
            path.isfile(path.join(d['path'], 'source', 'conf.py')):
        print()
        print(bold('Error: an existing conf.py has been found in the '
                   'selected root path.'))
        print('sphinx-quickstart will not overwrite existing Sphinx projects.')
        print()
        do_prompt(d, 'path', 'Please enter a new root path (or just Enter '
                  'to exit)', '', is_path)
        if not d['path']:
            sys.exit(1)
    if 'sep' not in d:
        print('''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory "_build" within the root path, or you separate
"source" and "build" directories within the root path.''')
        do_prompt(d, 'sep', 'Separate source and build directories (y/n)', 'n',
                  boolean)
    if 'dot' not in d:
        print('''
Inside the root directory, two more directories will be created; "_templates"
for custom HTML templates and "_static" for custom stylesheets and other static
files. You can enter another prefix (such as ".") to replace the underscore.''')
        do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok)
    if 'project' not in d:
        print('''
The project name will occur in several places in the built documentation.''')
        do_prompt(d, 'project', 'Project name')
    if 'author' not in d:
        do_prompt(d, 'author', 'Author name(s)')
    if 'version' not in d:
        print('''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.''')
        do_prompt(d, 'version', 'Project version')
    if 'release' not in d:
        do_prompt(d, 'release', 'Project release', d['version'])
    if 'language' not in d:
        print('''
If the documents are to be written in a language other than English,
you can select a language here by its language code. Sphinx will then
translate text that it generates into that language.
For a list of supported codes, see
http://sphinx-doc.org/config.html#confval-language.''')
        do_prompt(d, 'language', 'Project language', 'en')
        # 'en' is the built-in default, stored as None
        if d['language'] == 'en':
            d['language'] = None
    if 'suffix' not in d:
        print('''
The file name suffix for source files. Commonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.''')
        do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
    if 'master' not in d:
        print('''
One document is special in that it is considered the top node of the
"contents tree", that is, it is the root of the hierarchical structure
of the documents. Normally, this is "index", but if your "index"
document is a custom template, you can also set this to another filename.''')
        do_prompt(d, 'master', 'Name of your master document (without suffix)',
                  'index')
    # refuse to overwrite an existing master document
    while path.isfile(path.join(d['path'], d['master']+d['suffix'])) or \
            path.isfile(path.join(d['path'], 'source', d['master']+d['suffix'])):
        print()
        print(bold('Error: the master file %s has already been found in the '
                   'selected root path.' % (d['master']+d['suffix'])))
        print('sphinx-quickstart will not overwrite the existing file.')
        print()
        do_prompt(d, 'master', 'Please enter a new file name, or rename the '
                  'existing file and press Enter', d['master'])
    if 'epub' not in d:
        print('''
Sphinx can also add configuration for epub output:''')
        do_prompt(d, 'epub', 'Do you want to use the epub builder (y/n)',
                  'n', boolean)
    # optional Sphinx extensions, one y/n prompt each
    if 'ext_autodoc' not in d:
        print('''
Please indicate if you want to use one of the following Sphinx extensions:''')
        do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
                  'from modules (y/n)', 'n', boolean)
    if 'ext_doctest' not in d:
        do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
                  'in doctest blocks (y/n)', 'n', boolean)
    if 'ext_intersphinx' not in d:
        do_prompt(d, 'ext_intersphinx', 'intersphinx: link between Sphinx '
                  'documentation of different projects (y/n)', 'n', boolean)
    if 'ext_todo' not in d:
        do_prompt(d, 'ext_todo', 'todo: write "todo" entries '
                  'that can be shown or hidden on build (y/n)', 'n', boolean)
    if 'ext_coverage' not in d:
        do_prompt(d, 'ext_coverage', 'coverage: checks for documentation '
                  'coverage (y/n)', 'n', boolean)
    if 'ext_pngmath' not in d:
        do_prompt(d, 'ext_pngmath', 'pngmath: include math, rendered '
                  'as PNG images (y/n)', 'n', boolean)
    if 'ext_mathjax' not in d:
        do_prompt(d, 'ext_mathjax', 'mathjax: include math, rendered in the '
                  'browser by MathJax (y/n)', 'n', boolean)
    # pngmath and mathjax are mutually exclusive; mathjax wins
    if d['ext_pngmath'] and d['ext_mathjax']:
        print('''Note: pngmath and mathjax cannot be enabled at the same time.
pngmath has been deselected.''')
        d['ext_pngmath'] = False
    if 'ext_ifconfig' not in d:
        do_prompt(d, 'ext_ifconfig', 'ifconfig: conditional inclusion of '
                  'content based on config values (y/n)', 'n', boolean)
    if 'ext_viewcode' not in d:
        do_prompt(d, 'ext_viewcode', 'viewcode: include links to the source '
                  'code of documented Python objects (y/n)', 'n', boolean)
    # command-line switches may pre-suppress Makefile / batch file creation
    if 'no_makefile' in d:
        d['makefile'] = False
    elif 'makefile' not in d:
        print('''
A Makefile and a Windows command file can be generated for you so that you
only have to run e.g. `make html' instead of invoking sphinx-build
directly.''')
        do_prompt(d, 'makefile', 'Create Makefile? (y/n)', 'y', boolean)
    if 'no_batchfile' in d:
        d['batchfile'] = False
    elif 'batchfile' not in d:
        do_prompt(d, 'batchfile', 'Create Windows command file? (y/n)',
                  'y', boolean)
    print()
def generate(d, overwrite=True, silent=False):
    """Generate project based on values in *d*.

    Creates the source/build directory layout, writes conf.py, the master
    document and (optionally) Makefile / make.bat.  *d* is the answers dict
    built by ask_user()/main(); with overwrite=False existing files are kept;
    with silent=True the final instructions are not printed.
    """
    texescape.init()
    indent = ' ' * 4

    # Defaults for values not asked interactively.
    if 'mastertoctree' not in d:
        d['mastertoctree'] = ''
    if 'mastertocmaxdepth' not in d:
        d['mastertocmaxdepth'] = 2

    # Derived values used by the file templates below.
    d['project_fn'] = make_filename(d['project'])
    d['project_url'] = urlquote(d['project'].encode('idna'))
    d['project_manpage'] = d['project_fn'].lower()
    d['now'] = time.asctime()
    # column_width() counts display cells so wide (CJK) characters get a
    # long-enough reST section underline.
    d['project_underline'] = column_width(d['project']) * '='
    extensions = (',\n' + indent).join(
        repr('sphinx.ext.' + name)
        for name in EXTENSIONS
        if d.get('ext_' + name))
    if extensions:
        d['extensions'] = '\n' + indent + extensions + ',\n'
    else:
        d['extensions'] = extensions
    d['copyright'] = time.strftime('%Y') + ', ' + d['author']
    d['author_texescaped'] = text_type(d['author']).\
        translate(texescape.tex_escape_map)
    d['project_doc'] = d['project'] + ' Documentation'
    d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
        translate(texescape.tex_escape_map)

    # escape backslashes and single quotes in strings that are put into
    # a Python string literal
    for key in ('project', 'project_doc', 'project_doc_texescaped',
                'author', 'author_texescaped', 'copyright',
                'version', 'release', 'master'):
        d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")

    if not path.isdir(d['path']):
        mkdir_p(d['path'])

    # Separate source dir only when --sep was chosen.
    srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']

    mkdir_p(srcdir)
    if d['sep']:
        builddir = path.join(d['path'], 'build')
        d['exclude_patterns'] = ''
    else:
        # Build dir lives inside the source dir, so exclude it from the build.
        builddir = path.join(srcdir, d['dot'] + 'build')
        d['exclude_patterns'] = repr(d['dot'] + 'build')
    mkdir_p(builddir)
    mkdir_p(path.join(srcdir, d['dot'] + 'templates'))
    mkdir_p(path.join(srcdir, d['dot'] + 'static'))

    def write_file(fpath, content, newline=None):
        # Write *content* to *fpath*, honouring the overwrite flag;
        # *newline* pins the line ending (Makefile needs \n, make.bat \r\n).
        if overwrite or not path.isfile(fpath):
            print('Creating file %s.' % fpath)
            f = open(fpath, 'wt', encoding='utf-8', newline=newline)
            try:
                f.write(content)
            finally:
                f.close()
        else:
            print('File %s already exists, skipping.' % fpath)

    conf_text = QUICKSTART_CONF % d
    if d['epub']:
        conf_text += EPUB_CONFIG % d
    if d.get('ext_intersphinx'):
        conf_text += INTERSPHINX_CONFIG

    write_file(path.join(srcdir, 'conf.py'), conf_text)

    masterfile = path.join(srcdir, d['master'] + d['suffix'])
    write_file(masterfile, MASTER_FILE % d)

    # make-mode uses the simplified Makefile/batch templates.
    if d.get('make_mode') is True:
        makefile_template = MAKEFILE_NEW
        batchfile_template = BATCHFILE_NEW
    else:
        makefile_template = MAKEFILE
        batchfile_template = BATCHFILE

    if d['makefile'] is True:
        d['rsrcdir'] = d['sep'] and 'source' or '.'
        d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
        # use binary mode, to avoid writing \r\n on Windows
        write_file(path.join(d['path'], 'Makefile'), makefile_template % d, u'\n')
    if d['batchfile'] is True:
        d['rsrcdir'] = d['sep'] and 'source' or '.'
        d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
        write_file(path.join(d['path'], 'make.bat'), batchfile_template % d, u'\r\n')

    if silent:
        return
    print()
    print(bold('Finished: An initial directory structure has been created.'))
    print('''
You should now populate your master file %s and create other documentation
source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
Use the Makefile to build the docs, like so:
   make builder
''' or '''\
Use the sphinx-build command to build the docs, like so:
   sphinx-build -b builder %s %s
''' % (srcdir, builddir)) + '''\
where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
''')
def usage(argv, msg=None):
    """Print *msg* followed by a blank line to stderr, if a message is given.

    *argv* is accepted for interface compatibility and is not used.
    """
    if not msg:
        return
    sys.stderr.write('%s\n\n' % msg)
# Help templates for optparse.  %s is filled with the Sphinx version here;
# %%prog is left escaped so that optparse itself can substitute the program
# name later.
USAGE = """\
Sphinx v%s
Usage: %%prog [options] [projectdir]
""" % __display_version__

EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
def valid_dir(d):
    """Return True if d['path'] can safely receive a new Sphinx project.

    The target is acceptable when it does not exist yet, or when it is an
    existing directory containing none of the files sphinx-quickstart would
    create (Makefile/make.bat at the root; conf.py, the static/templates
    dirs and the master document in the source directory).
    """
    dir = d['path']
    if not path.exists(dir):
        return True
    if not path.isdir(dir):
        return False

    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):
        return False

    if d['sep']:
        # FIX: the source directory lives *inside* the project root.  The
        # previous os.path.join('source', dir) built the unrelated relative
        # path "source/<dir>", so the check below never saw the real srcdir.
        dir = os.path.join(dir, 'source')
        if not path.exists(dir):
            return True
        if not path.isdir(dir):
            return False

    reserved_names = [
        'conf.py',
        d['dot'] + 'static',
        d['dot'] + 'templates',
        d['master'] + d['suffix'],
    ]
    if set(reserved_names) & set(os.listdir(dir)):
        return False

    return True
class MyFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that emits the usage string verbatim and skips empty
    description/option sections when assembling the help text."""

    def format_usage(self, usage):
        # Pass the usage text through untouched (no "Usage:" rewriting).
        return usage

    def format_help(self, formatter):
        sections = [
            self.format_description(formatter) if self.description else None,
            self.format_option_help(formatter) if self.option_list else None,
        ]
        return "\n".join(s for s in sections if s is not None)
def main(argv=sys.argv):
    """Command-line entry point: parse options, collect answers (interactively
    or from --quiet defaults) and generate the project.

    Returns the exit code from option parsing, or None on normal completion.
    """
    if not color_terminal():
        nocolor()

    parser = optparse.OptionParser(USAGE, epilog=EPILOG,
                                   version='Sphinx v%s' % __display_version__,
                                   formatter=MyFormatter())
    parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
                      default=False,
                      help='quiet mode')

    group = parser.add_option_group('Structure options')
    group.add_option('--sep', action='store_true', dest='sep',
                     help='if specified, separate source and build dirs')
    group.add_option('--dot', metavar='DOT', dest='dot',
                     help='replacement for dot in _templates etc.')

    group = parser.add_option_group('Project basic options')
    group.add_option('-p', '--project', metavar='PROJECT', dest='project',
                     help='project name')
    group.add_option('-a', '--author', metavar='AUTHOR', dest='author',
                     help='author names')
    group.add_option('-v', metavar='VERSION', dest='version',
                     help='version of project')
    group.add_option('-r', '--release', metavar='RELEASE', dest='release',
                     help='release of project')
    group.add_option('-l', '--language', metavar='LANGUAGE', dest='language',
                     help='document language')
    group.add_option('--suffix', metavar='SUFFIX', dest='suffix',
                     help='source file suffix')
    group.add_option('--master', metavar='MASTER', dest='master',
                     help='master document name')
    group.add_option('--epub', action='store_true', dest='epub',
                     default=False,
                     help='use epub')

    group = parser.add_option_group('Extension options')
    # One --ext-NAME flag per known Sphinx extension.
    for ext in EXTENSIONS:
        group.add_option('--ext-' + ext, action='store_true',
                         dest='ext_' + ext, default=False,
                         help='enable %s extension' % ext)

    group = parser.add_option_group('Makefile and Batchfile creation')
    group.add_option('--makefile', action='store_true', dest='makefile',
                     default=False,
                     help='create makefile')
    group.add_option('--no-makefile', action='store_true', dest='no_makefile',
                     default=False,
                     help='not create makefile')
    group.add_option('--batchfile', action='store_true', dest='batchfile',
                     default=False,
                     help='create batchfile')
    group.add_option('--no-batchfile', action='store_true', dest='no_batchfile',
                     default=False,
                     help='not create batchfile')
    group.add_option('-M', '--no-use-make-mode', action='store_false', dest='make_mode',
                     help='not use make-mode for Makefile/make.bat')
    group.add_option('-m', '--use-make-mode', action='store_true', dest='make_mode',
                     help='use make-mode for Makefile/make.bat')

    # parse options
    try:
        opts, args = parser.parse_args()
    except SystemExit as err:
        # optparse exits on --help/--version/errors; propagate its code.
        return err.code

    if len(args) > 0:
        opts.ensure_value('path', args[0])

    d = vars(opts)
    # delete None or False value
    d = dict((k, v) for k, v in d.items() if not (v is None or v is False))

    try:
        if 'quiet' in d:
            # Quiet mode requires the three mandatory answers on the CLI.
            if not set(['project', 'author', 'version']).issubset(d):
                print('''"quiet" is specified, but any of "project", \
"author" or "version" is not specified.''')
                return

        if set(['quiet', 'project', 'author', 'version']).issubset(d):
            # quiet mode with all required params satisfied, use default
            d.setdefault('release', d['version'])
            d2 = DEFAULT_VALUE.copy()
            d2.update(dict(("ext_"+ext, False) for ext in EXTENSIONS))
            d2.update(d)
            d = d2
            if 'no_makefile' in d:
                d['makefile'] = False
            if 'no_batchfile' in d:
                d['batchfile'] = False

            if not valid_dir(d):
                print()
                print(bold('Error: specified path is not a directory, or sphinx'
                           ' files already exist.'))
                print('sphinx-quickstart only generate into a empty directory.'
                      ' Please specify a new root path.')
                return
        else:
            # Interactive mode: prompt for every missing answer.
            ask_user(d)
    except (KeyboardInterrupt, EOFError):
        print()
        print('[Interrupted.]')
        return

    # decode values in d if value is a Python string literal
    for key, value in d.items():
        if isinstance(value, binary_type):
            d[key] = term_decode(value)
    generate(d)
if __name__ == '__main__':
    # Run sphinx-quickstart and use its return value as the process exit code.
    sys.exit(main(sys.argv))
|
WhySoGeeky/DroidPot
|
venv/lib/python2.7/site-packages/sphinx/quickstart.py
|
Python
|
mit
| 51,722
|
[
"VisIt"
] |
11ba17cf1146bc85015be635acd3c3fe7b12b283d690e225049f6e6988412096
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************************
**espressopp.interaction.Morse**
******************************************
This class provides methods to compute forces and energies of
the Morse potential.
.. math::
U = \varepsilon \left(e^{-2 \alpha (r - r_{min})} - 2 e^{-\alpha (r - r_{min})}\right)
.. function:: espressopp.interaction.Morse(epsilon, alpha, rMin, cutoff, shift)
:param epsilon: (default: 1.0)
:param alpha: (default: 1.0)
:param rMin: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type alpha: real
:type rMin: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListMorse(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListMorse.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListMorse.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressMorse(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListAdressMorse.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressMorse.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressMorse(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListHadressMorse.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressMorse.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListMorse(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListMorse.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListMorse(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListMorse.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Morse, \
interaction_VerletListMorse, \
interaction_VerletListAdressMorse, \
interaction_VerletListHadressMorse, \
interaction_CellListMorse, \
interaction_FixedPairListMorse
class MorseLocal(PotentialLocal, interaction_Morse):
    """Local (per-CPU) implementation of the Morse pair potential."""

    def __init__(self, epsilon=1.0, alpha=1.0, rMin=0.0,
                 cutoff=infinity, shift="auto"):
        """Initialize the local Morse object."""
        # Construct the C++ backend only on CPUs belonging to the active PMI
        # group (or everywhere when no PMI communicator is active).
        involved = (not (pmi._PMIComm and pmi._PMIComm.isActive())
                    or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
        if involved:
            ctorArgs = [epsilon, alpha, rMin, cutoff]
            # "auto" shift is the C++ default and must be omitted from the call.
            if shift != "auto":
                ctorArgs.append(shift)
            cxxinit(self, interaction_Morse, *ctorArgs)
class VerletListMorseLocal(InteractionLocal, interaction_VerletListMorse):
    # Morse interaction evaluated over a Verlet (neighbour) list.
    def __init__(self, vl):
        # Construct the C++ backend only on participating worker CPUs.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListMorse, vl)

    def setPotential(self, type1, type2, potential):
        # Assign *potential* to the (type1, type2) particle-type pair.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        # Returns None on CPUs that are not part of the active PMI group.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self, type1, type2)
class VerletListAdressMorseLocal(InteractionLocal, interaction_VerletListAdressMorse):
    """AdResS Verlet-list interaction using the Morse potential."""

    def __init__(self, vl, fixedtupleList):
        # Construct the C++ backend only on participating worker CPUs.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListAdressMorse, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        # Set the atomistic-resolution potential for the (type1, type2) pair.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        # Set the coarse-grained potential for the (type1, type2) pair.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # FIX: copy-paste bug — this previously forwarded to setPotentialAT,
            # silently replacing the atomistic potential instead of the CG one.
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressMorseLocal(InteractionLocal, interaction_VerletListHadressMorse):
    """H-AdResS Verlet-list interaction using the Morse potential."""

    def __init__(self, vl, fixedtupleList):
        # Construct the C++ backend only on participating worker CPUs.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListHadressMorse, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        # Set the atomistic-resolution potential for the (type1, type2) pair.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        # Set the coarse-grained potential for the (type1, type2) pair.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # FIX: copy-paste bug — this previously forwarded to setPotentialAT,
            # silently replacing the atomistic potential instead of the CG one.
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class CellListMorseLocal(InteractionLocal, interaction_CellListMorse):
    """Cell-list interaction using the Morse potential."""

    def __init__(self, stor):
        # Construct the C++ backend only on participating worker CPUs.
        onWorker = (not (pmi._PMIComm and pmi._PMIComm.isActive())
                    or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
        if onWorker:
            cxxinit(self, interaction_CellListMorse, stor)

    def setPotential(self, type1, type2, potential):
        # Assign *potential* to the (type1, type2) particle-type pair.
        onWorker = (not (pmi._PMIComm and pmi._PMIComm.isActive())
                    or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
        if onWorker:
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListMorseLocal(InteractionLocal, interaction_FixedPairListMorse):
    # Morse interaction evaluated over an explicit, fixed list of particle pairs.
    def __init__(self, system, vl, potential):
        # Construct the C++ backend only on participating worker CPUs.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListMorse, system, vl, potential)

    def setPotential(self, potential):
        # Replace the potential used for every pair in the fixed list.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)
if pmi.isController:
    # Controller-side proxy classes: method calls and property accesses are
    # forwarded via PMI to the *Local implementations on the worker CPUs.
    class Morse(Potential):
        'The Morse potential.'
        # NOTE(review): unlike the Interaction proxies below, no explicit
        # __metaclass__ is set here — presumably the Potential base class
        # already provides the pmi.Proxy metaclass; confirm.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.MorseLocal',
            pmiproperty = ['epsilon', 'alpha', 'rMin']
        )

    class VerletListMorse(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListMorseLocal',
            pmicall = ['setPotential','getPotential']
        )

    class VerletListAdressMorse(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListAdressMorseLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
        )

    class VerletListHadressMorse(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListHadressMorseLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
        )

    class CellListMorse(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CellListMorseLocal',
            pmicall = ['setPotential']
        )

    class FixedPairListMorse(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListMorseLocal',
            pmicall = ['setPotential']
        )
|
capoe/espressopp.soap
|
src/interaction/Morse.py
|
Python
|
gpl-3.0
| 9,678
|
[
"ESPResSo"
] |
9dfbf0423bcda5927f1ac7f496dcd24cc8982a7ce0f6b5d99d441dd935420de2
|
#!/usr/bin/env python
"""
dirac-rss-query-dtcache
Select/Add/Delete a new DownTime entry for a given Site or Service.
Usage:
dirac-rss-query-dtcache [option] <query>
Queries:
[select|add|delete]
Options:
--downtimeID= The ID of the downtime
--element= Element (Site, Service) affected by the downtime
--name= Name of the element
--startDate= Starting date of the downtime
--endDate= Ending date of the downtime
--severity= Severity of the downtime (Warning, Outage)
--description= Description of the downtime
--link= URL of the downtime announcement
--ongoing To force "select" to return the ongoing downtimes
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
import datetime
from DIRAC import gLogger, exit as DIRACExit, version
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.PrettyPrint import printTable
from DIRAC.ResourceStatusSystem.Utilities import Utils
# VCS keyword kept for DIRAC's release tooling.
__RCSID__ = '$Id:$'

# Filled in by the __main__ section at the bottom of this script; declared at
# module level so the helper functions (error, confirm, ...) can reach them.
subLogger = None
switchDict = {}
def registerSwitches():
    '''
    Registers all switches that can be used while calling the script from the
    command line interface.
    '''
    switchDefinitions = (
        ( 'downtimeID=', 'ID of the downtime' ),
        ( 'element=', 'Element (Site, Service) affected by the downtime' ),
        ( 'name=', 'Name of the element' ),
        ( 'startDate=', 'Starting date of the downtime' ),
        ( 'endDate=', 'Ending date of the downtime' ),
        ( 'severity=', 'Severity of the downtime (Warning, Outage)' ),
        ( 'description=', 'Description of the downtime' ),
        ( 'link=', 'URL of the downtime announcement' ),
        ( 'ongoing', 'To force "select" to return the ongoing downtimes' )
    )
    # No short form for any switch, hence the empty first argument.
    for longName, helpText in switchDefinitions:
        Script.registerSwitch( '', longName, helpText )
def registerUsageMessage():
    '''
    Takes the script __doc__ and adds the DIRAC version to it
    '''
    # Prefix the module docstring with the running DIRAC version.
    Script.setUsageMessage( 'DIRAC version: %s \n%s' % ( version, __doc__ ) )
def parseSwitches():
    '''
    Parses the arguments passed by the user.

    Returns the (args, switches) pair where args[0] is the validated query
    name ('select', 'add' or 'delete') and switches is a dict of the CLI
    options with missing ones defaulted to None.  Exits via error() on any
    invalid combination.
    '''
    Script.parseCommandLine( ignoreErrors = True )
    args = Script.getPositionalArgs()
    if not args:
        error( "Missing mandatory 'query' argument" )
    elif not args[0].lower() in ( 'select', 'add', 'delete' ):
        error( "Missing mandatory argument" )
    else:
        query = args[0].lower()

    switches = dict( Script.getUnprocessedSwitches() )

    # Default values
    switches.setdefault( 'downtimeID', None )
    switches.setdefault( 'element', None )
    switches.setdefault( 'name', None )
    switches.setdefault( 'startDate', None )
    switches.setdefault( 'endDate', None )
    switches.setdefault( 'severity', None )
    switches.setdefault( 'description', None )
    switches.setdefault( 'link', None )

    # Cross-validation of query vs. switches.
    if query in ( 'add', 'delete' ) and switches['downtimeID'] is None:
        error( "'downtimeID' switch is mandatory for '%s' but found missing" % query )
    if query in ( 'add', 'delete' ) and 'ongoing' in switches:
        error( "'ongoing' switch can be used only with 'select'" )

    subLogger.debug( "The switches used are:" )
    # Python 2 idiom: map() eagerly applies debug() to each (key, value) pair.
    map( subLogger.debug, switches.iteritems() )

    return ( args, switches )
#...............................................................................
# UTILS: for filtering 'select' output
def filterDate( selectOutput, start, end ):
    '''
    Selects all the downtimes that meet the constraints of 'start' and 'end'
    dates.  *start*/*end* are date strings (or None); a downtime is kept when
    its interval lies within the given bound(s).  Invalid date strings abort
    the script via error().
    '''
    downtimes = selectOutput
    downtimesFiltered = []

    if start is not None:
        try:
            start = Time.fromString( start )
        except Exception:  # FIX: narrowed from a bare except
            # error() exits the script, so execution does not continue here.
            error( "datetime format is incorrect, pls try [%Y-%m-%d[ %H:%M:%S]]" )
        start = Time.toEpoch( start )

    if end is not None:
        try:
            end = Time.fromString( end )
        except Exception:  # FIX: narrowed from a bare except
            error( "datetime format is incorrect, pls try [%Y-%m-%d[ %H:%M:%S]]" )
        end = Time.toEpoch( end )

    if start is not None and end is not None:
        # Keep downtimes fully contained in [start, end].
        for dt in downtimes:
            dtStart = Time.toEpoch( dt[ 'startDate' ] )
            dtEnd = Time.toEpoch( dt[ 'endDate' ] )
            if ( dtStart >= start ) and ( dtEnd <= end ):
                downtimesFiltered.append( dt )
    elif start is not None:
        for dt in downtimes:
            dtStart = Time.toEpoch( dt[ 'startDate' ] )
            if dtStart >= start:
                downtimesFiltered.append( dt )
    elif end is not None:
        for dt in downtimes:
            dtEnd = Time.toEpoch( dt[ 'endDate' ] )
            if dtEnd <= end:
                downtimesFiltered.append( dt )
    else:
        # No bounds given: return the input unchanged.
        downtimesFiltered = downtimes

    return downtimesFiltered
def filterOngoing( selectOutput ):
    '''
    Selects all the ongoing downtimes (those whose interval contains "now").
    '''
    now = Time.toEpoch( Time.dateTime() )
    return [ dt for dt in selectOutput
             if Time.toEpoch( dt[ 'startDate' ] ) <= now <= Time.toEpoch( dt[ 'endDate' ] ) ]
def filterDescription( selectOutput, description ):
    '''
    Selects all the downtimes whose 'description' field contains *description*.
    With description=None the input is returned unfiltered.
    '''
    if description is None:
        return selectOutput
    return [ dt for dt in selectOutput if description in dt[ 'description' ] ]
#...............................................................................
# Utils: for formatting query output and notifications
def error( msg ):
    '''
    Format error messages.

    Logs *msg*, prints the script usage and terminates the process with
    exit code 1 — this function never returns.
    '''
    subLogger.error( "\nERROR:" )
    subLogger.error( "\t" + msg )
    subLogger.error( "\tPlease, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )
def confirm( query, matches ):
    '''
    Format confirmation messages: report that *query* succeeded and how many
    rows it matched.
    '''
    subLogger.notice( "\nNOTICE: '%s' request successfully executed ( matches' number: %s )! \n" % ( query, matches ) )
def tabularPrint( table ):
    # Pretty-print a list of row dicts through DIRAC's printTable.
    # NOTE(review): column headers are taken from the first row's key order —
    # assumes every row shares the same keys; confirm for sparse results.
    columns_names = table[0].keys()
    records = []
    for row in table:
        record = []
        for k,v in row.items():
            if isinstance( v, datetime.datetime ):
                # Render datetimes through DIRAC's canonical string form.
                record.append( Time.toString( v ) )
            elif v is None:
                # Show blanks instead of the literal 'None'.
                record.append( '' )
            else:
                record.append( v )
        records.append( record )

    output = printTable( columns_names, records, numbering = False,
                         columnSeparator = " | ", printOut = False )
    subLogger.notice( output )
#...............................................................................
def select( switchDict ):
    '''
    Given the switches, request a query 'select' on the ResourceManagementDB
    that gets from DowntimeCache all rows that match the parameters given.

    Returns a result dict with 'OK', 'output' (list of row dicts), 'match'
    (row count) and 'Message'; on client failure the raw client result is
    returned unchanged.
    '''
    rmsClient = ResourceManagementClient()

    meta = { 'columns' : [ 'DowntimeID', 'Element', 'Name', 'StartDate', 'EndDate',
                           'Severity', 'Description', 'Link', 'DateEffective' ] }

    result = { 'output': None, 'OK': None, 'Message': None, 'match': None }
    # startDate/endDate/description are deliberately NOT passed to the DB
    # query; they are applied client-side by the filter* helpers below.
    output = rmsClient.selectDowntimeCache( downtimeID = switchDict[ 'downtimeID' ],
                                            element = switchDict[ 'element' ],
                                            name = switchDict[ 'name' ],
                                            severity = switchDict[ 'severity' ],
                                            meta = meta )
    if not output['OK']:
        return output

    # Turn each row tuple into a columnName -> value dict.
    result['output'] = [ dict( zip( output[ 'Columns' ], dt ) ) for dt in output[ 'Value' ] ]
    if 'ongoing' in switchDict:
        result['output'] = filterOngoing( result['output'] )
    else:
        result['output'] = filterDate( result['output'], switchDict[ 'startDate' ],
                                       switchDict[ 'endDate' ] )
    result['output'] = filterDescription( result['output'], switchDict[ 'description' ] )
    result['match'] = len( result['output'] )
    result['OK'] = True
    # FIX: use the capitalised 'Message' key initialised above (and used by
    # delete() and run()); the lowercase 'message' variant was never read.
    result['Message'] = output['Message'] if 'Message' in output else None

    return result
def add( switchDict ):
    '''
    Given the switches, request a query 'addOrModify' on the ResourceManagementDB
    that inserts or updates-if-duplicated from DowntimeCache.

    Returns a result dict with 'OK', 'match' (affected-row count) and
    'Message'; on client failure the raw client result is returned unchanged.
    '''
    rmsClient = ResourceManagementClient()

    result = { 'output': None, 'OK': None, 'Message': None, 'match': None }
    output = rmsClient.addOrModifyDowntimeCache( downtimeID = switchDict[ 'downtimeID' ],
                                                 element = switchDict[ 'element' ],
                                                 name = switchDict[ 'name' ],
                                                 startDate = switchDict[ 'startDate' ],
                                                 endDate = switchDict[ 'endDate' ],
                                                 severity = switchDict[ 'severity' ],
                                                 description = switchDict[ 'description' ],
                                                 link = switchDict[ 'link' ] )
    if not output['OK']:
        return output

    if output['Value']:
        result['match'] = int( output['Value'] )
    result['OK'] = True
    # FIX: use the capitalised 'Message' key initialised above (and used by
    # delete() and run()); the lowercase 'message' variant was never read.
    result['Message'] = output['Message'] if 'Message' in output else None

    return result
def delete( switchDict ):
    '''
    Given the switches, request a query 'delete' on the ResourceManagementDB
    that deletes from DowntimeCache all rows that match the parameters given.

    Returns a result dict with 'OK', 'match' (deleted-row count) and
    'Message'; on client failure the raw client result is returned unchanged.
    '''
    rmsClient = ResourceManagementClient()

    result = { 'output': None, 'OK': None, 'Message': None, 'match': None }
    output = rmsClient.deleteDowntimeCache( downtimeID = switchDict[ 'downtimeID' ],
                                            element = switchDict[ 'element' ],
                                            name = switchDict[ 'name' ],
                                            startDate = switchDict[ 'startDate' ],
                                            endDate = switchDict[ 'endDate' ],
                                            severity = switchDict[ 'severity' ],
                                            description = switchDict[ 'description' ],
                                            link = switchDict[ 'link' ]
                                            #dateEffective = switchDict[ 'dateEffective' ]
                                          )
    if not output['OK']:
        return output
    if output['Value']:
        result['match'] = int( output['Value'] )
    result['OK'] = True
    result['Message'] = output['Message'] if 'Message' in output else None

    return result
#...............................................................................
def run( args, switchDict ):
    '''
    Main function of the script: execute the requested query and print the
    outcome.  args[0] has already been validated by parseSwitches() to be one
    of 'select', 'add' or 'delete'.
    '''
    query = args[0]

    # FIX: dispatch through an explicit table instead of eval() — same
    # behaviour, but no dynamic code execution.
    queries = { 'select': select, 'add': add, 'delete': delete }
    result = queries[ query ]( switchDict )

    if result[ 'OK' ]:
        if query == 'select' and result['match'] > 0:
            tabularPrint( result[ 'output' ] )
        confirm( query, result['match'] )
    else:
        error( result[ 'Message' ] )
#...............................................................................
if __name__ == "__main__":
subLogger = gLogger.getSubLogger( __file__ )
#Script initialization
registerSwitches()
registerUsageMessage()
args, switchDict = parseSwitches()
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient')
#Run script
run( args, switchDict )
#Bye
DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Andrew-McNab-UK/DIRAC
|
ResourceStatusSystem/scripts/dirac-rss-query-dtcache.py
|
Python
|
gpl-3.0
| 12,464
|
[
"DIRAC"
] |
ad52d668763403a7423610de9cfb8864007af87ef3c8e88cbe5b21b0bd8d876d
|
# -*- coding: utf-8 -*-
"""
Insert temporary allocations and deallocations into the IR.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import interp, visit, transform, Op, Builder, ops
from pykit import types
def insert_allocations(func, env):
    """Insert temporary 'alloc'/'dealloc' ops around each 'ckernel' op in *func*.

    Every ckernel result is redirected into a fresh temporary allocation; the
    allocation is emitted just before the ckernel and deallocated after its
    last use, unless the value is returned from the function.
    """
    b = Builder(func)

    # IR positions and list of ops
    positions = dict((op, idx) for idx, op in enumerate(func.ops))
    oplist = list(func.ops)

    for op in func.ops:
        if op.opcode == 'ckernel':
            ckernel, args = op.args
            alloc = Op('alloc', op.type, args=[])

            # TODO: Insert alloc in args list of ckernel

            # Replace uses of ckernel with temporary allocation
            op.replace_uses(alloc)
            op.set_args([ckernel, [alloc] + args])

            # Emit allocation before first use
            b.position_before(op)
            b.emit(alloc)

            # Emit deallocation after last use, unless we are returning
            # the result
            idx = max(positions[u] for u in func.uses[alloc])
            last_op = oplist[idx]
            if not last_op.opcode == 'ret':
                b.position_after(last_op)
                dealloc = Op('dealloc', types.Void, [alloc])
                b.emit(dealloc)

    return func, env

# Pass entry point expected by the pipeline driver.
run = insert_allocations
|
XinSong/blaze
|
blaze/compute/air/frontend/allocation.py
|
Python
|
bsd-3-clause
| 1,323
|
[
"VisIt"
] |
4640c7895e547a5cf77f11b731532d746677f994074e47426ec289c4d3b1fda2
|
#!/usr/bin/env python
# A python script to parse log file to generate cross-reference information
# and print out the cross-reference information based on the input.
# To run the script, please do
# python CrossReferenceConsole.py -l <logFileDir> -r <VistA-Repository-Dir> -d <Dox-Repository-Dir>
# enter quit to exit
#---------------------------------------------------------------------------
# Copyright 2011 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import sys
from datetime import datetime, date, time
import csv
from LogManager import logger, initConsoleLogging
import logging
from CrossReferenceBuilder import CrossReferenceBuilder
routineName = re.compile("^R:(?P<name>[^ ]+)")
packageName = re.compile("^P:(?P<name>.*)")
globalName = re.compile("^G:(?P<name>.*)")
#===============================================================================
# interface to generated the output based on a routine, global, package
#===============================================================================
class RoutineVisit:
    """Visitor interface: an operation applied to a single routine."""

    def visitRoutine(self, routine, outputDir=None):
        """No-op by default; subclasses override to process *routine*."""
        return None
class PackageVisit:
    """Visitor interface: an operation applied to a single package."""

    def visitPackage(self, package, outputDir=None):
        """No-op by default; subclasses override to process *package*."""
        return None
#===============================================================================
# Default implementation of the routine Visit
#===============================================================================
class DefaultRoutineVisit(RoutineVisit):
    """Default routine visitor: delegate to the routine's own printer."""
    def visitRoutine(self, routine, outputDir=None):
        # outputDir is unused; output goes wherever printResult() writes.
        routine.printResult()
#===============================================================================
#
#===============================================================================
class GraphvizCallGraphRoutineVisit(CallerGraphParser.RoutineVisit):
    # NOTE(review): CallerGraphParser is not in this file's visible import
    # block — referencing it here raises NameError at class-creation time.
    # Confirm whether an "import CallerGraphParser" was dropped.
    def visitRoutine(self, routine, outputDir):
        # Intentionally disabled; intended call:
        # generateRoutineDependencyGraph(routine, outputDir)
        pass
#===============================================================================
#
#===============================================================================
class GraphvizCallerGraphRoutineVisit(CallerGraphParser.RoutineVisit):
    # NOTE(review): CallerGraphParser is not in this file's visible import
    # block — referencing it here raises NameError at class-creation time.
    def visitRoutine(self, routine, outputDir):
        # Intentionally disabled; intended call (False => caller graph):
        # generateRoutineDependencyGraph(routine, outputDir, False)
        pass
#===============================================================================
#
#===============================================================================
class GraphvizPackageDependencyVisit(CallerGraphParser.PackageVisit):
    # NOTE(review): CallerGraphParser is not in this file's visible import
    # block — referencing it here raises NameError at class-creation time.
    def visitPackage(self, package, outputDir):
        # True => graph of packages this package depends on.
        generatePackageDependencyGraph(package, outputDir, True)
#===============================================================================
#
#===============================================================================
class GraphvizPackageDependentVisit(CallerGraphParser.PackageVisit):
    # NOTE(review): CallerGraphParser is not in this file's visible import
    # block — referencing it here raises NameError at class-creation time.
    def visitPackage(self, package, outputDir):
        # False => graph of packages that depend on this package.
        generatePackageDependencyGraph(package, outputDir, False)
#===============================================================================
#
#===============================================================================
class CplusRoutineVisit(CallerGraphParser.RoutineVisit):
    """Visitor that writes a routine out as a pseudo-C++ source file (one file
    per routine, under <outputDir>/<packageName>/) so that doxygen can build
    call graphs from the generated sources."""
    def visitRoutine(self, routine, outputDir):
        """Generate the pseudo-C++ file for *routine* under *outputDir*.

        Logs and returns early when the routine has no known callees or does
        not belong to a package.
        """
        # Fetch the name first: the original referenced routineName in the
        # warning below before it was assigned (NameError on that path).
        routineName = routine.getName()
        calledRoutines = routine.getCalledRoutines()
        if not calledRoutines:
            logger.warn("No called Routines found! for routine:%s" % routineName)
            return
        if not routine.getPackage():
            logger.error("ERROR: routine: %s does not belong to a package" % routineName)
            return
        packageName = routine.getPackage().getName()
        # Compute dirName before the try so the except handler can log it
        # even if makedirs fails immediately.
        dirName = os.path.join(outputDir, packageName)
        try:
            if not os.path.exists(dirName):
                os.makedirs(dirName)
        except OSError as e:
            logger.error("Error making dir %s : Error: %s" % (dirName, e))
            return
        # 'with' guarantees the handle is closed even on write errors
        # (the original leaked the file object).
        with open(os.path.join(dirName, routineName), 'w') as outputFile:
            outputFile.write(("/*! \\namespace %s \n") % (packageName))
            outputFile.write("*/\n")
            outputFile.write("namespace %s {" % packageName)
            outputFile.write("/* Global Vars: */\n")
            for var in routine.getGlobalVariables():
                outputFile.write(" int %s;\n" % var)
            outputFile.write("\n")
            outputFile.write("/* Naked Globals: */\n")
            # getNakeGlobals() was missing its call parentheses, so the loop
            # iterated over a bound method object and raised TypeError.
            for var in routine.getNakeGlobals():
                outputFile.write(" int %s;\n" % var)
            outputFile.write("\n")
            outputFile.write("/* Marked Items: */\n")
            for var in routine.getMarkedItems():
                outputFile.write(" int %s;\n" % var)
            outputFile.write("\n")
            outputFile.write("/*! \callgraph\n")
            outputFile.write("*/\n")
            # self.name does not exist on this visitor; the routine's own
            # name is what belongs in the generated function signature.
            outputFile.write("void " + routineName + "(){\n")
            outputFile.write("/* Local Vars: */\n")
            for var in routine.getLocalVariables():
                outputFile.write(" int %s; \n" % var)
            outputFile.write("/* Called Routines: */\n")
            for var in calledRoutines:
                outputFile.write(" %s ();\n" % var)
            outputFile.write("}\n")
            outputFile.write("}// end of namespace")
#===============================================================================
# Default implementation of the package Visit
#===============================================================================
class DefaultPackageVisit(PackageVisit):
    """Default package visitor: dump the package via its own printResult()."""
    def visitPackage(self, package, outputDir=None):
        """Print the package's cross-reference info; *outputDir* is unused."""
        package.printResult()
def printRoutine(crossRef, routineName, visitor=None):
    """Look up *routineName* in *crossRef* and dispatch it to *visitor*.

    visitor defaults to a fresh DefaultRoutineVisit.  (The original created
    the default instance at import time and shared it across all calls.)
    Logs an error when the routine is not found.
    """
    if visitor is None:
        visitor = DefaultRoutineVisit()
    routine = crossRef.getRoutineByName(routineName)
    if routine:
        visitor.visitRoutine(routine)
    else:
        logger.error("Routine: %s Not Found!" % routineName)
def printPackage(crossRef, packageName, visitor=None):
    """Look up *packageName* in *crossRef* and dispatch it to *visitor*.

    visitor defaults to a fresh DefaultPackageVisit.  (The original created
    the default instance at import time and shared it across all calls.)
    Logs an error when the package is not found.
    """
    if visitor is None:
        visitor = DefaultPackageVisit()
    package = crossRef.getPackageByName(packageName)
    if package:
        visitor.visitPackage(package)
    else:
        logger.error("Package: %s Not Found!" % packageName)
def printGlobal(crossRef, globalName, visitor=None):
    """Print info for the named global; use *visitor* when one is supplied,
    otherwise fall back to the global's own printResult()."""
    globalVar = crossRef.getGlobalByName(globalName)
    if not globalVar:
        logger.error("Global: %s Not Found!" % globalName)
        return
    if visitor:
        visitor.visitGlobal(globalVar)
    else:
        globalVar.printResult()
def findRoutinesWithMostOfCallers(crossRef):
    """Print the routine with the most callers and the routine with the most
    callees in the cross reference.

    crossRef: object exposing getAllRoutines() -> dict of name -> routine.
    """
    allRoutines = crossRef.getAllRoutines()
    if not allRoutines:
        # Guard: the original crashed with AttributeError on an empty dict.
        print("No routines found!")
        return
    maxCallerRoutine = None
    maxCalledRoutine = None
    # .values() instead of py2-only itervalues() keeps this py2/py3 portable.
    for routine in allRoutines.values():
        if maxCallerRoutine is None or routine.getTotalCaller() > maxCallerRoutine.getTotalCaller():
            maxCallerRoutine = routine
        if maxCalledRoutine is None or routine.getTotalCalled() > maxCalledRoutine.getTotalCalled():
            maxCalledRoutine = routine
    print("Max Caller Routine is %s, package: %s, total Caller: %d" % (maxCallerRoutine,
                                                                      maxCallerRoutine.getPackage(),
                                                                      maxCallerRoutine.getTotalCaller()))
    print("Max Called Routine is %s, package: %s, total Called: %d" % (maxCalledRoutine,
                                                                      maxCalledRoutine.getPackage(),
                                                                      maxCalledRoutine.getTotalCalled()))
def findPackagesWithMostOfDependency(crossRef):
    """Print the package with the most routine dependencies and the package
    with the most routine dependents.

    crossRef: object exposing getAllPackages() -> dict of name -> package.
    """
    allPackages = crossRef.getAllPackages()
    if not allPackages:
        # Guard: the original crashed with AttributeError on an empty dict.
        print("No packages found!")
        return
    maxPackageDependency = None
    maxPackageDependent = None
    # .values() instead of py2-only itervalues() keeps this py2/py3 portable.
    for package in allPackages.values():
        if (maxPackageDependency is None or
                len(package.getPackageRoutineDependencies()) > len(maxPackageDependency.getPackageRoutineDependencies())):
            maxPackageDependency = package
        if (maxPackageDependent is None or
                len(package.getPackageRoutineDependents()) > len(maxPackageDependent.getPackageRoutineDependents())):
            maxPackageDependent = package
    print("Max Dependency package: %s, total Dependencies: %d" % (maxPackageDependency.getName(),
                                                                  len(maxPackageDependency.getPackageRoutineDependencies())))
    print("Max Dependent package: %s, total Dependents: %d" % (maxPackageDependent.getName(),
                                                               len(maxPackageDependent.getPackageRoutineDependents())))
def normalizePackageName(packageName):
    """Return *packageName* with spaces and hyphens replaced by underscores,
    producing a legal Graphviz node identifier."""
    return packageName.replace(' ', '_').replace('-', '_')
# this is too big to generate the whole graph
def generateAllPackageDependencyGraph(allPackages, outputFile):
    """Write a Graphviz dot file with one node per package and an edge for
    every package -> dependency relationship.

    allPackages: dict mapping package name -> package object.
    outputFile: path of the .dot file to create.
    """
    # 'with' guarantees the handle is flushed and closed (the original
    # leaked it); plain iteration / .values() replaces the py2-only
    # iterkeys()/itervalues() calls.
    with open(outputFile, 'w') as output:
        output.write("digraph allPackage{\n")
        output.write("\tnode [shape=box fontsize=11];\n")
        output.write("\tnodesep=0.45;\n")
        for name in allPackages:
            output.write("\t%s [label=\"%s\"];\n" % (normalizePackageName(name), name))
        for package in allPackages.values():
            for depPack in package.getPackageDependencies():
                output.write("\t %s->%s;\n" % (normalizePackageName(package.getName()),
                                               normalizePackageName(depPack.getName())))
        output.write("}\n")
def generateAllPackageDependencyList(allPackages):
    """Print and return the set of unique undirected package dependency
    pairs, each formatted "A-B" (only one of "A-B"/"B-A" is kept).

    allPackages: dict mapping package name -> package object.
    """
    dependentList = set()
    # .values() / direct dict iteration replaces py2-only itervalues()/iterkeys().
    for package in allPackages.values():
        for depPack in package.getPackageDependencies():
            name = "%s-%s" % (package, depPack)
            name1 = "%s-%s" % (depPack, package)
            if name not in dependentList and name1 not in dependentList:
                dependentList.add(name)
    print("Total # items is %d" % len(dependentList))
    print(sorted(dependentList))
    # Returning the set is new but backward-compatible (the original
    # implicitly returned None, which no caller could have used).
    return dependentList
def printAllPercentRoutines(crossReference, outputFile=None):
    """Print all '%'-prefixed routines (10 per line) to stdout.

    crossReference: object exposing getAllPercentRoutine() -> iterable of names.
    outputFile: optional path of a CSV file to also write the names to.
    """
    allRoutines = crossReference.getAllPercentRoutine()
    sortedRoutine = sorted(allRoutines)
    print("Total # of Percent routines: %d" % len(allRoutines))
    # Keep the CSV handle in its own variable: the original rebound the
    # outputFile *parameter* to the file object, and never closed it.
    csvFile = None
    csvWriter = None
    if outputFile:
        csvFile = open(outputFile, "wb")  # binary mode: python-2 csv convention
        csvWriter = csv.writer(csvFile)
    try:
        for index, routineName in enumerate(sortedRoutine):
            sys.stdout.write(" %s " % routineName)
            if csvWriter:
                csvWriter.writerow([routineName, "", ""])
            if (index + 1) % 10 == 0:
                sys.stdout.write("\n")
        sys.stdout.write("\n")
    finally:
        if csvFile:
            csvFile.close()
def printOrphanGlobals(crossRef):
    """Summarize orphan globals grouped by their top-level name.

    Parses each orphan global of the form ^NAME(index...), collects the
    distinct index strings per top-level ^NAME, prints the grouping, and
    finally prints which top-level names resolve to a known global
    (labelled "FileMan File") via crossRef.getGlobalByName().
    Unparseable names are reported on stderr.
    """
    orphanGlobals = crossRef.getOrphanGlobals()
    sortedGlobals = sorted(orphanGlobals)
    # NOTE(review): this counter is never used as a counter; the loop below
    # rebinds `index` to the regex's Index group, and the code that used the
    # counter is commented out.
    index = 0
    topLevel = dict()
    # Name = leading ^-prefixed token (up to a space or '('), Index = the rest.
    topLevelRegex = re.compile("(?P<Name>^\^[^ \(]+)\(?(?P<Index>.*)")
    for globalName in sortedGlobals:
        result = topLevelRegex.search(globalName)
        if result:
            varName = result.group('Name')
            index = result.group('Index')
            if varName not in topLevel:
                topLevel[varName] = set()
            topLevel[varName].add(index)
        else:
            sys.stderr.write("Could not parse global %s\n" % globalName)
            continue
        #sys.stdout.write(" %s " % globalName)
        #if (index + 1) % 10 == 0:
        #    sys.stdout.write("\n")
        #index += 1
    print ("Total # of top level orphan globals: %d" % len(topLevel))
    topLevelFileMan = set()
    for key in sorted(topLevel.keys()):
        sys.stdout.write("%s %s\n" % (key,topLevel[key]))
        # Top-level names that the cross reference knows about are assumed
        # to be FileMan files — TODO confirm against getGlobalByName().
        globalVar = crossRef.getGlobalByName(key)
        if globalVar:
            topLevelFileMan.add(globalVar)
    sys.stdout.write("\n")
    print ("Total # of top level FileMan File: %d\n" % len(topLevelFileMan))
    sys.stdout.write("%s\n" % (topLevelFileMan))
    sys.stdout.write("\n")
def printUsage():
    """Print the interactive console's help text, one command per line."""
    usageLines = (
        "Please enter quit to exit",
        "Please enter help for usage",
        "please enter orphan_routine to print orphan routines",
        "please enter orphan_global to print orphan globals",
        "please enter max_call to print routines with max caller and max called routines",
        "please enter max_dep to print packages with max dependencies and max dependents",
        "please enter gen_allpack to generate all packages dependency list",
        "please enter all_percent to print all routines start with %",
        "please enter R:<routineName> to print all information related to a routine",
        "please enter G:<globalname> to print all information related to a global",
        "please enter P:<packageName> to print all information related to a package",
    )
    for line in usageLines:
        print(line)
if __name__ == '__main__':
    # Interactive console: build the cross reference from the XINDEX logs,
    # then loop reading commands until the user enters 'quit'.
    parser = argparse.ArgumentParser(description='VistA Cross-Reference information Finder')
    parser.add_argument('-l', required=True, dest='logFileDir',
                        help='Input XINDEX log files directory generated by CTest, nomally under'
                             'CMAKE_BUILD_DIR/Docs/CallerGraph/')
    parser.add_argument('-r', required=True, dest='repositDir',
                        help='VistA Git Repository Directory')
    parser.add_argument('-d', required=True, dest='docRepositDir',
                        help='VistA Cross-Reference Git Repository Directory')
    parser.add_argument('-f', dest='fileSchemaDir',
                        help='VistA File Man Schema log Directory')
    result = vars(parser.parse_args());
    # NOTE(review): initConsoleLogging and CrossReferenceBuilder are not
    # defined in this view — presumably imported elsewhere in the file.
    initConsoleLogging()
    crossRef = CrossReferenceBuilder().buildCrossReference(result['logFileDir'],
                                                           result['repositDir'],
                                                           result['docRepositDir'],
                                                           result['fileSchemaDir'])
    # read the user input from the terminal
    isExit = False
    printUsage()
    while not isExit:
        # Python-2 raw_input: returns the typed line as a string.
        var = raw_input("Please enter the routine Name or package Name:")
        if (var == 'quit'):
            isExit = True
            continue
        if (var == 'orphan_routine'):
            # NOTE(review): orphanRoutines is undefined here — this branch
            # would raise NameError; verify against the full file.
            for routine in orphanRoutines:
                print routine
            continue
        if (var == 'orphan_global'):
            # NOTE(review): logParser looks stale — crossRef is probably the
            # intended argument; confirm before changing.
            printOrphanGlobals(logParser)
            continue
        if var == "max_call":
            findRoutinesWithMostOfCallers(crossRef)
            continue
        if var == "max_dep":
            findPackagesWithMostOfDependency(crossRef)
            continue
        if var == "gen_allpack":
            generateAllPackageDependencyList(crossRef.getAllPackages())
            continue
        if var == "all_percent":
            printAllPercentRoutines(crossRef)
            continue
        if var == "help":
            printUsage()
            continue
        # routineName / packageName / globalName are presumably module-level
        # compiled regexes with a 'name' group — defined elsewhere in the file.
        result = routineName.search(var)
        if result:
            printRoutine(crossRef, result.group('name'))
            continue
        result = packageName.search(var)
        if result:
            printPackage(crossRef, result.group('name').strip())
            continue
        result = globalName.search(var)
        if result:
            printGlobal(crossRef, result.group('name').strip())
            continue
|
JimDeanSpivey/ATF-for-Vista-FOIA
|
Dox/PythonScripts/CrossReferenceConsole.py
|
Python
|
apache-2.0
| 15,774
|
[
"VisIt"
] |
c6b56707d50d6e5fab1e7bbc0daaab0566e425eb3fe73d7e5e924f90c1bd2000
|
# $Id$
__revision__ = '$Rev$'
import sys
from itcc.molecule import read
from itcc.tinker import parameter, analyze, tinker, molparam
from itcc.torsionfit import tools, parmfit
def printefit(datfname, idxfname, param):
    '''Print a CSV table of fitting energies to stdout.

    For every structure listed in *datfname*: minimize it with TINKER under
    *param*, evaluate the torsion energy for the torsions selected by
    *idxfname*, and write E_qm, E_mm, E_tor and E_fit = E_qm - E_mm + E_tor.
    '''
    import csv
    fnames, enes, weights = tools.readdat(datfname)
    idxs, folds = parmfit.readidx(idxfname)
    params = parmfit.getparams(idxs, param)
    writer = csv.writer(sys.stdout)
    writer.writerow(['Filename', 'weight', 'E_qm', 'E_mm', 'E_tor', 'E_fit'])
    for fname, E_qm, weight in zip(fnames, enes, weights):
        # open() replaces the Python-2-only file() builtin.
        mol = read.readxyz(open(fname))
        tors = analyze.gettorsbytype(mol, idxs)
        newmol, E_mm = tinker.minimize_file(fname, param)
        E_tor = parmfit.getetor(newmol, tors, params)
        E_fit = E_qm - E_mm + E_tor
        writer.writerow([fname, weight, E_qm, E_mm, E_tor, E_fit])
def main():
    """Command-line entry point: printefit(datfname, idxfname, param).

    Exits with status 1 (after printing usage to stderr) unless exactly
    three arguments are supplied.
    """
    if len(sys.argv) != 4:
        import os.path
        # sys.stderr.write works on both py2 and py3 (the original used the
        # py2-only "print >> sys.stderr" statement).
        sys.stderr.write('Usage: %s datfname idxfname param\n' %
                         os.path.basename(sys.argv[0]))
        sys.exit(1)
    printefit(sys.argv[1], sys.argv[2], sys.argv[3])
if __name__ == '__main__':
    main()
|
lidaobing/itcc
|
itcc/torsionfit/printefit.py
|
Python
|
gpl-3.0
| 1,167
|
[
"TINKER"
] |
08295163fd7e12eaf93b319a438cd342d2a279ca146cb50e38503a4809aa3927
|
'''
Hodgkin-Huxley equations (1952).
Spikes are recorded along the axon, and then velocity is calculated.
'''
import os
import matplotlib
matplotlib.use('Agg')
from brian2 import *
import brian2cuda # cuda_standalone device
from scipy import stats
# Derive example/output names from this script's filename.
name = os.path.basename(__file__).replace('.py', '')
codefolder = os.path.join('code', name)
print('runing example {}'.format(name))
print('compiling model in {}'.format(codefolder))
set_device('cuda_standalone', build_on_run=False) # multiple runs require this change (see below)
defaultclock.dt = 0.01*ms
morpho = Cylinder(length=10*cm, diameter=2*238*um, n=1000, type='axon')
# Reversal potentials and maximal conductances (classic HH values,
# expressed relative to resting potential).
El = 10.613*mV
ENa = 115*mV
EK = -12*mV
gl = 0.3*msiemens/cm**2
gNa0 = 120*msiemens/cm**2
gK = 36*msiemens/cm**2
# Typical equations
eqs = '''
# The same equations for the whole neuron, but possibly different parameter values
# distributed transmembrane current
Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2
I : amp (point current) # applied current
dm/dt = alpham * (1-m) - betam * m : 1
dn/dt = alphan * (1-n) - betan * n : 1
dh/dt = alphah * (1-h) - betah * h : 1
alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz
betam = 4 * exp(-v/(18*mV))/ms : Hz
alphah = 0.07 * exp(-v/(20*mV))/ms : Hz
betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz
alphan = (0.01/mV) * (-v+10*mV) / (exp((-v+10*mV) / (10*mV)) - 1)/ms : Hz
betan = 0.125*exp(-v/(80*mV))/ms : Hz
gNa : siemens/meter**2
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs, method="exponential_euler",
                       refractory="m > 0.4", threshold="m > 0.5",
                       Cm=1*uF/cm**2, Ri=35.4*ohm*cm)
# Initial state: resting potential, HH gating-variable starting values.
neuron.v = 0*mV
neuron.h = 1
neuron.m = 0
neuron.n = .5
neuron.I = 0*amp
neuron.gNa = gNa0
M = StateMonitor(neuron, 'v', record=True)
spikes = SpikeMonitor(neuron)
# Settle, inject current briefly at one end, then let the spike propagate.
run(50*ms, report='text')
neuron.I[0] = 1*uA # current injection at one end
run(3*ms)
neuron.I = 0*amp
run(50*ms, report='text', profile=True)
# cf. https://brian2.readthedocs.io/en/stable/user/computation.html#multiple-run-calls
device.build( directory=codefolder, compile = True, run = True, debug=False)
print(profiling_summary())
# Calculation of velocity
# Conduction velocity = slope of distance-vs-time regression over spikes.
slope, intercept, r_value, p_value, std_err = stats.linregress(spikes.t/second,
                                                               neuron.distance[spikes.i]/meter)
print("Velocity = %.2f m/s" % slope)
subplot(211)
for i in range(10):
    plot(M.t/ms, M.v.T[:, i*100]/mV)
ylabel('v')
subplot(212)
plot(spikes.t/ms, spikes.i*neuron.length[0]/cm, '.k')
plot(spikes.t/ms, (intercept+slope*(spikes.t/second))/cm, 'r')
xlabel('Time (ms)')
ylabel('Position (cm)')
#show()
plotfolder = 'plots'
if not os.path.exists(plotfolder):
    os.mkdir(plotfolder)
plotpath = os.path.join(plotfolder, '{}.png'.format(name))
savefig(plotpath)
print('plot saved in {}'.format(plotpath))
print('the generated model in {} needs to removed manually if wanted'.format(codefolder))
print('DEBUG: SAVING RESULTS NPZ FILE INTO PLOTS FOLDER')
savez('plots/'+name+'_results.npz', time=spikes.t/ms, black=spikes.i*neuron.length[0]/cm, red=(intercept+slope*(spikes.t/second))/cm)
|
brian-team/brian2cuda
|
examples/compartmental/hh_with_spikes_cuda.py
|
Python
|
gpl-2.0
| 3,151
|
[
"NEURON"
] |
57004620860fda252012e1ada8bc4c717f41cd38171bdfcf36a5f3a9d2ada87e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from core import MM_InfTauInterpolatedChannel
from morphforge.units import qty
from mmwriter_infatauinterpolated import NEURONChlWriterInfTauInterpolated
from morphforge.simulation.neuron.hocmodbuilders import MM_ModFileWriterBase
from morphforge.simulation.neuron.hocmodbuilders import HocModUtils
from morphforge.simulation.neuron import NEURONChl_Base
from morphforge.constants.standardtags import StandardTags
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordable, \
NEURONRecordableOnLocation
from morphforge import units
class NEURONChl_InfTauInterpolated_Record(NEURONRecordableOnLocation):
    """Base recorder for a mod-file variable of an inf-tau interpolated channel."""

    def __init__(self, alphabeta_chl, modvar, **kwargs):
        """alphabeta_chl: the channel object; modvar: mod-file variable name."""
        super(NEURONChl_InfTauInterpolated_Record, self).__init__(**kwargs)
        self.alphabeta_chl = alphabeta_chl
        self.modvar = modvar

    def build_mod(self, modfile_set):
        """Nothing to add to the mod files for recording."""
        pass

    def build_hoc(self, hocfile_obj):
        """Emit the HOC code that records self.modvar at self.cell_location."""
        HocModUtils.create_record_from_modfile(
            hocfile_obj,
            vecname='RecVec%s' % self.name,
            cell_location=self.cell_location,
            modvariable=self.modvar,
            mod_neuronsuffix=self.alphabeta_chl.get_neuron_suffix(),
            recordobj=self)
class NEURONChl_InfTauInterpolated_CurrentDensityRecord(NEURONChl_InfTauInterpolated_Record):
    """Records the channel's transmembrane current density (mod variable 'i')."""

    def __init__(self, **kwargs):
        super(NEURONChl_InfTauInterpolated_CurrentDensityRecord, self).__init__(modvar='i', **kwargs)

    def get_std_tags(self):
        return [StandardTags.CurrentDensity]

    def get_unit(self):
        return units.parse_unit_str('mA/cm2')
class NEURONChl_InfTauInterpolated_ConductanceDensityRecord(NEURONChl_InfTauInterpolated_Record):
    """Records the channel's conductance density (mod variable 'g')."""

    def __init__(self, **kwargs):
        super(NEURONChl_InfTauInterpolated_ConductanceDensityRecord, self).__init__(modvar='g', **kwargs)

    def get_std_tags(self):
        return [StandardTags.ConductanceDensity]

    def get_unit(self):
        return units.parse_unit_str('S/cm2')
class NEURONChl_InfTauInterpolated_StateVariableRecord(NEURONChl_InfTauInterpolated_Record):
    """Records a gating state variable (dimensionless, mod variable = state name)."""

    def __init__(self, state, **kwargs):
        super(NEURONChl_InfTauInterpolated_StateVariableRecord, self).__init__(modvar=state, **kwargs)

    def get_std_tags(self):
        return [StandardTags.StateVariable]

    def get_unit(self):
        return units.dimensionless
class NEURONChl_InfTauInterpolated_StateVariableTauRecord(NEURONChl_InfTauInterpolated_Record):
    """Records a state variable's time constant (mod variable '<state>tau', in ms)."""

    def __init__(self, state, **kwargs):
        super(NEURONChl_InfTauInterpolated_StateVariableTauRecord, self).__init__(modvar=state + 'tau', **kwargs)

    def get_std_tags(self):
        return [StandardTags.StateTimeConstant]

    def get_unit(self):
        return units.ms
class NEURONChl_InfTauInterpolated_StateVariableInfRecord(NEURONChl_InfTauInterpolated_Record):
    """Records a state variable's steady state (mod variable '<state>inf')."""

    def __init__(self, state, **kwargs):
        super(NEURONChl_InfTauInterpolated_StateVariableInfRecord, self).__init__(modvar=state + 'inf', **kwargs)

    def get_std_tags(self):
        return [StandardTags.StateSteadyState]

    def get_unit(self):
        return units.dimensionless
class NEURONChl_InfTauInterpolated(MM_InfTauInterpolatedChannel, NEURONChl_Base):
    """NEURON backend implementation of the inf-tau interpolated channel."""

    def __init__(self, **kwargs):
        super(NEURONChl_InfTauInterpolated, self).__init__(**kwargs)

    def get_recordable(self, what, **kwargs):
        """Return a recorder object for the requested quantity *what*."""
        recorder_cls_map = {
            MM_InfTauInterpolatedChannel.Recordables.CurrentDensity: NEURONChl_InfTauInterpolated_CurrentDensityRecord,
            MM_InfTauInterpolatedChannel.Recordables.ConductanceDensity: NEURONChl_InfTauInterpolated_ConductanceDensityRecord,
            MM_InfTauInterpolatedChannel.Recordables.StateVar: NEURONChl_InfTauInterpolated_StateVariableRecord,
            MM_InfTauInterpolatedChannel.Recordables.StateVarSteadyState: NEURONChl_InfTauInterpolated_StateVariableInfRecord,
            MM_InfTauInterpolatedChannel.Recordables.StateVarTimeConstant: NEURONChl_InfTauInterpolated_StateVariableTauRecord,
        }
        recorder_cls = recorder_cls_map[what]
        return recorder_cls(alphabeta_chl=self, **kwargs)

    def build_hoc_section(self, cell, section, hocfile_obj, mta):
        """Delegate HOC generation to the writer helper."""
        return NEURONChlWriterInfTauInterpolated.build_hoc_section(
            cell=cell, section=section, hocfile_obj=hocfile_obj, mta=mta)

    def create_modfile(self, modfile_set):
        """Delegate MOD-file generation to the writer helper."""
        NEURONChlWriterInfTauInterpolated.build_mod(
            alphabeta_chl=self, modfile_set=modfile_set)

    def get_mod_file_changeables(self):
        """Return a dict of the attributes that may vary between mod files."""
        change_attrs = set(['conductance', 'eqn', 'statevars_new', 'reversalpotential'])
        # If this assert fires, a newly-added attribute probably needs to be
        # listed in change_attrs above.
        assert set(self.__dict__) == set(['_name', '_simulation', 'mm_neuronNumber', 'cachedNeuronSuffix']) | change_attrs
        return dict([(a, getattr(self, a)) for a in change_attrs])
# Register the channel: map the backend-neutral channel type onto its
# NEURON-specific implementation so the environment instantiates this class.
NEURONEnvironment.channels.register_plugin(MM_InfTauInterpolatedChannel, NEURONChl_InfTauInterpolated)
|
mikehulluk/morphforge
|
src/morphforgecontrib/simulation/channels/inftauinterpolated/neuron.py
|
Python
|
bsd-2-clause
| 6,808
|
[
"NEURON"
] |
ffa32d9d10693a4481cb6e71c32a1349cbaadba13ca57d33a29c8fce1b21e8b1
|
"""Linear Predictive Coding analysis and resynthesis for audio."""
import numpy as np
import scipy.linalg
import scipy.signal
def lpcfit(x, p=12, h=128, w=None, overlaps=True):
    """Perform LPC analysis of short-time windows of a waveform.
    Args:
      x: 1D np.array containing input audio waveform.
      p: int, order of LP models to fit.
      h: int, hop in samples between successive short-time windows.
      w: int, analysis window length. Defaults to 2 x h.
      overlaps: bool, if true, residuals are overlap-added between
        windows (for a continuous excitation), otherwise only the
        residual for each hop portion is kept (for perfect reconstruction).
    Returns:
      a: np.array of (n_frames, p + 1) containing the LPC filter coefficients for
        each frame.
      g: np.array of (n_frames,) giving the gain for each frame.
      e: np.array of (n_frames * h + (w - h),) giving the normalized-energy
        excitation (residual).
    """
    if not w:
        w = 2 * h
    npts = x.shape[0]
    nhops = int(npts/h)
    # Pad x with zeros so that we can extract complete w-length windows from it.
    # NOTE(review): the tail pad is int(w - h/2), not int((w-h)/2) like the
    # head — asymmetric, but generous enough for every window; confirm
    # intentional before "fixing".
    x = np.hstack([np.zeros(int((w-h)/2)), x, np.zeros(int(w-h/2))])
    a = np.zeros((nhops, p+1))
    g = np.zeros(nhops)
    if overlaps:
        e = np.zeros((nhops - 1) * h + w)
    else:
        e = np.zeros(npts)
    # Pre-emphasis
    pre = [1, -0.9]
    x = scipy.signal.lfilter(pre, 1 , x)
    for hop in np.arange(nhops):
        # Extract segment of signal.
        xx = x[hop * h + np.arange(w)]
        # Apply hanning window
        wxx = xx * np.hanning(w)
        # Form autocorrelation (calculates *way* too many points)
        rxx = np.correlate(wxx, wxx, 'full')
        # Extract just the points we need (middle p+1 points).
        rxx = rxx[w - 1 + np.arange(p + 1)]
        # Setup the normal equations
        # (solved by direct matrix inverse of the Toeplitz autocorrelation
        # matrix; relies on scipy.linalg being importable).
        coeffs = np.dot(np.linalg.inv(scipy.linalg.toeplitz(rxx[:-1])), rxx[1:])
        # Calculate residual by filtering windowed xx
        aa = np.hstack([1.0, -coeffs])
        if overlaps:
            rs = scipy.signal.lfilter(aa, 1, wxx)
        else:
            rs = scipy.signal.lfilter(aa, 1, xx[int((w - h) / 2) + np.arange(h)])
        # Per-frame gain = rms of the residual.
        G = np.sqrt(np.mean(rs**2))
        # Save filter, gain and residual
        a[hop] = aa
        g[hop] = G
        if overlaps:
            e[hop * h + np.arange(w)] += rs / G
        else:
            e[hop *h + np.arange(h)] = rs / G
    # Throw away first (win-hop)/2 pts if in overlap mode
    # for proper synchronization of resynth
    if overlaps:
        e = e[int((w - h) / 2):]
    return a, g, e
def lpcsynth(a, g, e=None, h=128, overlaps=True):
    """Resynthesize a short-time LPC analysis to audio.
    Args:
      a: np.array of (nframes, order + 1) giving the per-frame LPC filter
        coefficients.
      g: np.array of (nframes,) giving the gain for each frame.
      e: np.array of (nframes * hop + (window - hop)) giving the excitation
        signal to feed into the filters.  If an int, an impulse train with the
        specified period is used.  Defaults to Gaussian white noise.
      h: int, hop between successive reconstruction frames, in samples.
        Reconstruction window is always 2 * h.
      overlaps: bool.  If true, successive frames are windowed and overlap-
        added.  If false, we assume e contains exact residuals for each
        window, so reconstructions are similarly truncated and concatenated.
    Returns:
      1D np.array of the resynthesized waveform.
    """
    w = 2 * h
    nhops, p = a.shape
    npts = nhops * h + w
    # Excitation needs extra half-window at the end if in overlap mode
    nepts = npts + overlaps * (w - h)
    if e is None:
        # Default excitation: white noise (nondeterministic).
        e = np.random.randn(nepts)
    elif isinstance(e, int):
        # Impulse train with this period, scaled so rms is 1.
        period = e
        e = np.sqrt(period) * (
            np.mod(np.arange(nepts), period) == 0).astype(float)
    else:
        nepts = e.shape[0]
        npts = nepts + h
    # Try to make sure we don't run out of e (in overlap mode)
    e = np.hstack([e, np.zeros(w)])
    d = np.zeros(npts)
    for hop in np.arange(nhops):
        hbase = hop * h
        aa = a[hop, :]
        G = g[hop]
        if overlaps:
            d[hbase + np.arange(w)] += np.hanning(w) * (
                G * scipy.signal.lfilter([1], aa, e[hbase + np.arange(w)]))
        else:
            # BUG FIX: lfilter's numerator must be array-like; the original
            # passed the scalar 1 here, which raises a ValueError.
            d[hbase + np.arange(h)] = G * scipy.signal.lfilter(
                [1], aa, e[hbase + np.arange(h)])
    # De-emphasis (must match pre-emphasis in lpcfit)
    pre = [1, -0.9]
    d = scipy.signal.lfilter([1], pre, d)
    return d
def lpcBHenc(E, H=None, W=256, viz=False):
    """Encode an LPC residual as buzz/hiss pitch periods.

    E is a residual from LPC encoding.  For every H samples the result P
    holds an integer pitch period, or 0 for frames judged as noisy.  Pitch
    is found via autocorrelation over a window of W points.
    H defaults to W / 2; viz is accepted for interface compatibility but
    unused (the original visualization code was MATLAB-only).
    Based on 2001-03-19 dpwe@ee.columbia.edu.
    """
    if not H:
        H = int(W / 2)
    nhops = int(E.shape[0] / H)
    P = np.zeros(nhops)
    pmin = 2          # shortest candidate pitch lag
    pmax = 127        # longest candidate pitch lag (exclusive)
    pdthresh = 0.2    # peak-to-lag-pmin ratio above which a frame is "buzz"
    # Pad so that each W-point frame is centered around hop * H.
    # (W // 2: np.zeros needs an integer size; W / 2 is a float on Python 3.)
    ee = np.hstack([np.zeros(W // 2), E, np.zeros(W // 2)])
    for hop in np.arange(nhops):
        xx = ee[hop * H + np.arange(W)]
        # Autocorrelation restricted to the candidate lags pmin..pmax-1.
        rxx = np.correlate(xx, xx, 'full')[W - 1 + np.arange(pmin, pmax)]
        period = pmin + np.argmax(rxx)
        # Peak strength relative to the shortest-lag correlation.
        rratio = np.max(rxx) / rxx[0]
        if rratio > pdthresh:
            P[hop] = period
        else:
            P[hop] = 0  # Noisy period
    return P
def lpcBHdec(P, H=128):
    """Decode an LPC residual encoded as pitch periods.

    P is a vector of pitch periods from lpcBHenc (0 marks a noisy frame,
    reconstructed as white noise).  Returns a stylized excitation vector E
    with hop size H; pulse amplitude is sqrt(period) so each frame has
    rms 1.  Based on 2001-03-19 dpwe@ee.columbia.edu.
    """
    nhops = P.shape[0]
    npts = H * nhops
    E = np.zeros(npts)
    phs = 0  # Current phase as proportion of a cycle (new pulse at 1.0)
    for hop in np.arange(nhops):
        # int() guards against float-typed P (float ndarray indices raise).
        period = int(P[hop])
        # BUG FIX: frame `hop` starts at H * hop.  The original used
        # H * (hop - 1), so frame 0 was written into the *tail* of E via
        # negative indexing and the whole output was cyclically shifted.
        base = H * hop
        if period == 0:
            # Hiss frame: white-noise excitation.
            E[base + np.arange(H)] = np.random.randn(H)
        else:
            pt = 0
            # Samples until the next pulse, honouring the phase carried
            # over from the previous frame.
            remsteps = int(np.round((1 - phs) * period))
            while (pt + remsteps) < H:
                pt = pt + remsteps
                E[base + pt] = np.sqrt(period)  # so rms is 1
                remsteps = period
            # Store residual phase for the next frame
            phs = (H - pt) / float(period)
    return E
|
dpwe/elene4896
|
L06/lpc.py
|
Python
|
mit
| 6,369
|
[
"Gaussian"
] |
45fed0d62cc605bbb1a28d3039ff47d73115e5466802ccfe34f97b99f30234d8
|
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""Implement lorem provider for ``en_US`` locale.
Word list is based on the source(s) below, and some words have been removed
to make the word list appropriate for public testing.
Sources:
- http://www.ef.edu/english-resources/english-vocabulary/top-1000-words/
"""
word_list = (
"a",
"ability",
"able",
"about",
"above",
"accept",
"according",
"account",
"across",
"act",
"action",
"activity",
"actually",
"add",
"address",
"administration",
"admit",
"adult",
"affect",
"after",
"again",
"against",
"age",
"agency",
"agent",
"ago",
"agree",
"agreement",
"ahead",
"air",
"all",
"allow",
"almost",
"alone",
"along",
"already",
"also",
"although",
"always",
"American",
"among",
"amount",
"analysis",
"and",
"animal",
"another",
"answer",
"any",
"anyone",
"anything",
"appear",
"apply",
"approach",
"area",
"argue",
"arm",
"around",
"arrive",
"art",
"article",
"artist",
"as",
"ask",
"assume",
"at",
"attack",
"attention",
"attorney",
"audience",
"author",
"authority",
"available",
"avoid",
"away",
"baby",
"back",
"bad",
"bag",
"ball",
"bank",
"bar",
"base",
"be",
"beat",
"beautiful",
"because",
"become",
"bed",
"before",
"begin",
"behavior",
"behind",
"believe",
"benefit",
"best",
"better",
"between",
"beyond",
"big",
"bill",
"billion",
"bit",
"black",
"blood",
"blue",
"board",
"body",
"book",
"born",
"both",
"box",
"boy",
"break",
"bring",
"brother",
"budget",
"build",
"building",
"business",
"but",
"buy",
"by",
"call",
"camera",
"campaign",
"can",
"candidate",
"capital",
"car",
"card",
"care",
"career",
"carry",
"case",
"catch",
"cause",
"cell",
"center",
"central",
"century",
"certain",
"certainly",
"chair",
"challenge",
"chance",
"change",
"character",
"charge",
"check",
"child",
"choice",
"choose",
"church",
"citizen",
"city",
"civil",
"claim",
"class",
"clear",
"clearly",
"close",
"coach",
"cold",
"collection",
"college",
"color",
"commercial",
"common",
"community",
"company",
"compare",
"computer",
"concern",
"condition",
"conference",
"Congress",
"consider",
"consumer",
"contain",
"continue",
"control",
"cost",
"could",
"country",
"couple",
"course",
"court",
"cover",
"create",
"crime",
"cultural",
"culture",
"cup",
"current",
"customer",
"cut",
"dark",
"data",
"daughter",
"day",
"deal",
"debate",
"decade",
"decide",
"decision",
"deep",
"defense",
"degree",
"Democrat",
"democratic",
"describe",
"design",
"despite",
"detail",
"determine",
"develop",
"development",
"difference",
"different",
"difficult",
"dinner",
"direction",
"director",
"discover",
"discuss",
"discussion",
"do",
"doctor",
"dog",
"door",
"down",
"draw",
"dream",
"drive",
"drop",
"drug",
"during",
"each",
"early",
"east",
"easy",
"eat",
"economic",
"economy",
"edge",
"education",
"effect",
"effort",
"eight",
"either",
"election",
"else",
"employee",
"end",
"energy",
"enjoy",
"enough",
"enter",
"entire",
"environment",
"environmental",
"especially",
"establish",
"even",
"evening",
"event",
"ever",
"every",
"everybody",
"everyone",
"everything",
"evidence",
"exactly",
"example",
"executive",
"exist",
"expect",
"experience",
"expert",
"explain",
"eye",
"face",
"fact",
"factor",
"fall",
"family",
"far",
"fast",
"father",
"fear",
"federal",
"feel",
"feeling",
"few",
"field",
"fight",
"figure",
"fill",
"film",
"final",
"finally",
"financial",
"find",
"fine",
"finish",
"fire",
"firm",
"first",
"fish",
"five",
"floor",
"fly",
"focus",
"follow",
"food",
"foot",
"for",
"force",
"foreign",
"forget",
"form",
"former",
"forward",
"four",
"free",
"friend",
"from",
"front",
"full",
"fund",
"future",
"game",
"garden",
"gas",
"general",
"generation",
"get",
"girl",
"give",
"glass",
"go",
"goal",
"good",
"government",
"great",
"green",
"ground",
"group",
"grow",
"growth",
"guess",
"gun",
"guy",
"hair",
"half",
"hand",
"happen",
"happy",
"hard",
"have",
"he",
"head",
"health",
"hear",
"heart",
"heavy",
"help",
"her",
"here",
"herself",
"high",
"him",
"himself",
"his",
"history",
"hit",
"hold",
"home",
"hope",
"hospital",
"hot",
"hotel",
"hour",
"house",
"how",
"however",
"huge",
"human",
"hundred",
"husband",
"I",
"idea",
"identify",
"if",
"image",
"imagine",
"impact",
"important",
"improve",
"in",
"include",
"including",
"increase",
"indeed",
"indicate",
"individual",
"industry",
"information",
"inside",
"instead",
"institution",
"interest",
"interesting",
"international",
"interview",
"into",
"investment",
"involve",
"issue",
"it",
"item",
"its",
"itself",
"job",
"join",
"just",
"keep",
"key",
"kid",
"kind",
"kitchen",
"know",
"knowledge",
"land",
"language",
"large",
"last",
"late",
"later",
"laugh",
"law",
"lawyer",
"lay",
"lead",
"leader",
"learn",
"least",
"leave",
"left",
"leg",
"less",
"let",
"letter",
"level",
"life",
"light",
"like",
"likely",
"line",
"list",
"listen",
"little",
"live",
"local",
"long",
"look",
"lose",
"loss",
"lot",
"low",
"machine",
"magazine",
"main",
"maintain",
"major",
"majority",
"make",
"man",
"manage",
"management",
"manager",
"many",
"market",
"marriage",
"material",
"matter",
"may",
"maybe",
"me",
"mean",
"measure",
"media",
"medical",
"meet",
"meeting",
"member",
"memory",
"mention",
"message",
"method",
"middle",
"might",
"military",
"million",
"mind",
"minute",
"miss",
"mission",
"model",
"modern",
"moment",
"money",
"month",
"more",
"morning",
"most",
"mother",
"mouth",
"move",
"movement",
"movie",
"Mr",
"Mrs",
"much",
"music",
"must",
"my",
"myself",
"name",
"nation",
"national",
"natural",
"nature",
"near",
"nearly",
"necessary",
"need",
"network",
"never",
"new",
"news",
"newspaper",
"next",
"nice",
"night",
"no",
"none",
"nor",
"north",
"not",
"note",
"nothing",
"notice",
"now",
"number",
"occur",
"of",
"off",
"offer",
"office",
"officer",
"official",
"often",
"oil",
"ok",
"old",
"on",
"once",
"one",
"only",
"onto",
"open",
"operation",
"opportunity",
"option",
"or",
"order",
"organization",
"other",
"others",
"our",
"out",
"outside",
"over",
"own",
"owner",
"page",
"painting",
"paper",
"parent",
"part",
"participant",
"particular",
"particularly",
"partner",
"party",
"pass",
"past",
"pattern",
"pay",
"peace",
"people",
"per",
"perform",
"performance",
"perhaps",
"person",
"personal",
"phone",
"physical",
"pick",
"picture",
"piece",
"place",
"plan",
"plant",
"play",
"player",
"PM",
"point",
"police",
"policy",
"political",
"politics",
"poor",
"popular",
"population",
"position",
"positive",
"possible",
"power",
"practice",
"prepare",
"present",
"president",
"pressure",
"pretty",
"prevent",
"price",
"probably",
"process",
"produce",
"product",
"production",
"professional",
"professor",
"program",
"project",
"property",
"protect",
"prove",
"provide",
"public",
"pull",
"purpose",
"push",
"put",
"quality",
"question",
"quickly",
"quite",
"race",
"radio",
"raise",
"range",
"rate",
"rather",
"reach",
"read",
"ready",
"real",
"reality",
"realize",
"really",
"reason",
"receive",
"recent",
"recently",
"recognize",
"record",
"red",
"reduce",
"reflect",
"region",
"relate",
"relationship",
"religious",
"remain",
"remember",
"report",
"represent",
"Republican",
"require",
"research",
"resource",
"respond",
"response",
"responsibility",
"rest",
"result",
"return",
"reveal",
"rich",
"right",
"rise",
"risk",
"road",
"rock",
"role",
"room",
"rule",
"run",
"safe",
"same",
"save",
"say",
"scene",
"school",
"science",
"scientist",
"score",
"sea",
"season",
"seat",
"second",
"section",
"security",
"see",
"seek",
"seem",
"sell",
"send",
"senior",
"sense",
"series",
"serious",
"serve",
"service",
"set",
"seven",
"several",
"shake",
"share",
"she",
"short",
"should",
"shoulder",
"show",
"side",
"sign",
"significant",
"similar",
"simple",
"simply",
"since",
"sing",
"single",
"sister",
"sit",
"site",
"situation",
"six",
"size",
"skill",
"skin",
"small",
"smile",
"so",
"social",
"society",
"soldier",
"some",
"somebody",
"someone",
"something",
"sometimes",
"son",
"song",
"soon",
"sort",
"sound",
"source",
"south",
"southern",
"space",
"speak",
"special",
"specific",
"speech",
"spend",
"sport",
"spring",
"staff",
"stage",
"stand",
"standard",
"star",
"start",
"state",
"statement",
"station",
"stay",
"step",
"still",
"stock",
"stop",
"store",
"story",
"strategy",
"street",
"strong",
"structure",
"student",
"study",
"stuff",
"style",
"subject",
"success",
"successful",
"such",
"suddenly",
"suffer",
"suggest",
"summer",
"support",
"sure",
"surface",
"system",
"table",
"take",
"talk",
"task",
"tax",
"teach",
"teacher",
"team",
"technology",
"television",
"tell",
"ten",
"tend",
"term",
"test",
"than",
"thank",
"that",
"the",
"their",
"them",
"themselves",
"then",
"theory",
"there",
"these",
"they",
"thing",
"think",
"third",
"this",
"those",
"though",
"thought",
"thousand",
"threat",
"three",
"through",
"throughout",
"throw",
"thus",
"time",
"to",
"today",
"together",
"tonight",
"too",
"top",
"total",
"tough",
"toward",
"town",
"trade",
"traditional",
"training",
"travel",
"treat",
"treatment",
"tree",
"trial",
"trip",
"trouble",
"true",
"truth",
"try",
"turn",
"TV",
"two",
"type",
"under",
"understand",
"unit",
"until",
"up",
"upon",
"us",
"use",
"usually",
"value",
"various",
"very",
"view",
"visit",
"voice",
"vote",
"wait",
"walk",
"wall",
"want",
"war",
"watch",
"water",
"way",
"we",
"wear",
"week",
"weight",
"well",
"west",
"western",
"what",
"whatever",
"when",
"where",
"whether",
"which",
"while",
"white",
"who",
"whole",
"whom",
"whose",
"why",
"wide",
"wife",
"will",
"win",
"wind",
"window",
"wish",
"with",
"within",
"without",
"woman",
"wonder",
"word",
"work",
"worker",
"world",
"worry",
"would",
"write",
"writer",
"wrong",
"yard",
"yeah",
"year",
"yes",
"yet",
"you",
"young",
"your",
"yourself",
)
|
joke2k/faker
|
faker/providers/lorem/en_US/__init__.py
|
Python
|
mit
| 17,423
|
[
"VisIt"
] |
2784b7e5b3a9e40f873588a6939dc714d5f25aaa831c81c5f543535f179b7b68
|
from __future__ import print_function, absolute_import, division
import _pygaussian
import f90wrap.runtime
import logging
class Gaussianstuff(f90wrap.runtime.FortranModule):
    """
    Module gaussianstuff
    Defined at gaussian.F90 lines 4-789

    f90wrap-generated Python facade for the Fortran module; every method
    delegates to the compiled _pygaussian extension via an opaque handle.
    """
    @f90wrap.runtime.register_class("gaussian")
    class gaussian(f90wrap.runtime.FortranDerivedType):
        """
        Type(name=gaussian)
        Defined at gaussian.F90 lines 48-68
        """
        def __init__(self, n_, periodic_, handle=None):
            """
            self = Gaussian(n_, periodic_)
            Defined at gaussian.F90 lines 72-91
            Parameters
            ----------
            n_ : int
            periodic_ : bool
            Returns
            -------
            this : Gaussian

            NOTE(review): the 'handle' parameter is accepted for f90wrap API
            compatibility but is not used by this constructor — confirm.
            """
            f90wrap.runtime.FortranDerivedType.__init__(self)
            # Allocate the Fortran-side derived type and keep its opaque handle.
            self._handle = _pygaussian.f90wrap_init(n_=n_, periodic_=periodic_)
        def __del__(self):
            """
            Destructor for class Gaussian
            Defined at gaussian.F90 lines 93-100
            Parameters
            ----------
            this : Gaussian
            """
            # Free the Fortran-side object only if this wrapper owns the
            # allocation (f90wrap sets _alloc accordingly).
            if self._alloc:
                _pygaussian.f90wrap_destroy(this=self._handle)
        def filter1(self, f, fil, nb, nc, bc1_=None, bcn_=None):
            """
            filter1(self, f, fil, nb, nc[, bc1_, bcn_])
            Defined at gaussian.F90 lines 102-334
            Parameters
            ----------
            this : Gaussian
            f : float array
            fil : float array
            nb : int
            nc : int
            bc1_ : int
            bcn_ : int

            Filters along the first array direction; bc1_/bcn_ look like
            optional boundary-condition selectors — TODO confirm against
            gaussian.F90.
            """
            _pygaussian.f90wrap_filter1(this=self._handle, f=f, fil=fil, nb=nb, nc=nc, \
                bc1_=bc1_, bcn_=bcn_)
        def filter2(self, f, fil, na, nc, bc1_=None, bcn_=None):
            """
            filter2(self, f, fil, na, nc[, bc1_, bcn_])
            Defined at gaussian.F90 lines 336-564
            Parameters
            ----------
            this : Gaussian
            f : float array
            fil : float array
            na : int
            nc : int
            bc1_ : int
            bcn_ : int

            Filters along the second array direction (same contract as filter1).
            """
            _pygaussian.f90wrap_filter2(this=self._handle, f=f, fil=fil, na=na, nc=nc, \
                bc1_=bc1_, bcn_=bcn_)
        def filter3(self, f, fil, na, nb, bc1_=None, bcn_=None):
            """
            filter3(self, f, fil, na, nb[, bc1_, bcn_])
            Defined at gaussian.F90 lines 566-789
            Parameters
            ----------
            this : Gaussian
            f : float array
            fil : float array
            na : int
            nb : int
            bc1_ : int
            bcn_ : int

            Filters along the third array direction (same contract as filter1).
            """
            _pygaussian.f90wrap_filter3(this=self._handle, f=f, fil=fil, na=na, nb=nb, \
                bc1_=bc1_, bcn_=bcn_)
        # No derived-type array members to initialise for this class.
        _dt_array_initialisers = []
    # No module-level derived-type array members either.
    _dt_array_initialisers = []
# Module-level singleton mirroring the Fortran module instance.
gaussianstuff = Gaussianstuff()
|
FPAL-Stanford-University/FloATPy
|
floatpy/filters/pygaussian.py
|
Python
|
lgpl-3.0
| 3,387
|
[
"Gaussian"
] |
42e0182f091e1b0a19434ac8f760528f65f795fc2273b6faa1e82c038a88cca6
|
#!/usr/bin/env python
import os
try:
    # When run inside IPython, __IPYTHON__ exists; strip any extra
    # command-line arguments so the option parser below only sees argv[0].
    __IPYTHON__
    import sys
    del sys.argv[1:]
except:
    # Outside IPython the name probe raises NameError; deliberately
    # swallowed so plain-python runs keep their full sys.argv.
    pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
    """Assemble the SRW beamline container for this simulation.

    Builds, in order, an aperture, the drift to the watchpoint, and the
    final (element-less) watchpoint propagation entry, pulling every
    geometric value and propagation-parameter list from *v*.

    :param v: parsed option namespace (attributes ``op_Aperture_*`` etc.)
    :return: srwlib.SRWLOptC combining the elements and their parameters
    """
    elements = []
    prop_params = []
    # Aperture: aperture 33.1798m
    elements.append(srwlib.SRWLOptA(
        _shape=v.op_Aperture_shape,
        _ap_or_ob='a',
        _Dx=v.op_Aperture_Dx,
        _Dy=v.op_Aperture_Dy,
        _x=v.op_Aperture_x,
        _y=v.op_Aperture_y,
    ))
    prop_params.append(v.op_Aperture_pp)
    # Aperture_Watchpoint: drift 33.1798m
    elements.append(srwlib.SRWLOptD(
        _L=v.op_Aperture_Watchpoint_L,
    ))
    prop_params.append(v.op_Aperture_Watchpoint_pp)
    # Watchpoint: watch 45.0m -- no optical element, only the final
    # post-propagation (resize) parameters.
    prop_params.append(v.op_fin_pp)
    return srwlib.SRWLOptC(elements, prop_params)
# Simulation option table consumed by srwl_bl.srwl_uti_parse_options() in
# main(); each row is [name, type-code ('s'/'f'/'i' or '' for a flag),
# default value, help text(, action)].
varParam = srwl_bl.srwl_uti_ext_options([
    ['name', 's', 'Tabulated Undulator Example', 'simulation name'],
    #---Data Folder
    ['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
    #---Electron Beam
    ['ebm_nm', 's', '', 'standard electron beam name'],
    ['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
    ['ebm_i', 'f', 0.5, 'electron beam current [A]'],
    ['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
    ['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
    ['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
    ['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
    ['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
    ['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
    ['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
    ['ebm_dr', 'f', 0.0, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
    ['ebm_ens', 'f', 0.0007, 'electron beam relative energy spread'],
    ['ebm_emx', 'f', 1.5e-09, 'electron beam horizontal emittance [m]'],
    ['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
    # Definition of the beam through Twiss:
    ['ebm_betax', 'f', 1.84, 'horizontal beta-function [m]'],
    ['ebm_betay', 'f', 1.17, 'vertical beta-function [m]'],
    ['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
    ['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
    ['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
    ['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
    ['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
    ['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
    #---Undulator
    ['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
    ['und_by', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
    ['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
    ['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
    ['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
    ['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
    ['und_per', 'f', 0.02, 'undulator period [m]'],
    ['und_len', 'f', 3.0, 'undulator length [m]'],
    ['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
    ['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
    ['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
    ['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
    ['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
    # und_mdir/und_mfs are filled in at runtime by
    # setup_magnetic_measurement_files() from the measurement zip archive.
    ['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
    ['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
    #---Calculation Types
    # Electron Trajectory
    ['tr', '', '', 'calculate electron trajectory', 'store_true'],
    ['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
    ['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
    ['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
    ['tr_mag', 'i', 2, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
    ['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
    #Single-Electron Spectrum vs Photon Energy
    ['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
    ['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
    ['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
    ['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
    ['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
    ['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
    ['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
    ['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
    ['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['ss_mag', 'i', 2, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
    ['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
    ['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
    ['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
    ['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
    #Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
    ['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
    ['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
    ['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
    ['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
    ['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
    ['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
    ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
    ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
    ['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
    ['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
    ['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
    ['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
    ['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
    ['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
    ['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
    ['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
    ['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
    #to add options for the multi-e calculation from "accurate" magnetic field
    #Power Density Distribution vs horizontal and vertical position
    ['pw', '', '', 'calculate SR power density distribution', 'store_true'],
    ['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
    ['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
    ['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
    ['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
    ['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
    ['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
    ['pw_mag', 'i', 2, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
    ['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
    ['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
    #Single-Electron Intensity distribution vs horizontal and vertical position
    ['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
    #Single-Electron Wavefront Propagation
    ['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
    #Multi-Electron (partially-coherent) Wavefront Propagation
    ['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
    ['w_e', 'f', 8019.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
    ['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
    ['w_rx', 'f', 0.0015, 'range of horizontal position [m] for calculation of intensity distribution'],
    ['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
    ['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ry', 'f', 0.0015, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
    ['w_smpf', 'f', 0.3, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
    ['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
    ['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
    ['w_mag', 'i', 2, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
    ['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
    ['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
    ['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
    ['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
    ['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
    ['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
    ['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
    ['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
    ['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
    ['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
    ['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
    ['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
    ['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
    ['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
    #to add options
    ['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
    # Former appParam:
    ['rs_type', 's', 't', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
    #---Beamline optics:
    # Aperture: aperture
    ['op_Aperture_shape', 's', 'r', 'shape'],
    ['op_Aperture_Dx', 'f', 0.00025, 'horizontalSize'],
    ['op_Aperture_Dy', 'f', 0.00025, 'verticalSize'],
    ['op_Aperture_x', 'f', 0.0, 'horizontalOffset'],
    ['op_Aperture_y', 'f', 0.0, 'verticalOffset'],
    # Aperture_Watchpoint: drift
    ['op_Aperture_Watchpoint_L', 'f', 11.8202, 'length'],
    #---Propagation parameters (17-element lists; meaning of each slot is
    # documented in the index comments immediately below)
    ['op_Aperture_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture'],
    ['op_Aperture_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture_Watchpoint'],
    ['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
    #[ 0]: Auto-Resize (1) or not (0) Before propagation
    #[ 1]: Auto-Resize (1) or not (0) After propagation
    #[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
    #[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
    #[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
    #[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
    #[ 6]: Horizontal Resolution modification factor at Resizing
    #[ 7]: Vertical Range modification factor at Resizing
    #[ 8]: Vertical Resolution modification factor at Resizing
    #[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
    #[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
    #[11]: New Vertical wavefront Center position after Shift (not yet implemented)
    #[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
    #[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
    #[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
    #[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
    #[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
])
def setup_magnetic_measurement_files(filename, v):
    """Extract tabulated-undulator magnetic measurement data and point *v* at it.

    Extracts the whole archive into the current working directory, then sets
    ``v.und_mfs`` (index file name) and ``v.und_mdir`` (its sub-folder, or
    './' for archive-root entries) from the first member whose name contains
    '.txt'.

    :param filename: path to the measurement zip archive
    :param v: option namespace to mutate (und_mfs / und_mdir attributes)
    :raises RuntimeError: if no '.txt' member exists in the archive
    """
    import os
    import re
    import zipfile
    # BUG FIX: the archive handle was previously never closed (leaked file
    # descriptor); the context manager guarantees closure on every path.
    with zipfile.ZipFile(filename) as z:
        z.extractall()
        for f in z.namelist():
            # NOTE: matches '.txt' anywhere in the member name, not only as
            # a suffix -- kept for backward compatibility.
            if re.search(r'\.txt', f):
                v.und_mfs = os.path.basename(f)
                v.und_mdir = os.path.dirname(f) or './'
                return
    raise RuntimeError('missing magnetic measurement index *.txt file')
def main():
    """Parse options, build the beamline and run every enabled SRW report."""
    v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
    setup_magnetic_measurement_files("magnetic_measurements.zip", v)
    op = set_optics(v)
    # Enable each report type together with its default plot selection.
    v.ss = True
    v.ss_pl = 'e'
    v.sm = True
    v.sm_pl = 'e'
    v.pw = True
    v.pw_pl = 'xy'
    v.si = True
    v.si_pl = 'xy'
    v.tr = True
    v.tr_pl = 'xz'
    v.ws = True
    v.ws_pl = 'xy'
    # Only the multipole source type needs an explicit approximate field
    # container; every other source type passes None through.
    approx_field = None
    if v.rs_type == 'm':
        approx_field = srwlib.SRWLMagFldC()
        approx_field.arXc.append(0)
        approx_field.arYc.append(0)
        approx_field.arMagFld.append(
            srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
        approx_field.arZc.append(v.mp_zc)
    beamline = srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=approx_field)
    beamline.calc_all(v, op)
main()
|
mrakitin/sirepo
|
tests/template/srw_generate_data/tabulated-undulator-example.py
|
Python
|
apache-2.0
| 21,771
|
[
"Gaussian"
] |
d9afb4489f2d5f13179800ddc7cb6b3fa9cef64d3a90c236d7a79408d8430995
|
"""
Tests the ComponentMonitoring DB and Service by creating, checking,
updating and removing several instances of each table in the DB
This program assumes that the service Framework/ComponentMonitoring is running
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import datetime
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
class TestClientComponentMonitoring(unittest.TestCase):
  """
  Base TestCase that provisions a ComponentMonitoring client for each test.
  """

  def setUp(self):
    """
    Create a fresh ComponentMonitoringClient before every test
    """
    self.client = ComponentMonitoringClient()

  def tearDown(self):
    """
    No per-test cleanup is required
    """
class ComponentMonitoringClientChain(TestClientComponentMonitoring):
"""
Contains methods for testing of separate elements
"""
def testComponents(self):
"""
Test the Components database operations
"""
# Create a sample component
result = self.client.addComponent({'System': 'Test',
'Module': 'TestModule',
'Type': 'TestingFeature'})
self.assertTrue(result['OK'])
# Check if the component exists
result = self.client.getComponents({'System': 'Test',
'Module': 'TestModule',
'Type': 'TestingFeature'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Update the fields of the created component
result = self.client.updateComponents({'System': 'Test',
'Module': 'TestModule',
'Type': 'TestingFeature'},
{'Module': 'NewTestModule'})
self.assertTrue(result['OK'])
# Check if the component with the modified fields exists
result = self.client.getComponents({'System': 'Test',
'Module': 'NewTestModule',
'Type': 'TestingFeature'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Remove the Component
result = self.client.removeComponents({'System': 'Test',
'Module': 'NewTestModule',
'Type': 'TestingFeature'})
self.assertTrue(result['OK'])
# Check if the component was actually removed
result = self.client.getComponents({'System': 'Test',
'Module': 'NewTestModule',
'Type': 'TestingFeature'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
# Try to create an incomplete component
result = self.client.addComponent({'System': 'Test'})
self.assertFalse(result['OK'])
# Multiple removal
self.client.addComponent({'System': 'Test',
'Module': 'TestModule1',
'Type': 'TestingFeature1'})
self.client.addComponent({'System': 'Test',
'Module': 'TestModule2',
'Type': 'TestingFeature1'})
self.client.addComponent({'System': 'Test',
'Module': 'TestModule1',
'Type': 'TestingFeature2'})
self.client.removeComponents({'System': 'Test', 'Module': 'TestModule1'})
result = self.client.getComponents({'System': 'Test',
'Module': 'TestModule2',
'Type': 'TestingFeature1'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) >= 1)
result = self.client.getComponents({'System': 'Test',
'Module': 'TestModule1'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
self.client.removeComponents({'System': 'Test',
'Module': 'TestModule2',
'Type': 'TestingFeature1'})
self.assertTrue(result['OK'])
def testHosts(self):
"""
Tests the Hosts database operations
"""
# Create a sample host
result = self.client.addHost({'HostName': 'TestHost', 'CPU': 'TestCPU'})
self.assertTrue(result['OK'])
# Check if the host exists
result = self.client.getHosts({'HostName': 'TestHost',
'CPU': 'TestCPU'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Update the fields of the created host
result = self.client.updateHosts({'HostName': 'TestHost',
'CPU': 'TestCPU'},
{'HostName': 'StillATestHost'})
self.assertTrue(result['OK'])
# Check if the host with the modified fields exists
result = self.client.getHosts({'HostName': 'StillATestHost',
'CPU': 'TestCPU'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Remove the Host
result = self.client.removeHosts({'HostName': 'StillATestHost',
'CPU': 'TestCPU'})
self.assertTrue(result['OK'])
# Check if the host was actually removed
result = self.client.getHosts({'HostName': 'StillATestHost',
'CPU': 'TestCPU'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
# Try to create an incomplete host
result = self.client.addHost({'HostName': 'TestHost'})
self.assertFalse(result['OK'])
# Multiple removal
self.client.addHost({'HostName': 'TestHost', 'CPU': 'TestCPU1'})
self.client.addHost({'HostName': 'TestHost', 'CPU': 'TestCPU2'})
self.client.addHost({'HostName': 'TestHost', 'CPU': 'TestCPU1'})
self.client.removeHosts({'CPU': 'TestCPU1'})
result = self.client.getHosts({'HostName': 'TestHost',
'CPU': 'TestCPU2'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) >= 1)
result = self.client.getHosts({'HostName': 'TestHost',
'CPU': 'TestCPU1'},
False,
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
self.client.removeHosts({'HostName': 'TestHost', 'CPU': 'TestCPU2'})
self.assertTrue(result['OK'])
def testInstallations(self):
"""
Test the InstalledComponents database operations
"""
# Create a sample installation
result = self.client.addInstallation({'InstallationTime': datetime.datetime.now(),
'UnInstallationTime': datetime.datetime.now(),
'Instance': 'TestInstallA111'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
True)
self.assertTrue(result['OK'])
# Check if the installation exists
result = self.client.getInstallations({'Instance': 'TestInstallA111'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Update the fields of the created installation
result = self.client.updateInstallations({'Instance': 'TestInstallA111'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
{'Instance': 'TestInstallA222'}
)
self.assertTrue(result['OK'])
# Check if the installation with the modified fields exists
result = self.client.getInstallations({'Instance': 'TestInstallA222'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.assertTrue(result['OK'] and len(result['Value']) > 0)
# Remove the Installation
result = self.client.removeInstallations({'Instance': 'TestInstallA222'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'})
self.assertTrue(result['OK'])
# Check if the installation was actually removed
result = self.client.getInstallations({'Instance': 'TestInstallA222'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
# Create an installation associated with nonexistent Component
result = self.client.addInstallation(
{'InstallationTime': datetime.datetime.now(),
'UnInstallationTime': datetime.datetime.now(),
'Instance': 'TestInstallA333'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule22A',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.assertFalse(result['OK'])
# Multiple removal
self.client.addInstallation(
{'InstallationTime': datetime.datetime.now(),
'UnInstallationTime': datetime.datetime.now(),
'Instance': 'MultipleRemovalInstall1'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.client.addInstallation(
{'InstallationTime': datetime.datetime.now(),
'UnInstallationTime': datetime.datetime.now(),
'Instance': 'MultipleRemovalInstall2'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
False)
self.client.addInstallation(
{'InstallationTime': datetime.datetime.now(),
'UnInstallationTime': datetime.datetime.now(),
'Instance': 'MultipleRemovalInstall3'},
{'System': 'UnexistentSystem',
'Module': 'UnexistentModule2',
'Type': 'UnexistentType'},
{'HostName': 'fictional',
'CPU': 'TestCPU'},
True)
result = self.client.getInstallations(
{'Instance':
['MultipleRemovalInstall1', 'MultipleRemovalInstall3']},
{},
{},
False)
self.assertTrue(result['OK'] and len(result['Value']) == 2)
self.client.removeInstallations({},
{'Module': 'UnexistentModule'},
{})
result = self.client.getInstallations({},
{'Module': 'UnexistentModule2'},
{}, False)
self.assertTrue(result['OK'] and len(result['Value']) >= 1)
result = self.client.getInstallations({},
{'Module': 'UnexistentModule'},
{},
False)
self.assertTrue(result['OK'] and len(result['Value']) <= 0)
self.client.removeInstallations({},
{'Module': 'UnexistentModule2'},
{})
self.assertTrue(result['OK'])
# Clean up what we created
self.client.removeHosts({'HostName': 'fictional', 'CPU': 'TestCPU'})
self.client.removeComponents({'System': 'UnexistentSystem',
'Module': 'UnexistentModule',
'Type': 'UnexistentType'})
self.client.removeComponents({'System': 'UnexistentSystem',
'Module': 'UnexistentModule2',
'Type': 'UnexistentType'})
def testHostLogging(self):
"""
Tests the HostLogging database operations
"""
# Create a sample log
result = self.client.updateLog('TestHost', {'DIRACVersion': 'v6r15'})
self.assertTrue(result['OK'])
# Check that the log exists
result = self.client.getLog('TestHost')
self.assertTrue(result['OK'] and result['Value'][0]['DIRACVersion'] == 'v6r15')
# Update the fields of the created log
result = self.client.updateLog('TestHost', {'hostName': 'StillATestHost'})
self.assertTrue(result['OK'])
# Check if the log with the modified fields exists
result = self.client.getLog('StillATestHost')
self.assertTrue(result['OK'] and result['Value'][0]['DIRACVersion'] == 'v6r15')
# Remove the log
result = self.client.removeLogs({'hostName': 'StillATestHost'})
self.assertTrue(result['OK'])
# Check that the log was actually removed
result = self.client.getLog('StillATestHost')
self.assertFalse(result['OK'])
# Multiple removal
self.client.updateLog('TestHostA', {'DIRACVersion': 'v7r0'})
self.client.updateLog('TestHostB', {'DIRACVersion': 'v7r0'})
self.client.updateLog('TestHostC', {'DIRACVersion': 'v7r1'})
self.client.removeLogs({'DIRACVersion': 'v7r0'})
result = self.client.getLog('TestHostC')
self.assertTrue(result['OK'] and len(result['Value']) >= 1)
result = self.client.getLog('TestHostB')
self.assertFalse(result['OK'])
result = self.client.removeLogs({'DIRACVersion': 'v7r1'})
self.assertTrue(result['OK'])
if __name__ == '__main__':
  # Build one suite holding both test cases and run it verbosely
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase(TestClientComponentMonitoring)
  suite.addTest(loader.loadTestsFromTestCase(ComponentMonitoringClientChain))
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
arrabito/DIRAC
|
tests/Integration/Framework/Test_InstalledComponentsDB.py
|
Python
|
gpl-3.0
| 16,132
|
[
"DIRAC"
] |
4fe3347e5847dc4df5e0163569159a1c17cbcd4e63d92ec0bb92bbe392c4377b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.