| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exatomic.nwchem.output`
#############################################
"""
#import numpy as np
#import pandas as pd
from unittest import TestCase
from exatomic.base import resource
from exatomic.nwchem.output import Output
class TestNWChemOutput(TestCase):
def setUp(self):
self.mam1 = Output(resource('nw-ch3nh2-631g.out'))
self.mam2 = Output(resource('nw-ch3nh2-augccpvdz.out'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.mam2.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertEqual(self.mam2.atom.shape[0], 7)
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.mam2.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertEqual(self.mam2.orbital.shape[0], 91)
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.mam2.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertEqual(self.mam2.basis_set.shape[0], 57)
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
def test_parse_frame(self):
self.mam1.parse_frame()
self.mam2.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertEqual(self.mam2.frame.shape[0], 1)
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.mam2.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
def test_to_universe(self):
self.mam1.to_universe()
self.mam2.to_universe()
| alexvmarch/atomic | exatomic/nwchem/tests/test_output.py | Python | apache-2.0 | 1,992 | ["NWChem"] | 6a3be71ffdc182002451b1d370778a24cec0acee9d8bb911e468797fc2ff9a0d |
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for phylogenetic tree formats.
This API follows the same semantics as Biopython's SeqIO and AlignIO.
"""
__docformat__ = "epytext en"
from Bio.Phylo import BaseTree, NewickIO, NexusIO
# Python 2.4 doesn't have ElementTree, which PhyloXMLIO needs
try:
from Bio.Phylo import PhyloXMLIO
except ImportError:
# TODO: should we issue a warning? the installer will have already whined
# raise MissingPythonDependencyError(
# "Install an ElementTree implementation if you want to use "
# "Bio.Phylo to parse phyloXML files.")
supported_formats = {
'newick': NewickIO,
'nexus': NexusIO,
}
else:
supported_formats = {
'newick': NewickIO,
'nexus': NexusIO,
'phyloxml': PhyloXMLIO,
}
def parse(file, format):
"""Iteratively parse a file and return each of the trees it contains.
If a file only contains one tree, this still returns an iterable object that
contains one element.
Example::
>>> trees = parse('../../Tests/PhyloXML/apaf.xml', 'phyloxml')
>>> for tree in trees:
... print tree.rooted
True
"""
do_close = False
if isinstance(file, basestring):
file = open(file, 'r')
do_close = True
# Py2.4 compatibility: this should be in a try/finally block
# try:
for tree in getattr(supported_formats[format], 'parse')(file):
yield tree
# finally:
if do_close:
file.close()
def read(file, format):
"""Parse a file in the given format and return a single tree.
Raises a ValueError if there are zero or multiple trees -- if this occurs,
use parse() instead to get the complete sequence of trees.
"""
try:
tree_gen = parse(file, format)
tree = tree_gen.next()
except StopIteration:
raise ValueError("There are no trees in this file.")
try:
tree_gen.next()
except StopIteration:
return tree
else:
raise ValueError(
"There are multiple trees in this file; use parse() instead.")
def write(trees, file, format, **kwargs):
"""Write a sequence of trees to file in the given format."""
if isinstance(trees, BaseTree.Tree):
# Passed a single tree instead of an iterable -- that's OK
trees = [trees]
do_close = False
if isinstance(file, basestring):
file = open(file, 'w+')
do_close = True
try:
n = getattr(supported_formats[format], 'write')(trees, file, **kwargs)
finally:
if do_close:
file.close()
return n
def convert(in_file, in_format, out_file, out_format, **kwargs):
"""Convert between two tree file formats."""
trees = parse(in_file, in_format)
return write(trees, out_file, out_format, **kwargs)
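# A minimal usage sketch (hypothetical file names):
#   convert('example.nwk', 'newick', 'example.xml', 'phyloxml')
# parses every tree in example.nwk and rewrites the collection as phyloXML.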
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/Phylo/_io.py | Python | gpl-2.0 | 3,108 | ["Biopython"] | 0296a06d341b79637b20e7624b733de5c724b01777811d84d1d2883852a1ce7d |
from uuid import uuid4
import lightify
from Firefly import logging, scheduler
from Firefly.const import AUTHOR, COMMAND_UPDATE
from Firefly.core.service_handler import ServiceConfig, ServicePackage
from Firefly.helpers.events import Command
from Firefly.helpers.service import Service
TITLE = 'Lightify Lights'
SERVICE_ID = 'service_lightify'
COMMANDS = ['send_command', 'refresh', 'send_request']
REQUESTS = ['get_lights', 'get_groups']
SECTION = 'LIGHTIFY'
def Setup(firefly, package, alias, ff_id, service_package: ServicePackage, config: ServiceConfig, **kwargs):
logging.info('Setting up %s service' % service_package.name)
lightify_service = Lightify(firefly, alias, ff_id, service_package, config, **kwargs)
firefly.install_component(lightify_service)
return True
class Lightify(Service):
def __init__(self, firefly, alias, ff_id, service_package: ServicePackage, config: ServiceConfig, **kwargs):
# TODO: Fix this
package = service_package.package
super().__init__(firefly, SERVICE_ID, package, TITLE, AUTHOR, COMMANDS, REQUESTS)
self.config = config
self.ip = config.ip
self.bridge = lightify.Lightify(self.ip)
self.refresh_id = str(uuid4())
scheduler.runEveryS(10, self.refresh, job_id=self.refresh_id)
def refresh(self):
self.bridge.update_all_light_status()
self.bridge.update_group_list()
for ff_id, light in self.bridge.lights().items():
ff_id = str(ff_id)
if ff_id in self.firefly.components:
command = Command(ff_id, SERVICE_ID, COMMAND_UPDATE, lightify_object=light)
self.firefly.send_command(command)
else:
self._firefly.install_package('Firefly.components.lightify.lightify_light', ff_id=ff_id, alias=light.name(), lightify_object=light)
for name, group in self.bridge.groups().items():
ff_id = 'lightify-group-%s' % name.replace(' ', '_')
if ff_id in self.firefly.components:
command = Command(ff_id, SERVICE_ID, COMMAND_UPDATE, lightify_object=group, lightify_bridge=self.bridge)
self.firefly.send_command(command)
else:
self._firefly.install_package('Firefly.components.lightify.lightify_group', ff_id=ff_id, alias=name, lightify_object=group, lightify_bridge=self.bridge)
| Firefly-Automation/Firefly | Firefly/services/lightify.py | Python | apache-2.0 | 2,231 | ["Firefly"] | 801f607f867c002694b78e0fc1e9be03d093f7cc396f8fd12e6dc190a632d13b |
"""Code for the B{0store} command-line interface."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
import sys, os, errno
from zeroinstall.zerostore import manifest
from zeroinstall.zerostore.manifest import verify, get_algorithm, copy_tree_with_verify
from zeroinstall import zerostore, SafeException, support
stores = None
def init_stores():
global stores
assert stores is None
if stores is None:
stores = zerostore.Stores()
class UsageError(SafeException): pass
def do_manifest(args):
"""manifest DIRECTORY [ALGORITHM]"""
if len(args) < 1 or len(args) > 2: raise UsageError(_("Wrong number of arguments"))
if len(args) == 2:
alg = get_algorithm(args[1])
else:
# If no algorithm was given, guess from the directory name
name = os.path.basename(args[0])
try:
alg, unused = manifest.splitID(name)
except zerostore.BadDigest:
alg = get_algorithm('sha1new')
digest = alg.new_digest()
for line in alg.generate_manifest(args[0]):
print(line)
digest.update((line + '\n').encode('utf-8'))
print(alg.getID(digest))
sys.exit(0)
def do_find(args):
"""find DIGEST"""
if len(args) != 1: raise UsageError(_("Wrong number of arguments"))
try:
print(stores.lookup(args[0]))
sys.exit(0)
except zerostore.BadDigest as ex:
print(ex, file=sys.stderr)
except zerostore.NotStored as ex:
print(ex, file=sys.stderr)
sys.exit(1)
def do_add(args):
"""add DIGEST (DIRECTORY | (ARCHIVE [EXTRACT]))"""
from zeroinstall.zerostore import unpack
if len(args) < 2: raise UsageError(_("Missing arguments"))
digest = args[0]
if os.path.isdir(args[1]):
if len(args) > 2: raise UsageError(_("Too many arguments"))
stores.add_dir_to_cache(digest, args[1])
elif os.path.isfile(args[1]):
if len(args) > 3: raise UsageError(_("Too many arguments"))
if len(args) > 2:
extract = args[2]
else:
extract = None
type = unpack.type_from_url(args[1])
if not type:
raise SafeException(_("Unknown extension in '%s' - can't guess MIME type") % args[1])
unpack.check_type_ok(type)
with open(args[1], 'rb') as stream:
stores.add_archive_to_cache(digest, stream, args[1], extract, type = type)
else:
try:
os.stat(args[1])
except OSError as ex:
if ex.errno != errno.ENOENT: # No such file or directory
raise UsageError(str(ex)) # E.g. permission denied
raise UsageError(_("No such file or directory '%s'") % args[1])
def do_optimise(args):
"""optimise [ CACHE ]"""
if len(args) == 1:
cache_dir = args[0]
else:
cache_dir = stores.stores[0].dir
cache_dir = os.path.realpath(cache_dir)
import stat
info = os.stat(cache_dir)
if not stat.S_ISDIR(info.st_mode):
raise UsageError(_("Not a directory: '%s'") % cache_dir)
impl_name = os.path.basename(cache_dir)
if impl_name != 'implementations':
raise UsageError(_("Cache directory should be named 'implementations', not\n"
"'%(name)s' (in '%(cache_dir)s')") % {'name': impl_name, 'cache_dir': cache_dir})
print(_("Optimising"), cache_dir)
from . import optimise
uniq_size, dup_size, already_linked, man_size = optimise.optimise(cache_dir)
print(_("Original size : %(size)s (excluding the %(manifest_size)s of manifests)") % {'size': support.pretty_size(uniq_size + dup_size), 'manifest_size': support.pretty_size(man_size)})
print(_("Already saved : %s") % support.pretty_size(already_linked))
if dup_size == 0:
print(_("No duplicates found; no changes made."))
else:
print(_("Optimised size : %s") % support.pretty_size(uniq_size))
perc = (100 * float(dup_size)) / (uniq_size + dup_size)
print(_("Space freed up : %(size)s (%(percentage).2f%%)") % {'size': support.pretty_size(dup_size), 'percentage': perc})
print(_("Optimisation complete."))
def do_verify(args):
"""verify (DIGEST | (DIRECTORY [DIGEST])"""
if len(args) == 2:
required_digest = args[1]
root = args[0]
elif len(args) == 1:
root = get_stored(args[0])
required_digest = None # Get from name
else:
raise UsageError(_("Missing DIGEST or DIRECTORY"))
print(_("Verifying"), root)
try:
verify(root, required_digest)
print(_("OK"))
except zerostore.BadDigest as ex:
print(str(ex))
if ex.detail:
print()
print(ex.detail)
sys.exit(1)
def do_audit(args):
"""audit [DIRECTORY]"""
if len(args) == 0:
audit_stores = stores.stores
else:
audit_stores = [zerostore.Store(x) for x in args]
audit_ls = []
total = 0
for a in audit_stores:
if os.path.isdir(a.dir):
items = sorted(os.listdir(a.dir))
audit_ls.append((a.dir, items))
total += len(items)
elif len(args):
raise SafeException(_("No such directory '%s'") % a.dir)
verified = 0
failures = []
i = 0
for root, impls in audit_ls:
print(_("Scanning %s") % root)
for required_digest in impls:
path = os.path.join(root, required_digest)
try:
(alg, digest) = zerostore.parse_algorithm_digest_pair(required_digest)
except zerostore.BadDigest:
print(_("Skipping non-implementation directory %s") % path)
continue
i += 1
try:
msg = _("[%(done)d / %(total)d] Verifying %(digest)s") % {'done': i, 'total': total, 'digest': required_digest}
print(msg, end='')
sys.stdout.flush()
verify(path, required_digest)
print("\r" + (" " * len(msg)) + "\r", end='')
verified += 1
except zerostore.BadDigest as ex:
print()
failures.append(path)
print(str(ex))
if ex.detail:
print()
print(ex.detail)
if failures:
print('\n' + _("List of corrupted or modified implementations:"))
for x in failures:
print(x)
print()
print(_("Checked %d items") % i)
print(_("Successfully verified implementations: %d") % verified)
print(_("Corrupted or modified implementations: %d") % len(failures))
if failures:
sys.exit(1)
def do_list(args):
"""list"""
if args: raise UsageError(_("List takes no arguments"))
print(_("User store (writable) : %s") % stores.stores[0].dir)
for s in stores.stores[1:]:
print(_("System store : %s") % s.dir)
if len(stores.stores) < 2:
print(_("No system stores."))
def get_stored(dir_or_digest):
"""@type dir_or_digest: str
@rtype: str"""
if os.path.isdir(dir_or_digest):
return dir_or_digest
else:
try:
return stores.lookup(dir_or_digest)
except zerostore.NotStored as ex:
print(ex, file=sys.stderr)
sys.exit(1)
def do_copy(args):
"""copy SOURCE [ TARGET ]"""
if len(args) == 2:
source, target = args
elif len(args) == 1:
source = args[0]
target = stores.stores[0].dir
else:
raise UsageError(_("Wrong number of arguments."))
if not os.path.isdir(source):
raise UsageError(_("Source directory '%s' not found") % source)
if not os.path.isdir(target):
raise UsageError(_("Target directory '%s' not found") % target)
manifest_path = os.path.join(source, '.manifest')
if not os.path.isfile(manifest_path):
raise UsageError(_("Source manifest '%s' not found") % manifest_path)
required_digest = os.path.basename(source)
with open(manifest_path, 'rb') as stream:
manifest_data = stream.read()
copy_tree_with_verify(source, target, manifest_data, required_digest)
def do_manage(args):
"""manage"""
if args:
raise UsageError(_("manage command takes no arguments"))
if sys.version_info[0] < 3:
import pygtk
pygtk.require('2.0')
else:
from zeroinstall.gtkui import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version = '3.0')
import gtk
from zeroinstall.gtkui import cache
from zeroinstall.injector.iface_cache import iface_cache
cache_explorer = cache.CacheExplorer(iface_cache)
cache_explorer.window.connect('destroy', gtk.main_quit)
cache_explorer.show()
gtk.main()
commands = [do_add, do_audit, do_copy, do_find, do_list, do_manifest, do_optimise, do_verify, do_manage]
| linuxmidhun/0install | zeroinstall/zerostore/cli.py | Python | lgpl-2.1 | 7,801 | ["VisIt"] | f6247e2258a3fd2f7a018f850366c6c98c566582e876b56639a76cb103851f4d |
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
from nose.tools import assert_equal
def create_cert_course():
world.clear_courses()
org = 'edx'
number = '999'
name = 'Certificates'
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org=org, number=number, display_name=name)
world.scenario_dict['course_id'] = world.scenario_dict['COURSE'].id
world.UPSELL_LINK_CSS = u'.message-upsell a.action-upgrade[href*="{}"]'.format(
world.scenario_dict['course_id']
)
honor_mode = world.CourseModeFactory.create(
course_id=world.scenario_dict['course_id'],
mode_slug='honor',
mode_display_name='honor mode',
min_price=0,
)
verified_mode = world.CourseModeFactory.create(
course_id=world.scenario_dict['course_id'],
mode_slug='verified',
mode_display_name='verified cert course',
min_price=16,
suggested_prices='32,64,128',
currency='usd',
)
def register():
url = u'courses/{}/about'.format(world.scenario_dict['course_id'])
world.browser.visit(django_url(url))
world.css_click('section.intro a.register')
assert world.is_css_present('section.wrapper h3.title')
@step(u'I select the audit track$')
def select_the_audit_track(step):
create_cert_course()
register()
btn_css = 'input[name="honor_mode"]'
world.wait(1) # TODO remove this after troubleshooting JZ
world.css_find(btn_css)
world.css_click(btn_css)
def select_contribution(amount=32):
radio_css = 'input[value="{}"]'.format(amount)
world.css_click(radio_css)
assert world.css_find(radio_css).selected
def click_verified_track_button():
world.wait_for_ajax_complete()
btn_css = 'input[value="Pursue a Verified Certificate"]'
world.css_click(btn_css)
@step(u'I select the verified track for upgrade')
def select_verified_track_upgrade(step):
select_contribution(32)
world.wait_for_ajax_complete()
btn_css = 'input[value="Upgrade Your Enrollment"]'
world.css_click(btn_css)
# TODO: might want to change this depending on the changes for upgrade
assert world.is_css_present('section.progress')
@step(u'I select the verified track$')
def select_the_verified_track(step):
create_cert_course()
register()
select_contribution(32)
click_verified_track_button()
assert world.is_css_present('section.progress')
@step(u'I should see the course on my dashboard$')
def should_see_the_course_on_my_dashboard(step):
course_css = 'li.course-item'
assert world.is_css_present(course_css)
@step(u'I go to step "([^"]*)"$')
def goto_next_step(step, step_num):
btn_css = {
'1': '#face_next_button',
'2': '#face_next_link',
'3': '#photo_id_next_link',
'4': '#pay_button',
}
next_css = {
'1': 'div#wrapper-facephoto.carousel-active',
'2': 'div#wrapper-idphoto.carousel-active',
'3': 'div#wrapper-review.carousel-active',
'4': 'div#wrapper-review.carousel-active',
}
world.css_click(btn_css[step_num])
# Pressing the button will advance the carousel to the next item
# and give the wrapper div the "carousel-active" class
assert world.css_find(next_css[step_num])
@step(u'I capture my "([^"]*)" photo$')
def capture_my_photo(step, name):
# Hard coded red dot image
image_data = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='
snapshot_script = "$('#{}_image')[0].src = '{}';".format(name, image_data)
# Mirror the javascript of the photo_verification.html page
world.browser.execute_script(snapshot_script)
world.browser.execute_script("$('#{}_capture_button').hide();".format(name))
world.browser.execute_script("$('#{}_reset_button').show();".format(name))
world.browser.execute_script("$('#{}_approve_button').show();".format(name))
assert world.css_find('#{}_approve_button'.format(name))
@step(u'I approve my "([^"]*)" photo$')
def approve_my_photo(step, name):
button_css = {
'face': 'div#wrapper-facephoto li.control-approve',
'photo_id': 'div#wrapper-idphoto li.control-approve',
}
wrapper_css = {
'face': 'div#wrapper-facephoto',
'photo_id': 'div#wrapper-idphoto',
}
# Make sure that the carousel is in the right place
assert world.css_has_class(wrapper_css[name], 'carousel-active')
assert world.css_find(button_css[name])
# HACK: for now don't bother clicking the approve button for
# id_photo, because it is sending you back to Step 1.
# Come back and figure it out later. JZ Aug 29 2013
if name == 'face':
world.css_click(button_css[name])
# Make sure you didn't advance the carousel
assert world.css_has_class(wrapper_css[name], 'carousel-active')
@step(u'I select a contribution amount$')
def select_contribution_amount(step):
select_contribution(32)
@step(u'I confirm that the details match$')
def confirm_details_match(step):
# First you need to scroll down on the page
# to make the element visible?
# Currently chrome is failing with ElementNotVisibleException
world.browser.execute_script("window.scrollTo(0,1024)")
cb_css = 'input#confirm_pics_good'
world.css_click(cb_css)
assert world.css_find(cb_css).checked
@step(u'I am at the payment page')
def at_the_payment_page(step):
world.wait_for_present('input[name=transactionSignature]')
@step(u'I submit valid payment information$')
def submit_payment(step):
# First make sure that the page is done if it still executing
# an ajax query.
world.wait_for_ajax_complete()
button_css = 'input[value=Submit]'
world.css_click(button_css)
@step(u'I have submitted face and ID photos$')
def submitted_face_and_id_photos(step):
step.given('I am logged in')
step.given('I select the verified track')
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
@step(u'I have submitted photos to verify my identity')
def submitted_photos_to_verify_my_identity(step):
step.given('I have submitted face and ID photos')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I submit my photos and confirm')
def submit_photos_and_confirm(step):
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I see that my payment was successful')
def see_that_my_payment_was_successful(step):
title = world.css_find('div.wrapper-content-main h3.title')
assert_equal(title.text, u'Congratulations! You are now verified on edX.')
@step(u'I navigate to my dashboard')
def navigate_to_my_dashboard(step):
world.css_click('span.avatar')
assert world.css_find('section.my-courses')
@step(u'I see the course on my dashboard')
def see_the_course_on_my_dashboard(step):
course_link_css = u'section.my-courses a[href*="{}"]'.format(world.scenario_dict['course_id'])
assert world.is_css_present(course_link_css)
@step(u'I see the upsell link on my dashboard')
def see_upsell_link_on_my_dashboard(step):
course_link_css = world.UPSELL_LINK_CSS
assert world.is_css_present(course_link_css)
@step(u'I do not see the upsell link on my dashboard')
def see_no_upsell_link(step):
course_link_css = world.UPSELL_LINK_CSS
assert world.is_css_not_present(course_link_css)
@step(u'I select the upsell link on my dashboard')
def select_upsell_link_on_my_dashboard(step):
# expand the upsell section
world.css_click('.message-upsell')
course_link_css = world.UPSELL_LINK_CSS
# click the actual link
world.css_click(course_link_css)
@step(u'I see that I am on the verified track')
def see_that_i_am_on_the_verified_track(step):
id_verified_css = 'li.course-item article.course.verified'
assert world.is_css_present(id_verified_css)
@step(u'I leave the flow and return$')
def leave_the_flow_and_return(step):
world.visit(u'verify_student/verified/{}/'.format(world.scenario_dict['course_id']))
@step(u'I am at the verified page$')
def see_the_payment_page(step):
assert world.css_find('button#pay_button')
@step(u'I edit my name$')
def edit_my_name(step):
btn_css = 'a.retake-photos'
world.css_click(btn_css)
| wwj718/ANALYSE | lms/djangoapps/courseware/features/certificates.py | Python | agpl-3.0 | 9,029 | ["VisIt"] | e76633790d6d6226c34946bde68d88b29b9edcfd2bb99dfa6c379d3d04be3b9d |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Check every stock in the history realtm files: which are CX and which are not
# A small helper script for debugging
import sys
import re
import os
import string
import datetime
import shutil
sys.path.append(".")
from internal.trade_date import *
from internal.realtime_obj import *
from internal.analyze_realtime import *
from internal.handle_realtime import *
#Main
if __name__=="__main__":
curdate = ''
bLast = 0
trade_day = get_lastday()
pre_date = get_preday(0, trade_day)
#pre_date = trade_day
preStatItem = statisticsItem()
ret = parse_realtime_his_file(pre_date, preStatItem)
pre300_date = get_preday(CX_DAYS, pre_date)
print pre_date,pre300_date
st_list = []
ret = get_stk_code_by_dfcf(st_list, 'A', 0)
stkList = [
preStatItem.lst_non_yzcx_yzzt,
preStatItem.lst_non_yzcx_zt,
preStatItem.lst_non_yzcx_zthl,
preStatItem.lst_yzdt,
preStatItem.lst_dt,
preStatItem.lst_dtft,
]
for iList in stkList:
for item in iList:
bFlag = 0
for tdItem in st_list:
if tdItem[1] == item[0]:
bFlag=1
#print "Not Match",item[0],item[1],tdItem[-1]
trd_date = datetime.datetime.strptime(pre_date, '%Y-%m-%d').date()
mk_date = datetime.datetime.strptime(tdItem[-1], '%Y-%m-%d').date()
if (trd_date-mk_date).days>CX_DAYS:
pass
#print "",item[0],item[1],''
else:
print "",item[0],item[1],' CCCCCC'
if bFlag==0:
print "Not Match",item[0],item[1]
print ""
| yudingding6197/fin_script | debug/his_rt/check_his_cx.py | Python | gpl-2.0 | 1,479 | ["BLAST"] | 5fc47f5955afa38b2d379ef7973d04e6e7b8e304a794a2a1a584f9557936a49c |
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import click
import cookiecutter
import cookiecutter.main
from molecule import config
from molecule import logger
from molecule import util
LOG = logger.get_logger(__name__)
def _process_templates(template_dir, extra_context, output_dir,
overwrite=True):
"""
Process templates as found in the named directory.
:param template_dir: A string containing an absolute or relative path to a
directory where the templates are located. If the provided directory is a
relative path, it is resolved using a known location.
:param extra_context: A dict of values that are used to override default
or user specified values.
:param output_dir: A string with an absolute path to the directory where the
templates should be written.
:param overwrite: An optional bool controlling whether to overwrite existing
templates.
:return: None
"""
template_dir = _resolve_template_dir(template_dir)
cookiecutter.main.cookiecutter(
template_dir,
extra_context=extra_context,
output_dir=output_dir,
overwrite_if_exists=overwrite,
no_input=True, )
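# A hedged usage sketch: _init_new_role() below calls
#   _process_templates('role', command_args, os.getcwd())
# with command_args carrying keys such as 'role_name'.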
def _resolve_template_dir(template_dir):
if not os.path.isabs(template_dir):
template_dir = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'cookiecutter',
template_dir)
return template_dir
def _init_new_role(command_args):
"""
>>> molecule init role --role-name foo
"""
role_name = command_args['role_name']
role_directory = os.getcwd()
LOG.info('Initializing new role {}...'.format(role_name))
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
_process_templates('role', command_args, role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**command_args),
'scenario/verifier/{verifier_name}'.format(**command_args)
]
for template in templates:
_process_templates(template, command_args, scenario_base_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg)
def _init_new_scenario(command_args):
"""
>>> molecule init scenario --scenario-name default --role-name foo
"""
scenario_name = command_args['scenario_name']
role_name = os.getcwd().split(os.sep)[-1]
role_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
LOG.info('Initializing new scenario {}...'.format(scenario_name))
molecule_directory = config.molecule_directory(
os.path.join(role_directory, role_name))
scenario_directory = os.path.join(molecule_directory, scenario_name)
scenario_base_directory = os.path.dirname(scenario_directory)
if os.path.isdir(scenario_directory):
msg = ('The directory molecule/{} exists. '
'Cannot create new scenario.').format(scenario_name)
util.sysexit_with_message(msg)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**command_args),
'scenario/verifier/{verifier_name}'.format(**command_args)
]
for template in templates:
_process_templates(template, command_args, scenario_base_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized scenario in {} successfully.'.format(scenario_directory)
LOG.success(msg)
@click.group()
def init():
""" Initialize a new role or scenario. """
@init.command()
@click.option(
'--dependency-name',
type=click.Choice(['galaxy']),
default='galaxy',
help='Name of dependency to initialize. (galaxy)')
@click.option(
'--driver-name',
type=click.Choice(config.molecule_drivers()),
default='docker',
help='Name of driver to initialize. (docker)')
@click.option(
'--lint-name',
type=click.Choice(['ansible-lint']),
default='ansible-lint',
help='Name of lint to initialize. (ansible-lint)')
@click.option(
'--provisioner-name',
type=click.Choice(['ansible']),
default='ansible',
help='Name of provisioner to initialize. (ansible)')
@click.option('--role-name', required=True, help='Name of the role to create.')
@click.option(
'--verifier-name',
type=click.Choice(config.molecule_verifiers()),
default='testinfra',
help='Name of verifier to initialize. (testinfra)')
def role(dependency_name, driver_name, lint_name, provisioner_name, role_name,
verifier_name): # pragma: no cover
""" Initialize a new role for use with Molecule. """
command_args = {
'dependency_name': dependency_name,
'driver_name': driver_name,
'lint_name': lint_name,
'provisioner_name': provisioner_name,
'role_name': role_name,
'scenario_name': 'default',
'subcommand': __name__,
'verifier_name': verifier_name,
}
_init_new_role(command_args)
@init.command()
@click.option(
'--dependency-name',
type=click.Choice(['galaxy']),
default='galaxy',
help='Name of dependency to initialize. (galaxy)')
@click.option(
'--driver-name',
type=click.Choice(config.molecule_drivers()),
default='docker',
help='Name of driver to initialize. (docker)')
@click.option(
'--lint-name',
type=click.Choice(['ansible-lint']),
default='ansible-lint',
help='Name of lint to initialize. (ansible-lint)')
@click.option(
'--provisioner-name',
type=click.Choice(['ansible']),
default='ansible',
help='Name of provisioner to initialize. (ansible)')
@click.option('--role-name', required=True, help='Name of the role to create.')
@click.option(
'--scenario-name', required=True, help='Name of the scenario to create.')
@click.option(
'--verifier-name',
type=click.Choice(config.molecule_verifiers()),
default='testinfra',
help='Name of verifier to initialize. (testinfra)')
def scenario(dependency_name, driver_name, lint_name, provisioner_name,
role_name, scenario_name, verifier_name): # pragma: no cover
""" Initialize a new scenario for use with Molecule. """
command_args = {
'dependency_name': dependency_name,
'driver_name': driver_name,
'lint_name': lint_name,
'provisioner_name': provisioner_name,
'role_name': role_name,
'scenario_name': scenario_name,
'subcommand': __name__,
'verifier_name': verifier_name,
}
_init_new_scenario(command_args)
| retr0h/maquina | molecule/command/init.py | Python | mit | 7,863 | ["Galaxy"] | 8b0b3734157da4959772e78243f8279d79ad5e52db5f1267f6c1cd654161f393 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module has methods for parsing names and versions of packages from URLs.
The idea is to allow package creators to supply nothing more than the
download location of the package, and figure out version and name information
from there.
**Example:** when spack is given the following URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz
It can figure out that the package name is ``hdf``, and that it is at version
``4.2.12``. This is useful for making the creation of packages simple: a user
just supplies a URL and skeleton code is generated automatically.
Spack can also figure out that it can most likely download 4.2.6 at this URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz
This is useful if a user asks for a package at a particular version number;
spack doesn't need anyone to tell it where to get the tarball even though
it's never been told about that version before.
"""
import os
import re
from six import StringIO
from six.moves.urllib.parse import urlsplit, urlunsplit
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
import spack.error
import spack.util.compression as comp
from spack.version import Version
#
# Note: We call the input to most of these functions a "path" but the functions
# work on paths and URLs. There's not a good word for both of these, but
# "path" seemed like the most generic term.
#
def find_list_urls(url):
r"""Find good list URLs for the supplied URL.
By default, returns the dirname of the archive path.
Provides special treatment for the following websites, which have a
unique list URL different from the dirname of the download URL:
========= =======================================================
GitHub https://github.com/<repo>/<name>/releases
GitLab https://gitlab.\*/<repo>/<name>/tags
BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
CRAN https://\*.r-project.org/src/contrib/Archive/<name>
========= =======================================================
Parameters:
url (str): The download URL for the package
Returns:
set: One or more list URLs for the package
"""
url_types = [
# GitHub
# e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
(r'(.*github\.com/[^/]+/[^/]+)',
lambda m: m.group(1) + '/releases'),
# GitLab API endpoint
# e.g. https://gitlab.dkrz.de/api/v4/projects/k202009%2Flibaec/repository/archive.tar.gz?sha=v1.0.2
(r'(.*gitlab[^/]+)/api/v4/projects/([^/]+)%2F([^/]+)',
lambda m: m.group(1) + '/' + m.group(2) + '/' + m.group(3) + '/tags'),
# GitLab non-API endpoint
# e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
(r'(.*gitlab[^/]+/(?!api/v4/projects)[^/]+/[^/]+)',
lambda m: m.group(1) + '/tags'),
# BitBucket
# e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
(r'(.*bitbucket.org/[^/]+/[^/]+)',
lambda m: m.group(1) + '/downloads/?tab=tags'),
# CRAN
# e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
# e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
(r'(.*\.r-project\.org/src/contrib)/([^_]+)',
lambda m: m.group(1) + '/Archive/' + m.group(2)),
]
list_urls = set([os.path.dirname(url)])
for pattern, fun in url_types:
match = re.search(pattern, url)
if match:
list_urls.add(fun(match))
return list_urls
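# Sketch, using the GitHub example above:
#   find_list_urls('https://github.com/llnl/callpath/archive/v1.0.1.tar.gz')
# returns a set with the archive dirname plus
# 'https://github.com/llnl/callpath/releases'.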
def strip_query_and_fragment(path):
try:
components = urlsplit(path)
stripped = components[:3] + (None, None)
query, frag = components[3:5]
suffix = ''
if query:
suffix += '?' + query
if frag:
suffix += '#' + frag
return (urlunsplit(stripped), suffix)
except ValueError:
tty.debug("Got error parsing path %s" % path)
return (path, '') # Ignore URL parse errors here
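# Sketch: strip_query_and_fragment('http://example.com/a.tgz?raw=true')
# returns ('http://example.com/a.tgz', '?raw=true').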
def strip_version_suffixes(path):
"""Some tarballs contain extraneous information after the version:
* ``bowtie2-2.2.5-source``
* ``libevent-2.0.21-stable``
* ``cuda_8.0.44_linux.run``
These strings are not part of the version number and should be ignored.
This function strips those suffixes off and returns the remaining string.
The goal is that the version is always the last thing in ``path``:
* ``bowtie2-2.2.5``
* ``libevent-2.0.21``
* ``cuda_8.0.44``
Args:
path (str): The filename or URL for the package
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_version_offset
# NOTE: The problem is that we would have to add these regexes to the end
# NOTE: of every single version regex. Easier to just strip them off
# NOTE: permanently
suffix_regexes = [
# Download type
r'[Ii]nstall',
r'all',
r'code',
r'[Ss]ources?',
r'file',
r'full',
r'single',
r'with[a-zA-Z_-]+',
r'rock',
r'src(_0)?',
r'public',
r'bin',
r'binary',
r'run',
r'[Uu]niversal',
r'jar',
r'complete',
r'dynamic',
r'oss',
r'gem',
r'tar',
r'sh',
# Download version
r'release',
r'bin',
r'stable',
r'[Ff]inal',
r'rel',
r'orig',
r'dist',
r'\+',
# License
r'gpl',
# Arch
# Needs to come before and after OS, appears in both orders
r'ia32',
r'intel',
r'amd64',
r'linux64',
r'x64',
r'64bit',
r'x86[_-]64',
r'i586_64',
r'x86',
r'i[36]86',
r'ppc64(le)?',
r'armv?(7l|6l|64)',
# Other
r'cpp',
r'gtk',
r'incubating',
# OS
r'[Ll]inux(_64)?',
r'LINUX',
r'[Uu]ni?x',
r'[Ss]un[Oo][Ss]',
r'[Mm]ac[Oo][Ss][Xx]?',
r'[Oo][Ss][Xx]',
r'[Dd]arwin(64)?',
r'[Aa]pple',
r'[Ww]indows',
r'[Ww]in(64|32)?',
r'[Cc]ygwin(64|32)?',
r'[Mm]ingw',
r'centos',
# Arch
# Needs to come before and after OS, appears in both orders
r'ia32',
r'intel',
r'amd64',
r'linux64',
r'x64',
r'64bit',
r'x86[_-]64',
r'i586_64',
r'x86',
r'i[36]86',
r'ppc64(le)?',
r'armv?(7l|6l|64)?',
# PyPI
r'[._-]py[23].*\.whl',
r'[._-]cp[23].*\.whl',
r'[._-]win.*\.exe',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub(r'[._-]?' + regex + '$', '', path)
return path
def strip_name_suffixes(path, version):
"""Most tarballs contain a package name followed by a version number.
However, some also contain extraneous information in-between the name
and version:
* ``rgb-1.0.6``
* ``converge_install_2.3.16``
* ``jpegsrc.v9b``
These strings are not part of the package name and should be ignored.
This function strips the version number and any extraneous suffixes
off and returns the remaining string. The goal is that the name is
always the last thing in ``path``:
* ``rgb``
* ``converge``
* ``jpeg``
Args:
path (str): The filename or URL for the package
version (str): The version detected for this URL
Returns:
str: The ``path`` with any extraneous suffixes removed
"""
# NOTE: This could be done with complicated regexes in parse_name_offset
# NOTE: The problem is that we would have to add these regexes to every
# NOTE: single name regex. Easier to just strip them off permanently
suffix_regexes = [
# Strip off the version and anything after it
# name-ver
# name_ver
# name.ver
r'[._-][rvV]?' + str(version) + '.*',
# namever
r'V?' + str(version) + '.*',
# Download type
r'install',
r'[Ss]rc',
r'(open)?[Ss]ources?',
r'[._-]open',
r'[._-]archive',
r'[._-]std',
r'[._-]bin',
r'Software',
# Download version
r'release',
r'snapshot',
r'distrib',
r'everywhere',
r'latest',
# Arch
r'Linux(64)?',
r'x86_64',
# VCS
r'0\+bzr',
# License
r'gpl',
# Needs to come before and after gpl, appears in both orders
r'[._-]x11',
r'gpl',
]
for regex in suffix_regexes:
# Remove the suffix from the end of the path
# This may be done multiple times
path = re.sub('[._-]?' + regex + '$', '', path)
return path
def split_url_extension(path):
"""Some URLs have a query string, e.g.:
1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0
In (1), the query string needs to be stripped to get at the
extension, but in (2) & (3), the filename is IN a single final query
argument.
This strips the URL into three pieces: ``prefix``, ``ext``, and ``suffix``.
The suffix contains anything that was stripped off the URL to
get at the file extension. In (1), it will be ``'?raw=true'``, but
in (2), it will be empty. In (3) the suffix is a parameter that follows
after the file extension, e.g.:
1. ``('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7', '.tgz', '?raw=true')``
2. ``('http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
3. ``('https://gitlab.kitware.com/vtk/vtk/repository/archive', '.tar.bz2', '?ref=v7.0.0')``
"""
prefix, ext, suffix = path, '', ''
# Strip off sourceforge download suffix.
# e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
match = re.search(r'(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$', path)
if match:
prefix, suffix = match.groups()
ext = comp.extension(prefix)
if ext is not None:
prefix = comp.strip_extension(prefix)
else:
prefix, suf = strip_query_and_fragment(prefix)
ext = comp.extension(prefix)
prefix = comp.strip_extension(prefix)
suffix = suf + suffix
if ext is None:
ext = ''
return prefix, ext, suffix
def determine_url_file_extension(path):
"""This returns the type of archive a URL refers to. This is
sometimes confusing because of URLs like:
(1) https://github.com/petdance/ack/tarball/1.93_02
Where the URL doesn't actually contain the filename. We need
to know what type it is so that we can appropriately name files
in mirrors.
"""
match = re.search(r'github.com/.+/(zip|tar)ball/', path)
if match:
if match.group(1) == 'zip':
return 'zip'
elif match.group(1) == 'tar':
return 'tar.gz'
prefix, ext, suffix = split_url_extension(path)
return ext
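# e.g. for the ack URL above, the tarball/zipball special case returns 'tar.gz'
# before any extension parsing is attempted.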
def parse_version_offset(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (Version, int, int, int, str): A tuple containing:
version of the package,
first index of version,
length of version string,
the index of the matching regex, and
the matching regex
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
original_path = path
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the version number
stem = strip_version_suffixes(original_stem)
# Assumptions:
#
# 1. version always comes after the name
# 2. separators include '-', '_', and '.'
# 3. names can contain A-Z, a-z, 0-9, '+', separators
# 4. versions can contain A-Z, a-z, 0-9, separators
# 5. versions always start with a digit
# 6. versions are often prefixed by a 'v' or 'r' character
# 7. separators are most reliable to determine name/version boundaries
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the version of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
version_regexes = [
# 1st Pass: Simplest case
# Assume name contains no digits and version contains no letters
# e.g. libpng-1.6.27
(r'^[a-zA-Z+._-]+[._-]v?(\d[\d._-]*)$', stem),
# 2nd Pass: Version only
# Assume version contains no letters
# ver
# e.g. 3.2.7, 7.0.2-7, v3.3.0, v1_6_3
(r'^v?(\d[\d._-]*)$', stem),
# 3rd Pass: No separator characters are used
# Assume name contains no digits
# namever
# e.g. turbolinux702, nauty26r7
(r'^[a-zA-Z+]*(\d[\da-zA-Z]*)$', stem),
# 4th Pass: A single separator character is used
# Assume name contains no digits
# name-name-ver-ver
# e.g. panda-2016-03-07, gts-snapshot-121130, cdd-061a
(r'^[a-zA-Z+-]*(\d[\da-zA-Z-]*)$', stem),
# name_name_ver_ver
# e.g. tinyxml_2_6_2, boost_1_55_0, tbb2017_20161128
(r'^[a-zA-Z+_]*(\d[\da-zA-Z_]*)$', stem),
# name.name.ver.ver
# e.g. prank.source.150803, jpegsrc.v9b, atlas3.11.34, geant4.10.01.p03
(r'^[a-zA-Z+.]*(\d[\da-zA-Z.]*)$', stem),
# 5th Pass: Two separator characters are used
# Name may contain digits, version may contain letters
# name-name-ver.ver
# e.g. m4-1.4.17, gmp-6.0.0a, launchmon-v1.0.2
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-name-ver_ver
# e.g. icu4c-57_1
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z_]*)$', stem),
# name_name_ver.ver
# e.g. superlu_dist_4.1, pexsi_v0.9.0
(r'^[a-zA-Z\d+_]+_v?(\d[\da-zA-Z.]*)$', stem),
# name_name.ver.ver
# e.g. fer_source.v696
(r'^[a-zA-Z\d+_]+\.v?(\d[\da-zA-Z.]*)$', stem),
# name_ver-ver
# e.g. Bridger_r2014-12-01
(r'^[a-zA-Z\d+]+_r?(\d[\da-zA-Z-]*)$', stem),
# name-name-ver.ver-ver.ver
# e.g. sowing-1.1.23-p1, bib2xhtml-v3.0-15-gf506, 4.6.3-alpha04
(r'^(?:[a-zA-Z\d+-]+-)?v?(\d[\da-zA-Z.-]*)$', stem),
# namever.ver-ver.ver
# e.g. go1.4-bootstrap-20161024
(r'^[a-zA-Z+]+v?(\d[\da-zA-Z.-]*)$', stem),
# 6th Pass: All three separator characters are used
# Name may contain digits, version may contain letters
# name_name-ver.ver
# e.g. the_silver_searcher-0.32.0, sphinx_rtd_theme-0.1.10a0
(r'^[a-zA-Z\d+_]+-v?(\d[\da-zA-Z.]*)$', stem),
# name.name_ver.ver-ver.ver
# e.g. TH.data_1.0-8, XML_3.98-1.4
(r'^[a-zA-Z\d+.]+_v?(\d[\da-zA-Z.-]*)$', stem),
# name-name-ver.ver_ver.ver
# e.g. pypar-2.1.5_108
(r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z._]*)$', stem),
# name.name_name-ver.ver
# e.g. tap.py-1.6, backports.ssl_match_hostname-3.5.0.1
(r'^[a-zA-Z\d+._]+-v?(\d[\da-zA-Z.]*)$', stem),
# name-namever.ver_ver.ver
# e.g. STAR-CCM+11.06.010_02
(r'^[a-zA-Z+-]+(\d[\da-zA-Z._]*)$', stem),
# name-name_name-ver.ver
# e.g. PerlIO-utf8_strict-0.002
(r'^[a-zA-Z\d+_-]+-v?(\d[\da-zA-Z.]*)$', stem),
# 7th Pass: Specific VCS
# bazaar
# e.g. libvterm-0+bzr681
(r'bzr(\d[\da-zA-Z._-]*)$', stem),
# 8th Pass: Query strings
# e.g. https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0
# e.g. https://gitlab.kitware.com/api/v4/projects/icet%2Ficet/repository/archive.tar.bz2?sha=IceT-2.1.1
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
# e.g. https://software.broadinstitute.org/gatk/download/auth?package=GATK-archive&version=3.8-1-0-gf15c1c3ef
(r'[?&](?:sha|ref|version)=[a-zA-Z\d+-]*[_-]?v?(\d[\da-zA-Z._-]*)$', suffix), # noqa: E501
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
# e.g. http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz
# e.g. https://evtgen.hepforge.org/downloads?f=EvtGen-01.07.00.tar.gz
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'[?&](?:filename|f|get)=[a-zA-Z\d+-]+[_-]v?(\d[\da-zA-Z.]*)', stem),
# 9th Pass: Version in path
# github.com/repo/name/releases/download/vver/name
# e.g. https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow
(r'github\.com/[^/]+/[^/]+/releases/download/[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)/', path), # noqa: E501
# e.g. ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.26/ncbi.tar.gz
(r'(\d[\da-zA-Z._-]*)/[^/]+$', path),
]
for i, version_regex in enumerate(version_regexes):
regex, match_string = version_regex
match = re.search(regex, match_string)
if match and match.group(1) is not None:
version = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return version, start, len(version), i, regex
raise UndetectableVersionError(original_path)
def parse_version(path):
"""Try to extract a version string from a filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
spack.version.Version: The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
"""
version, start, length, i, regex = parse_version_offset(path)
return Version(version)
def parse_name_offset(path, v=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
v (str): The version of the package
Returns:
tuple of (str, int, int, int, str): A tuple containing:
name of the package,
first index of name,
length of name,
the index of the matching regex, and
the matching regex
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
original_path = path
# We really need to know the version of the package
# This helps us prevent collisions between the name and version
if v is None:
try:
v = parse_version(path)
except UndetectableVersionError:
# Not all URLs contain a version. We still want to be able
# to determine a name if possible.
v = 'unknown'
# path: The prefix of the URL, everything before the ext and suffix
# ext: The file extension
# suffix: Any kind of query string that begins with a '?'
path, ext, suffix = split_url_extension(path)
# stem: Everything from path after the final '/'
original_stem = os.path.basename(path)
# Try to strip off anything after the package name
stem = strip_name_suffixes(original_stem, v)
# List of the following format:
#
# [
# (regex, string),
# ...
# ]
#
# The first regex that matches string will be used to determine
# the name of the package. Therefore, hyperspecific regexes should
# come first while generic, catch-all regexes should come last.
# With that said, regular expressions are slow, so if possible, put
# ones that only catch one or two URLs at the bottom.
name_regexes = [
# 1st Pass: Common repositories
# GitHub: github.com/repo/name/
# e.g. https://github.com/nco/nco/archive/4.6.2.tar.gz
(r'github\.com/[^/]+/([^/]+)', path),
# GitLab API endpoint: gitlab.*/api/v4/projects/NAMESPACE%2Fname/
# e.g. https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0
(r'gitlab[^/]+/api/v4/projects/[^/]+%2F([^/]+)', path),
# GitLab non-API endpoint: gitlab.*/repo/name/
# e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
(r'gitlab[^/]+/(?!api/v4/projects)[^/]+/([^/]+)', path),
# Bitbucket: bitbucket.org/repo/name/
# e.g. https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2
(r'bitbucket\.org/[^/]+/([^/]+)', path),
# PyPI: pypi.(python.org|io)/packages/source/first-letter/name/
# e.g. https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz
# e.g. https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz
(r'pypi\.(?:python\.org|io)/packages/source/[A-Za-z\d]/([^/]+)', path),
# 2nd Pass: Query strings
# ?filename=name-ver.ver
# e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
(r'\?filename=([A-Za-z\d+-]+)$', stem),
# ?f=name-ver.ver
# e.g. https://evtgen.hepforge.org/downloads?f=EvtGen-01.07.00.tar.gz
(r'\?f=([A-Za-z\d+-]+)$', stem),
# ?package=name
# e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
(r'\?package=([A-Za-z\d+-]+)', stem),
# ?package=name-version
(r'\?package=([A-Za-z\d]+)', suffix),
# download.php
# e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
(r'([^/]+)/download.php$', path),
# 3rd Pass: Name followed by version in archive
(r'^([A-Za-z\d+\._-]+)$', stem),
]
for i, name_regex in enumerate(name_regexes):
regex, match_string = name_regex
match = re.search(regex, match_string)
if match:
name = match.group(1)
start = match.start(1)
# If we matched from the stem or suffix, we need to add offset
offset = 0
if match_string is stem:
offset = len(path) - len(original_stem)
elif match_string is suffix:
offset = len(path)
if ext:
offset += len(ext) + 1 # .tar.gz is converted to tar.gz
start += offset
return name, start, len(name), i, regex
raise UndetectableNameError(original_path)
def parse_name(path, ver=None):
"""Try to determine the name of a package from its filename or URL.
Args:
path (str): The filename or URL for the package
ver (str): The version of the package
Returns:
str: The name of the package
Raises:
UndetectableNameError: If the URL does not match any regexes
"""
name, start, length, i, regex = parse_name_offset(path, ver)
return name
def parse_name_and_version(path):
"""Try to determine the name of a package and extract its version
from its filename or URL.
Args:
path (str): The filename or URL for the package
Returns:
tuple of (str, Version): A tuple containing:
The name of the package
The version of the package
Raises:
UndetectableVersionError: If the URL does not match any regexes
UndetectableNameError: If the URL does not match any regexes
"""
ver = parse_version(path)
name = parse_name(path, ver)
return (name, ver)
def insensitize(string):
"""Change upper and lowercase letters to be case insensitive in
the provided string. e.g., 'a' becomes '[Aa]', 'B' becomes
'[bB]', etc. Use for building regexes."""
def to_ins(match):
char = match.group(1)
return '[%s%s]' % (char.lower(), char.upper())
return re.sub(r'([a-zA-Z])', to_ins, string)
def cumsum(elts, init=0, fn=lambda x: x):
"""Return cumulative sum of result of fn on each element in elts."""
sums = []
s = init
for i, e in enumerate(elts):
sums.append(s)
s += fn(e)
return sums
def find_all(substring, string):
"""Returns a list containing the indices of
every occurrence of substring in string."""
occurrences = []
index = 0
while index < len(string):
index = string.find(substring, index)
if index == -1:
break
occurrences.append(index)
index += len(substring)
return occurrences
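# Sketch: find_all('ab', 'abcab') returns [0, 3]; overlapping occurrences are
# skipped because the search resumes after each full match.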
def substitution_offsets(path):
"""This returns offsets for substituting versions and names in the
provided path. It is a helper for :func:`substitute_version`.
"""
# Get name and version offsets
try:
ver, vs, vl, vi, vregex = parse_version_offset(path)
name, ns, nl, ni, nregex = parse_name_offset(path, ver)
except UndetectableNameError:
return (None, -1, -1, (), ver, vs, vl, (vs,))
except UndetectableVersionError:
try:
name, ns, nl, ni, nregex = parse_name_offset(path)
return (name, ns, nl, (ns,), None, -1, -1, ())
except UndetectableNameError:
return (None, -1, -1, (), None, -1, -1, ())
# Find the index of every occurrence of name and ver in path
name_offsets = find_all(name, path)
ver_offsets = find_all(ver, path)
return (name, ns, nl, name_offsets,
ver, vs, vl, ver_offsets)
def wildcard_version(path):
"""Find the version in the supplied path, and return a regular expression
that will match this path with any version in its place.
"""
# Get version so we can replace it with a wildcard
version = parse_version(path)
# Split path by versions
vparts = path.split(str(version))
# Replace each version with a generic capture group to find versions
# and escape everything else so it's not interpreted as a regex
result = r'(\d.*)'.join(re.escape(vp) for vp in vparts)
return result
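# Sketch: wildcard_version('libelf-0.8.13.tar.gz') yields a pattern equivalent
# to r'libelf\-(\d.*)\.tar\.gz' (exact escaping depends on re.escape).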
def substitute_version(path, new_version):
"""Given a URL or archive name, find the version in the path and
substitute the new version for it. Replace all occurrences of
the version *if* they don't overlap with the package name.
Simple example:
.. code-block:: python
>>> substitute_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '2.9.3')
'http://www.mr511.de/software/libelf-2.9.3.tar.gz'
Complex example:
.. code-block:: python
>>> substitute_version('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz', '2.3')
'https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz'
"""
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
new_path = ''
last = 0
for vo in voffs:
new_path += path[last:vo]
new_path += str(new_version)
last = vo + vl
new_path += path[last:]
return new_path
def color_url(path, **kwargs):
"""Color the parts of the url according to Spack's parsing.
Colors are:
| Cyan: The version found by :func:`parse_version_offset`.
| Red: The name found by :func:`parse_name_offset`.
| Green: Instances of version string from :func:`substitute_version`.
| Magenta: Instances of the name (protected from substitution).
Args:
path (str): The filename or URL for the package
errors (bool): Append parse errors at end of string.
subs (bool): Color substitutions as well as parsed name/version.
"""
# Allow URLs containing @ and }
path = cescape(path)
errors = kwargs.get('errors', False)
subs = kwargs.get('subs', False)
(name, ns, nl, noffs,
ver, vs, vl, voffs) = substitution_offsets(path)
nends = [no + nl - 1 for no in noffs]
vends = [vo + vl - 1 for vo in voffs]
nerr = verr = 0
out = StringIO()
for i in range(len(path)):
if i == vs:
out.write('@c')
verr += 1
elif i == ns:
out.write('@r')
nerr += 1
elif subs:
if i in voffs:
out.write('@g')
elif i in noffs:
out.write('@m')
out.write(path[i])
if i == vs + vl - 1:
out.write('@.')
verr += 1
elif i == ns + nl - 1:
out.write('@.')
nerr += 1
elif subs:
if i in vends or i in nends:
out.write('@.')
if errors:
if nerr == 0:
out.write(" @r{[no name]}")
if verr == 0:
out.write(" @r{[no version]}")
if nerr == 1:
out.write(" @r{[incomplete name]}")
if verr == 1:
out.write(" @r{[incomplete version]}")
return colorize(out.getvalue())
class UrlParseError(spack.error.SpackError):
"""Raised when the URL module can't parse something correctly."""
def __init__(self, msg, path):
super(UrlParseError, self).__init__(msg)
self.path = path
class UndetectableVersionError(UrlParseError):
"""Raised when we can't parse a version from a string."""
def __init__(self, path):
super(UndetectableVersionError, self).__init__(
"Couldn't detect version in: " + path, path)
class UndetectableNameError(UrlParseError):
"""Raised when we can't parse a package name from a string."""
def __init__(self, path):
super(UndetectableNameError, self).__init__(
"Couldn't parse package name in: " + path, path)
|
iulian787/spack
|
lib/spack/spack/url.py
|
Python
|
lgpl-2.1
| 31,342
|
[
"BLAST",
"HOOMD-blue",
"VTK"
] |
9f29f0cee25bbedc7a1b6898e079b277bad22df88d052bf7c6d7c079c61559e0
|
#!/usr/bin/env python
import sys
try:
from netCDF4 import Dataset
except:
print "netCDF4 is not installed!"
sys.exit(1)
import numpy as np
import pylab as plt
from optparse import OptionParser
parser = OptionParser()
parser.usage = "usage: %prog [options] FILE"
parser.description = "A script to compare PISM flowline velocities with Stokes solution."
(options, args) = parser.parse_args()
plot_acab = True
if len(args) != 1:
print('wrong number of arguments, 1 expected')
exit(1)
try:
nc = Dataset(args[0], 'r')
except:
print(("file %s not found ... ending ..." % args[0]))
exit(2)
def permute(variable, output_order = ('time', 'z', 'zb', 'y', 'x')):
"""Permute dimensions of a NetCDF variable to match the output storage order."""
input_dimensions = variable.dimensions
# filter out irrelevant dimensions
dimensions = filter(lambda(x): x in input_dimensions,
output_order)
# create the mapping
mapping = map(lambda(x): dimensions.index(x),
input_dimensions)
if mapping:
return np.transpose(variable[:], mapping)
else:
return variable[:] # so that it does not break processing "mapping"
x = nc.variables["x"][:]
b = np.squeeze(nc.variables["topg"][:])
s = np.squeeze(nc.variables["usurf"][:])
h = np.squeeze(nc.variables["thk"][:])
z = nc.variables["z"][:]
mask = np.zeros_like(h)
mask[h<=1] = 1
us = np.ma.array(data=np.squeeze(nc.variables["uvelsurf"][:]),mask=mask)
ub = np.ma.array(data=np.squeeze(nc.variables["uvelbase"][:]),mask=mask)
## stuff needed for contour plots
xx = (np.tile(x,[len(z),1]))
zz = ((np.tile(z,[len(x),1])).transpose() + b)
# ignore the first level
cts = np.squeeze(permute(nc.variables["cts"]))
liqfrac = np.squeeze(permute(nc.variables["liqfrac"]))
temppa = np.squeeze(permute(nc.variables["temp_pa"]))
mask2 = np.zeros_like(cts)
mask2[zz>s] = 1
cts = np.ma.array(data=cts,mask=mask2)
liqfrac = np.ma.array(data=liqfrac,mask=mask2)
temppa = np.ma.array(data=temppa,mask=mask2)
## Contour level of the CTS
cts_level = [1,1]
liqfrac_levels = np.arange(0,2.5,.25)
temppa_levels = [-6,-5,-4,-3,-2,-1,-.0001]
fig = plt.figure(figsize=(6.4,7.4))
axUpperLeft = plt.axes([0.1,0.6,0.8,0.25])
axLower = plt.axes([0.1,0.05,0.8,0.5])
axUpperLeft.plot(x,us,color='#377EB8', lw = 1.5)
axUpperLeft.plot(x,ub,'--',color='#377EB8', lw = 1.5)
axUpperLeft.axes.set_xlim(-250, 3500)
axUpperLeft.axes.set_ylabel("velocity [m a$^{-1}$]")
plt.setp(axUpperLeft, xticks=[])
if (plot_acab == True):
acab = np.squeeze(nc.variables["climatic_mass_balance"][:])
axUpperRight = axUpperLeft.twinx()
axUpperRight.plot(x, acab / 910.0, color='#984EA3',lw=1.5)
axUpperRight.axes.set_ylabel("mass balance [m a$^{-1}$]")
axUpperRight.axes.set_xlim(-250, 3500)
axLower.plot(x,b,color='black', lw = 1.5)
axLower.plot(x,s,color='black', lw = 1.5)
c1=axLower.contourf(xx,zz,liqfrac*100,liqfrac_levels,cmap=plt.cm.Reds)
plt.colorbar(mappable=c1,ax=axLower,orientation='horizontal',pad=0.05,shrink=0.75,extend="max")
c2=axLower.contourf(xx,zz,temppa,temppa_levels,cmap=plt.cm.Blues_r,lw = 1)
plt.colorbar(mappable=c2,ax=axLower,orientation='horizontal',ticks=[-6,-5,-4,-3,-2,-1,0],pad=0.20,shrink=0.75)
axLower.contour(xx,zz,cts,cts_level,colors='black',linestyles='dashed',lw = 1)
axLower.axes.set_xlim(-250, 3500)
axLower.axes.set_ylim(1100,1800)
axLower.axes.set_xlabel("distance from bergschrund [m]")
axLower.axes.set_ylabel("elevation [m a.s.l.]")
plt.savefig('sg_results.pdf',bbox_inches='tight',pad_inches=0.35)
nc.close()
|
talbrecht/pism_pik06
|
examples/storglaciaren/plot_flowline_results.py
|
Python
|
gpl-3.0
| 3,592
|
[
"NetCDF"
] |
b31a241668105c8d0728e667180d0d1b835e10a6beeac9bae34c04a9600d82a3
|
### Input parameters for PAScual.
### by Carlos Pascual-Izarra < cpascual [AT] users.sourceforge.net > 2007
### '''
### This file is part of PAScual.
### PAScual: Positron Annihilation Spectroscopy data analysis
### Copyright (C) 2007 Carlos Pascual-Izarra < cpascual [AT] users.sourceforge.net >
###
### This program is free software: you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation, either version 3 of the License, or
### (at your option) any later version.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program. If not, see <http://www.gnu.org/licenses/>.
### '''
#########DO NOT TOUCH THIS BLOCK ##############################
from glob import glob as matchfilenames
from scipy import inf, array, arange, log, exp, concatenate, savetxt
############# END OF "DO-NOT-TOUCH" BLOCK #####################
######### In principle, this is the only file you need to touch for a regular calculation
######### You can modify the parameters below this line
######### IMPORTANT NOTES ON SYNTAX FOR THIS FILE:
# # 1- Regular Python syntax format applies. The hash "#" character is a comment. You can call Python functions and operators if you want.
# # 2- Parameters marked with (*1) can optionally be given as a list (for multiple spectra fit).
# # If a non-list value is provided it will be used for all the spectra.
# # 3- Parameters marked with (*2) are direct descriptions of fitting parameters. They can be one of the following values:
# # a) a string being the name of a named parameter, with an optional "*" for making common parameters (see the named parameter section below)
# # b) a single numerical value. This means that the parameter is FIXED (non-free).
# # c) a tuple containing (value,minimum,maximum). This means it is a free parameter.
# # 4- Parameters marked with (*3) accept the same syntax as those with (*1) except in that they are extended over the number of
# # palssets (instead of the number of spectra). Note that the palssets are automatically created by optimally distributing the
# # given spectra. Therefore, the usage of lists for these parameters is only for very advanced users who know what they are doing
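# # Illustrative example of the three (*2) forms (these lines are NOT used below, they only show the syntax):
# # fwhm='fwhm*' # (a) reference to a named parameter; the trailing "*" makes it common (shared) between spectra
# # bg=25.0 # (b) a single numerical value: the parameter is FIXED
# # c0=(100,10,500) # (c) a (value,minimum,maximum) tuple: the parameter is FREE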
##=================================================================================================================================================
######Experimental data
#### Indicate which experimental data is going to be analysed. You can make use of a path variable.
#### You can also use the matchfilenames() function in order to use wildcards: *, ?, [-],....
# expfilenames=['data/KansyA.dat'] #(*1) list of names (optionally with a path) for the experimental file(s)
# expfilenames=['data/KansyA.dat','data/KansyB.dat','data/KansyC.dat','data/KansyD.dat']
# path="C:\Documents and Settings\pas064\My Documents\PALS\Aurelia\Al holder\PHYT_T20/selectedsums/"
# path="C:\Documents and Settings\pas064\My Documents\PALS\Aurelia\Al holder\PHYT_T20/"
path = './examples/'
# expfilenames=[]
# expfilenames=matchfilenames(path+'Pure*/??_???*sum.al2')
# expfilenames+=matchfilenames(path+'3%*/??_???*sum.al2')
# expfilenames+=(matchfilenames(path+'5%*/??_???*sum.al2')+matchfilenames(path+'5%*/05B_???.al2'))
# expfilenames+=matchfilenames(path+'8%*/08_???.al2')
# expfilenames+=matchfilenames(path+'11%*/??_???*sum.al2')
# expfilenames+=matchfilenames(path+'13/??_???.al2')
# expfilenames+=matchfilenames(path+'15/??_???*sum.al2')
# expfilenames+=matchfilenames(path+'17/??_???.al2')
# expfilenames+=matchfilenames(path+'19/??_???.al2')
# expfilenames+=matchfilenames(path+'22/??_???.al2')
# expfilenames+=matchfilenames(path+'25/??_???.al2')
# expfilenames+=matchfilenames(path+'26/??_???.al2')[:10]
# expfilenames+=matchfilenames(path+'27/??_???.al2')
# expfilenames+=matchfilenames(path+'28/??_???.al2')[:10]
expfilenames = matchfilenames(path + '*.dat')
# expfilenames+=matchfilenames(path+'CPI_A_*.dat')
# expfilenames=5*[None]
# skipfilenames=(matchfilenames(path+'28_1??.al2')) # list of file names names to ignore (useful if matchfilenames() returned some unwanted file names)
headerlines = 4 # (*1) Number of rows to skip when reading the exp file (to skip the header). Put to 0 if no header.
####### Calibration.
psperchannel = 50 # (*1) Channel width in picoseconds.
######ROI definition.
#### Any roi can be given. Just do an array (or list) containing channel numbers in the ROI.
#### Examples:
# roi=arange(213,879) #roi between channels 10 and 800
# roi=range(10,200)+range(400,500) #roi defined by two subranges # (*1) The whole spectrum
#### OR, ALTERNATIVELY, instead of directly defining the roi, you can define the left_of_max and stopdat variables as in MELT.
left_of_max = 5 # (*1) Start this much at the left of the channel with the max number of counts.
stopdat = 689 # (*1) Channel where the data stops.
####### Named parameters
#### For convenience, it is possible to assign a name to a parameter.
#### This is done in a "dictionary": {'parname1':(val1,min1,max1), 'parname2':(val2,min2,max2),...}
#### The parameters defined here can be later referred to by their name.
#### When **referring** to the parameter, if just its name is used, a unique COPY of that parameter will be used and
#### therefore that parameter will be independent (as opposed to "common")
#### On the other hand, if a "*" is appended to the name when referring to it, a copy is NOT made (allowing to have common parameters)
namedparameters = {
'fwhm': (270, 100, 500),
'c0': (30, 10, 100),
'bg': (20, 1, 1000),
'tau_pPs': (125, 50, 250), 'tau_drt': (400, 250, 600),
'tau_oPs': (3000, 600, 142e3),
'tau1': (125, 50, 350), 'tau2': (400, 150, 600), 'tau3': (1900, 500, 142e3),
'tau': (300, psperchannel, 142e3), 'tauKansy': (300, psperchannel, 142e3),
'ity': (1, 0, None), 'ityKansy': (1, 0, None),
'tau4': (1300, 800, 5000), 'at1': (1500, 1000, 4000), 'at2': 1.7,
'tausrc': 1630, 'itysrc': .066, 'tauH2O': (1800, 1600, 2000),
'tauPHYT': (2800, 2000, 3500)}
######Background.
####Note: unless you fix the background, it doesn't really matter to give a precise value.
# bg=(100,0,1e4) # (*1)(*2)
#### ALTERNATIVELY: If the baseline is present in a region of your spectrum, you can just give its starting and end channels
#### In this case, it is assumed to be free non-common parameter(s) and the min and max will be automatically calculated
startbg = 660 # (*1) first bin for background initialisation
stopbg = 680 # (*1) last bin for background initialisation
# bg=(20,1,200)
# bg=25*5
# bg=[5e-5*1e6*float(a) for a in areaList ]
# print bg
# raw_input()
###### FWHM.
#### Resolution function Full Width Half Maximum in ps (assuming Gaussian shape)
# fwhm=(300,200,400) # (*1)(*2) FWHM
# fwhm="fwhm"
fwhm = (280, None, None)
# fwhm=270
###### Offset.
#### Channel for time 0.
# c0=(100,10,500) # (*1)(*2) Calibration offset. If omitted, it will be assumed to be a free non-common parameter and (val,min,max) will be automatically calculated
# c0=[20,"c0","c0",20]
# c0=100
####### Lifetimes
#### Define the lifetimes as a TUPLE of fitpar descriptors, i.e., as (t1,t2,t3,...), where ti is one of the cases described by (*2)
#### note that a "list of tuples of descriptors" is also possible just following (*1)
tau = ((100, 50, 200), (400, 200, 500), (
1000, 500, 1.42e5)) # (*1)(*2) lifetimes in ps. Note, min>0 and max<1.42e5ps
# tau=[("tau1","tau2","tau3",1000), ("tau1","tau2","tau3",1050)]+2*[(234,"Ctau2")]
# tau=[("tau1","tau2","tau3","tau4")]+2*[(234,"Ctau2")]
# tau=('tau1*','tau2*','tau3*',(1140,50,150000))
# tau=[((100,psperchannel,142e3),(300,psperchannel,142e3),(500,psperchannel,142e3),(5000,psperchannel,142e3))]+3*[('tau','tau')]
# tau=[((100,psperchannel,142e3),(300,psperchannel,142e3),(500,psperchannel,142e3),(5000,psperchannel,142e3))]
# tau=[('tau','tau','tau','tau')]+3*[('tauKansy*','tau')]
# tau=[((100,50,180),(180,170,300))]
# tau=[('tau','tau')]
# tau=[('tau','tau','tau','tau')]
# tau=(125,"tau_drt","tau_oPs","tauPHYT")
# tau=(125,"tau_drt","tau_oPs",1905,418)
# tau=(125, (400,1,142e3), 1800, (3000,1,142e3))
# ity=('ity','ity',.05,'ity')
# tau=[(125, 'tau_drt', 'tauH2O', t) for t in tarray]
# ity=(.15, .20 , .05, .60)
# ity=(.20,.60,.20)
# ity=(.25,.25,.25,.25)
# tau=(125,'tau','tau')
####### Intensities
#### You can define the lifetimes as a tuple, including common parameters
# ity=None # (*1)(*2) Same syntax as for tau. (if omitted, it will be initialised as val=1,min=0,max=inf)
# ity=[('ity','ity','ity','ity')]+3*[('ityKansy*','ity*')]
# ity=('ity','ity','ity','ity',)
# ity=('ity','ity','ity',0.035,0.155)
##=================================================================================================================================================
####### Fitology:
#### These parameters control the way the fitting is done.
#### Three tools are implemented for fitting: Simulated annealing (SA), Bayesian Inference (BI) and Local Search (LOCAL).
#### Read the documentation to find out about each of them.
#### Roughly speaking: SA is good to find the global minimum but it is slow (use it as an initialisation tool).
#### BI should be run after the global minimum is found in order to calculate the errors. Use it after SA
#### LOCAL is not robust but it is fast once you are near the minimum. Use it after SA.
fitmode = ('LOCAL', 'LOG ' + path + 'results.txt')
# fitmode=('LOAD','SA','LOCAL','BI','SAVE')
# fitmode=('LOAD','BI','REPORT','SAVE')
# fitmode=[('SA','SAVE kk0_SA','BI','SAVE kk0_BI','LOCAL','SAVE kk0_LOCAL'),('SA','SAVE kk1_SA','BI','SAVE kk1_BI','LOCAL','SAVE kk1_LOCAL'),('SA','SAVE kk2_SA','BI','SAVE kk2_BI','LOCAL','SAVE kk2_LOCAL'),('SA','SAVE kk3_SA','BI','SAVE kk3_BI','LOCAL','SAVE kk3_LOCAL')]
# fitmode=[('LOAD kk0_SA','REPORT','LOAD kk0_BI','REPORT'),('LOAD kk1_SA','REPORT','LOAD kk1_BI','REPORT'),('LOAD kk2_SA','REPORT','LOAD kk2_BI','REPORT'),('LOAD kk3_SA','REPORT','LOAD kk3_BI','REPORT')]
# fitmode=('LOAD','LOCAL','SAVE','LOG '+path+'dp-allfree.dat')
# fitmode=('SA','LOG '+path+'kk.dat')
# fitmode=[('SA','SAVE','LOCAL','LOG '+path+'kk.dat',)]+[('LOAD','LOCAL','LOG '+path+'kk.dat',)]*(len(expfilenames)-1)
# fitmode=('LOAD','LOCAL','SAVE','LOG '+path+'results.txt')
# fitmode=[('LOCAL','LOG '+path+'results.txt','SAVE %s_SA.sav'%fn) for fn in expfilenames]
# fitmode=('SA','LOG '+path+'results.txt')
# fitmode=[('FAKE 2e7 %sCPI_A_%03i.dat'%(path,i+1),'LOG %sCPI_A_perfect.txt'%path) for i in range(len(expfilenames))]
# fitmode=[('FAKE 2e6 %sCPI_D_%03i.dat'%(path,i+1),'LOG %sCPI_D_perfect.txt'%path) for i in range(len(expfilenames))]
# fitmode=[('FAKE 5e5 %sCPI_G_%03i.dat'%(path,i),'LOG %sCPI_G_perfect.txt'%path) for i in T]
# fitmode=[('FAKE 5e5 %sCPI_G_%03i_%03i.dat'%(path,T[i],i%nrepeats),'LOG %sCPI_G_perfect.txt'%path) for i in xrange(T.size)]
# fitmode=[('FAKE 25e5 %sCPI_J_%02i_%03i.dat'%(path,int(ity4[i]*100),i%nrepeats),'LOG %sCPI_J_perfect.txt'%path) for i in xrange(ity4.size)]
# fitmode=[('FAKE %f %sCPI_F_%s.dat'%(float(A)*1e6,path,A),'LOG %sCPI_F_perfect.txt'%path) for A in areaList]
# fitmode=('LOAD','LOCAL NOLIMITS','SAVE','LOG '+path+'CPI_A_results.txt')
# fitmode=('LOCAL NOLIMITS','LOG '+path+'CPI_A_results.txt')
# fitmode=('LOCAL','LOG kk')
######Output
###Output data file. See following examples:
# outputfile=path+'output.txt' #a name
outputfile = None # no output file creation
# outputfile=expfilename.rsplit('.',1)[0]+'.out' #same as the experimental but with .out extension
# outputfile=expfilename.rsplit('.',1)[0]+'20.out'
BI_report = 500 # A report will be shown every this steps during BI (put to -1 for no reports). Be Careful: too much reports may slow down the calc.
####Advanced fitting parameters
#### IMPORTANT:
#### The following parameters are not supposed to be changed by regular users. They deal with internal algorithmical choices.
#### The default values are generally correct. Don't mess with them if you don't know exactly what you are doing
SA_tol = 1e-5 # (*3)Tolerance for stopping the SA
SA_stopT = .1 # (*3)Stop temperature for SA (put to 0 to disable). (SA_stopT>1 is not recommended)
SA_maxiter = inf # (*3)Max number of iterations in the SimAnn fit
SA_direct = True # (*3)Whether to use the direct mode in NNRLA for SA. Note: If SA_NNRLA=False (or <=0), SA_direct is ignored.
SA_meltratio = 0.97 # (*3)The "melting" phase of the SA will stop when this acceptance ratio is reached
LOCAL_tol = 0 # (NOT USED) (*3)Local search tolerance (the lower, the more time it will take). Put this to 0 to skip Local search and ~1e-5 for calculating
LOCAL_maxiter = 1e5 # (NOT USED) (*3)Max number of iterations in the LOCAL fit.
BI_stab = 5000 # (*3)This many steps (multiplied by the order of the searching space!) of BI will be done and not considered for statistical purposes. Put this to 0 to skip stabilisation.
BI_length = 50000 # (*3)This many steps (multiplied by the order of the searching space) will be calculated by BI.
seed = 1345 # Seed for pseudorandom generator
if __name__ == '__main__':
from PAScual import *
try:
import pylab
except:
print >> sys.stderr, "Pylab could not be imported. Graphical output won't be supported"
safemain()
|
cpascual/PAScual
|
PAScual/PAScual_input.py
|
Python
|
gpl-3.0
| 13,938
|
[
"Gaussian"
] |
bd2b57277af820615f374d2e49c9f5128f421f588a19e07fab14f98efc8c47c1
|
#!/usr/bin/env python
##############################################################################
#
# Usage example for the procedure PPXF, which
# implements the Penalized Pixel-Fitting (pPXF) method by
# Cappellari M., & Emsellem E., 2004, PASP, 116, 138.
#
# This example shows how to fit multiple stellar components with different
# stellar population and kinematics.
#
# MODIFICATION HISTORY:
# V1.0.0: Early test version. Michele Cappellari, Oxford, 20 July 2009
# V1.1.0: Cleaned up for the paper by Johnston et al. (MNRAS, 2013).
# MC, Oxford, 26 January 2012
# V2.0.0: Converted to Python and adapted to the changes in the new public
# PPXF version, Oxford 8 January 2014
# V2.0.1: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# V2.0.2: Support both Pyfits and Astropy to read FITS files.
# MC, Oxford, 22 October 2015
#
##############################################################################
from __future__ import print_function
try:
import pyfits
except:
from astropy.io import fits as pyfits
from scipy import signal
import numpy as np
from time import clock
import matplotlib.pyplot as plt
from ppxf import ppxf
import ppxf_util as util
def ppxf_two_components_example():
velscale = 30.
    hdu = pyfits.open('spectra/Rbi1.30z+0.00t12.59.fits') # Solar metallicity, Age=12.59 Gyr
gal_lin = hdu[0].data
h1 = hdu[0].header
lamRange1 = h1['CRVAL1'] + np.array([0., h1['CDELT1']*(h1['NAXIS1']-1)])
model1, logLam1, velscale = util.log_rebin(lamRange1, gal_lin, velscale=velscale)
model1 /= np.median(model1)
    hdu = pyfits.open('spectra/Rbi1.30z+0.00t01.00.fits') # Solar metallicity, Age=1.00 Gyr
gal_lin = hdu[0].data
model2, logLam1, velscale = util.log_rebin(lamRange1, gal_lin, velscale=velscale)
model2 /= np.median(model2)
model = np.column_stack([model1, model2])
galaxy = np.empty_like(model)
# These are the input values in spectral pixels
# for the (V,sigma) of the two kinematic components
#
vel = np.array([0., 250.])/velscale
sigma = np.array([200., 100.])/velscale
# The synthetic galaxy model consists of the sum of two
    # SSP spectra with ages of 12.59 Gyr and 1.00 Gyr respectively
# with different velocity and dispersion
#
for j in range(len(vel)):
dx = int(abs(vel[j]) + 4.*sigma[j]) # Sample the Gaussian at least to vel+4*sigma
v = np.linspace(-dx, dx, 2*dx + 1)
losvd = np.exp(-0.5*((v - vel[j])/sigma[j])**2) # Gaussian LOSVD
        losvd /= np.sum(losvd) # normalize LOSVD
galaxy[:, j] = signal.fftconvolve(model[:, j], losvd, mode="same")
galaxy[:, j] /= np.median(model[:, j])
galaxy = np.sum(galaxy, axis=1)
sn = 200.
np.random.seed(2) # Ensure reproducible results
galaxy = np.random.normal(galaxy, galaxy/sn) # add noise to galaxy
# Adopts two templates per kinematic component
#
templates = np.column_stack([model1, model2, model1, model2])
# Start both kinematic components from the same guess.
# With multiple stellar kinematic components
# a good starting guess is essential
#
start = [np.mean(vel)*velscale, np.mean(sigma)*velscale]
start = [start, start]
goodPixels = np.arange(20, 1280)
t = clock()
plt.clf()
plt.subplot(211)
plt.title("Two components pPXF fit")
print("+++++++++++++++++++++++++++++++++++++++++++++")
pp = ppxf(templates, galaxy, galaxy*0+1, velscale, start,
goodpixels=goodPixels, plot=True, degree=4,
moments=[4, 4], component=[0, 0, 1, 1])
plt.subplot(212)
plt.title("Single component pPXF fit")
print("---------------------------------------------")
start = start[0]
pp = ppxf(templates, galaxy, galaxy*0+1, velscale, start,
goodpixels=goodPixels, plot=True, degree=4, moments=4)
plt.tight_layout()
plt.pause(0.01)
print("=============================================")
print("Total elapsed time %.2f s" % (clock() - t))
#------------------------------------------------------------------------------
if __name__ == '__main__':
ppxf_two_components_example()
|
moustakas/impy
|
lib/ppxf/ppxf_two_components_example.py
|
Python
|
gpl-2.0
| 4,201
|
[
"Galaxy",
"Gaussian"
] |
815f27209cc81ab0ab540e3b48356ca5568d83ff0593db24fe561bdf26deadbd
|
####### We start by importing some useful packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
import sys, getopt
from pysit import *
from pysit.gallery import marmousi
from pysit.util.parallel import *
from mpi4py import MPI
from scipy.io import savemat, loadmat
####### Here we define the plots
def plot_func(fig_nr, arr_2d, x_min, x_max, z_min, z_max, x_label, z_label, title, cbar_min=None, cbar_max=None):
fig = plt.figure(fig_nr)
ax = fig.add_subplot(111)
im = ax.imshow(arr_2d, extent=[x_min,x_max,z_max,z_min], interpolation="nearest")
im.axes.yaxis.set_label_text(z_label, fontsize = 10)
im.axes.xaxis.set_label_text(x_label, fontsize = 10)
im.axes.set_title(title, fontsize = 10)
if cbar_min !=None and cbar_max !=None:
norm = mpl.colors.Normalize(vmin=cbar_min, vmax=cbar_max)
im.set_norm(norm)
cb = plt.colorbar(im, ticks=np.linspace(cbar_min, cbar_max, 5))
else:
cb = plt.colorbar(im)
return fig
####### Here we define parallel shots
def make_parallel_shots(pwrap, nsources, x_pos_sources_arr_all, z_pos_sources, x_pos_receivers_arr_all, z_pos_receivers, peakfreq):
min_nr_per_process = nsources / pwrap.size
nr_leftover_processes = nsources % (min_nr_per_process * pwrap.size)
nr_shots_this_process = min_nr_per_process
if pwrap.rank < nr_leftover_processes:
nr_shots_this_process += 1
local_shots = []
for i in xrange(nr_shots_this_process):
all_shot_index = i*pwrap.size + pwrap.rank
print "CREATING SHOT WITH INDEX: %i"%all_shot_index
source = PointSource(m, (x_pos_sources_arr_all[all_shot_index], z_pos_sources), RickerWavelet(peakfreq), approximation='gaussian')
####### Here we define set of receivers
receivers = ReceiverSet(m, [PointReceiver(m, (x, z_pos_receivers), approximation='gaussian') for x in x_pos_receivers])
####### Here we create and store the shots
shot = Shot(source, receivers)
local_shots.append(shot)
return local_shots
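# Illustrative note (not part of the original script): shots are distributed
# round-robin over the MPI ranks via all_shot_index = i*pwrap.size + pwrap.rank,
# so with the 19 sources used below on 4 ranks, rank 0 builds shots
# 0, 4, 8, 12, 16 while rank 3 builds shots 3, 7, 11, 15.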
if __name__ == '__main__':
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
print "size = %i and rank = %i"%(size, rank)
pwrap = ParallelWrapShot(comm=comm)
####### Here we set the wave speed
WaveSpeed.add_lower_bound(1000.0)
WaveSpeed.add_upper_bound(6500.0)
x_lbc = PML(600.0,100.0); x_rbc = PML(600.0,100.0); z_lbc = PML(600.0,100.0); z_rbc = PML(600.0,100.0)
C, C0, m, d = marmousi(patch='mini_square', x_lbc = x_lbc, x_rbc = x_rbc, z_lbc = z_lbc, z_rbc = z_rbc)
n_nodes_x = m.x.n
n_nodes_z = m.z.n
dx = m.x.delta
dz = m.z.delta
x_min = d.x.lbound
x_max = d.x.rbound
z_min = d.z.lbound
z_max = d.z.rbound
x_min_km = x_min/1000.0; x_max_km = x_max/1000.0; z_min_km = z_min/1000.0; z_max_km = z_max/1000.0
marmousi_baseline_true_2d = np.reshape(C , (n_nodes_z, n_nodes_x), 'F')
marmousi_init_2d = np.reshape(C0, (n_nodes_z, n_nodes_x), 'F')
x_label = 'Horizontal coordinate (km)'
z_label = 'Depth (km)'
title_marmousi_baseline_true = 'True marmousi baseline'
title_marmousi_init = 'Initial marmousi'
cbar_min_vel = 1500.0
cbar_max_vel = 4600.0
if rank == 0:
fig_marmousi_baseline_true = plot_func(1, marmousi_baseline_true_2d, x_min_km, x_max_km, z_min_km, z_max_km, x_label, z_label, title_marmousi_baseline_true, cbar_min = cbar_min_vel, cbar_max = cbar_max_vel)
fig_marmousi_init = plot_func(2, marmousi_init_2d, x_min_km, x_max_km, z_min_km, z_max_km, x_label, z_label, title_marmousi_init, cbar_min = cbar_min_vel, cbar_max = cbar_max_vel)
true_change_2d = np.zeros((n_nodes_z, n_nodes_x))
layer_5_node_nr_z = int(2500.0/20.0); layer_4_node_nr_z = layer_5_node_nr_z - 1; layer_3_node_nr_z = layer_5_node_nr_z - 2; layer_2_node_nr_z = layer_5_node_nr_z - 3; layer_1_node_nr_z = layer_5_node_nr_z - 4
layer_5_node_left = int(5660.0/20.0); layer_5_node_right = int(8180.0/20.0)
layer_4_node_left = layer_5_node_left+1; layer_4_node_right = layer_5_node_right - 2
layer_3_node_left = layer_4_node_left+4; layer_3_node_right = layer_4_node_right - 6
layer_2_node_left = layer_3_node_left+3; layer_2_node_right = layer_3_node_right - 8
layer_1_node_left = layer_2_node_left+6; layer_1_node_right = layer_2_node_right - 9
true_perturb = -200.0
true_change_2d[layer_5_node_nr_z, layer_5_node_left:layer_5_node_right] = true_perturb
true_change_2d[layer_4_node_nr_z, layer_4_node_left:layer_4_node_right] = true_perturb
true_change_2d[layer_3_node_nr_z, layer_3_node_left:layer_3_node_right] = true_perturb
true_change_2d[layer_2_node_nr_z, layer_2_node_left:layer_2_node_right] = true_perturb
true_change_2d[layer_1_node_nr_z, layer_1_node_left:layer_1_node_right] = true_perturb
cbar_min_perturb = -np.abs(true_perturb)
cbar_max_perturb = np.abs(true_perturb)
marmousi_monitor_true_2d = marmousi_baseline_true_2d + true_change_2d
nsources = 19
nreceiver = n_nodes_x
source_spacing = 480.0
x_pos_sources_baseline = np.arange(0.5*source_spacing, x_max, source_spacing)
x_pos_sources_monitor = x_pos_sources_baseline - 240.0
z_pos_sources = z_min + dz
x_pos_receivers = np.linspace(x_min, x_max, n_nodes_x)
z_pos_receivers = z_min + dz
peakfreq = 6.0
local_shots_baseline = make_parallel_shots(pwrap, nsources, x_pos_sources_baseline, z_pos_sources, x_pos_receivers, z_pos_receivers, peakfreq)
local_shots_monitor = make_parallel_shots(pwrap, nsources, x_pos_sources_monitor , z_pos_sources, x_pos_receivers, z_pos_receivers, peakfreq)
trange = (0.0, 7.0)
solver = ConstantDensityAcousticWave(m,
spatial_accuracy_order=6,
trange=trange,
kernel_implementation='cpp')
marmousi_init = np.reshape( marmousi_init_2d, (n_nodes_z*n_nodes_x, 1), 'F')
marmousi_baseline_true = np.reshape(marmousi_baseline_true_2d, (n_nodes_z*n_nodes_x, 1), 'F')
marmousi_monitor_true = np.reshape( marmousi_monitor_true_2d, (n_nodes_z*n_nodes_x, 1), 'F')
tt = time.time()
marmousi_init_model = solver.ModelParameters(m,{'C': marmousi_init})
marmousi_baseline_true_model = solver.ModelParameters(m,{'C': marmousi_baseline_true})
marmousi_monitor_true_model = solver.ModelParameters(m,{'C': marmousi_monitor_true})
generate_seismic_data(local_shots_baseline, solver, marmousi_baseline_true_model)
print 'Baseline data generation: {0}s'.format(time.time()-tt)
####### Inversion algorithm
objective = TemporalLeastSquares(solver, parallel_wrap_shot=pwrap)
invalg = LBFGS(objective, memory_length=10)
tt = time.time()
nsteps = 30
status_configuration = {'value_frequency' : 1,
'residual_length_frequency' : 1,
'objective_frequency' : 1,
'step_frequency' : 1,
'step_length_frequency' : 1,
'gradient_frequency' : 1,
'gradient_length_frequency' : 1,
'run_time_frequency' : 1,
'alpha_frequency' : 1,
}
print "backtrack linesearch is not optimal. Does not guarantee that strong wolfe conditions are satisfied. But this is used in marmousi2D example."
line_search = 'backtrack'
if input_marmousi_inverted_given:
if rank == 0:
indict = loadmat(input_marmousi_inverted)
if 'marmousi_baseline_inverted_2d' in indict:
marmousi_baseline_inverted_2d = indict['marmousi_baseline_inverted_2d']
elif 'marmousi_baseline_inverted_bounded_2d' in indict:
marmousi_baseline_inverted_2d = indict['marmousi_baseline_inverted_bounded_2d']
else:
raise Exception('wrong key!')
else:
marmousi_baseline_inverted_2d = None
else:
print "Starting baseline inversion to improve initial model."
result = invalg(local_shots_baseline, marmousi_init_model, nsteps,
line_search=line_search,
status_configuration=status_configuration, verbose=True)
print 'Run time: {0}s'.format(time.time()-tt)
if rank == 0:
###### Saving the results
marmousi_baseline_inverted_2d = result.C.reshape((n_nodes_z,n_nodes_x), order='F')
out = {'marmousi_baseline_inverted_bounded_2d':marmousi_baseline_inverted_2d, 'marmousi_baseline_true_2d':marmousi_baseline_true_2d.reshape((n_nodes_z,n_nodes_x), order='F')}
savemat('baseline_inverted_bounded_nsteps_' + str(nsteps) + '.mat',out)
else:
marmousi_baseline_inverted_2d = None
    marmousi_baseline_inverted_2d = comm.bcast(marmousi_baseline_inverted_2d, root=0)
    generate_seismic_data(local_shots_monitor, solver, marmousi_monitor_true_model)
print 'Monitor data generation: {0}s'.format(time.time()-tt)
nswaps = 16
nsteps_each = 12
beta_not_normalized_c = np.zeros((n_nodes_z *n_nodes_x, 1))
beta_not_normalized_m = np.zeros((n_nodes_z *n_nodes_x, 1))
beta_not_normalized_c_history_2d = []
beta_not_normalized_m_history_2d = []
marmousi_new_swap_history = []
marmousi_curr_swap = np.reshape(marmousi_baseline_inverted_2d, (n_nodes_z*n_nodes_x,1), 'F')
marmousi_curr_swap_m = 1.0/marmousi_curr_swap**2
marmousi_curr_swap_model = solver.ModelParameters(m,{'C': marmousi_curr_swap})
marmousi_prev_swap = np.zeros(marmousi_curr_swap.shape )
marmousi_prev_swap_m = np.zeros(marmousi_curr_swap_m.shape)
for i in xrange(nswaps):
if rank == 0:
print "Starting swap %i"%i
if i%2 == 0:
local_shots_cur_swap = local_shots_monitor
if rank == 0:
print "Using monitor shots"
else:
local_shots_cur_swap = local_shots_baseline
if rank == 0:
print "Using baseline shots"
result = invalg(local_shots_cur_swap, marmousi_curr_swap_model, nsteps_each,
line_search=line_search,
status_configuration=status_configuration, verbose=True)
marmousi_new_swap = result.C
marmousi_new_swap_m = marmousi_new_swap**-2
marmousi_new_swap_model = solver.ModelParameters(m,{'C': marmousi_new_swap})
if i == 0:
if rank == 0:
out = {'marmousi_new_swap_first_iter':marmousi_new_swap}
savemat('marmousi_new_swap_first_iter_nsteps_' + str(nsteps) + '_nswaps_' + str(nswaps) + '_nsteps_each_' + str(nsteps_each),out)
if i > 0:
diff_c_new = marmousi_new_swap - marmousi_curr_swap; diff_c_old = marmousi_curr_swap - marmousi_prev_swap;
diff_m_new = marmousi_new_swap_m - marmousi_curr_swap_m; diff_m_old = marmousi_curr_swap_m - marmousi_prev_swap_m;
beta_not_normalized_c += (np.ones((n_nodes_z * n_nodes_x,1)) - np.sign(diff_c_old * diff_c_new)) * np.abs(diff_c_new)
beta_not_normalized_m += (np.ones((n_nodes_z * n_nodes_x,1)) - np.sign(diff_m_old * diff_m_new)) * np.abs(diff_m_new)
beta_not_normalized_c_history_2d.append(np.copy(beta_not_normalized_c.reshape((n_nodes_z,n_nodes_x), order='F')))
beta_not_normalized_m_history_2d.append(np.copy(beta_not_normalized_m.reshape((n_nodes_z,n_nodes_x), order='F')))
marmousi_new_swap_history.append(marmousi_new_swap)
marmousi_prev_swap = marmousi_curr_swap
marmousi_prev_swap_m = marmousi_curr_swap_m
marmousi_prev_swap_model = marmousi_curr_swap_model
marmousi_curr_swap = marmousi_new_swap
marmousi_curr_swap_m = marmousi_new_swap_m
marmousi_curr_swap_model = marmousi_new_swap_model
if rank == 0:
out = {'beta_not_normalized_c_history_2d':beta_not_normalized_c_history_2d, 'beta_not_normalized_m_history_2d':beta_not_normalized_m_history_2d, 'marmousi_new_swap_history':marmousi_new_swap_history}
savemat('beta_history_nsteps_' + str(nsteps) + '_nswaps_' + str(nswaps) + '_nsteps_each_' + str(nsteps_each) + '_marmousi_history.mat',out)
|
kiaakrami/An-Alternative-FWI-AFWI-Algorithm-for-Monitoring-Time-lapse-Velocity-Changes
|
chapter_4_thesis_replication.py
|
Python
|
gpl-3.0
| 12,981
|
[
"Gaussian"
] |
fd1d409f00e1fe85285107cbf3163db1eb322f9506708b4f59647e2a6760c5c5
|
# Map assignments to questions
CURRENT_ASSIGNMENTS = ['hw01']
QUESTIONS = {
    # The question lists were missing from this copy of the file; empty
    # placeholders are used here so the module stays importable.
    'HOG': [],
    'ANTS': [],
    'SCHEME': [],
    'hw01': [],
    'hw02': [],
    'hw03': [],
    'hw04': [],
    'hw05': [],
    'hw06': [],
    'hw07': [],
    'hw08': [],
    'hw09': [],
    'hw10': [],
    'hw11': [],
}
OFFICE_HOURS = {
'MONDAY': {
(11, 12): (('Soumya', 'Timothy'), 'Garbarini Lounge'),
(12, 13): (('Michelle', 'Sumukh'), 'Garbarini Lounge'),
(13, 14): (('Rohan', 'Soumya'), 'Garbarini Lounge'),
(15, 16): (('Joy', 'Soumya'), 'Garbarini Lounge'),
(16, 17): (('Dickson', 'Joy'), 'Garbarini Lounge'),
},
'TUESDAY': {
(11, 12): (('Michelle', 'Harold'), 'Garbarini Lounge'),
        (12, 13): (('Michelle',), 'Garbarini Lounge'),
(13, 14): (('Austin', 'Robert'), 'Garbarini Lounge'),
(14, 15): (('Brian', 'Marvin'), 'Garbarini Lounge'),
        (15, 16): (('Kevin',), 'Garbarini Lounge'),
(16, 17): (('Alana', 'Kevin'), 'Garbarini Lounge'),
(17, 18): (('Albert', 'Rohan'), 'Garbarini Lounge'),
},
'WEDNESDAY': {
(11, 12): (('Timothy', 'Youri'), 'Garbarini Lounge'),
(12, 13): (('Brian', 'Timothy'), 'Garbarini Lounge'),
(13, 14): (('Albert', 'Harold'), 'Garbarini Lounge'),
(15, 16): (('Austin', 'Sumukh'), 'Garbarini Lounge'),
},
'THURSDAY': {
}
}
|
kvchen/officehour-queue
|
oh_queue/constants.py
|
Python
|
mit
| 1,386
|
[
"Brian"
] |
4445f77a6e6b6758ae5afe759cbdb44dca038386068c965034da4f5da541d0e5
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
    # Handle null resource publication: we check whether the file size is < 1 MB
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging, potentially remotely or in a detached wait loop. '''
    # During deployment the worker has to start kubelet without cluster DNS
    # configured. This can happen when it is the first unit online in a service
    # pool that is waiting to self-host the DNS pod; it will later configure
    # itself to query the DNS service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
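# Illustrative example (not part of the charm): a config value such as
#   "v=4 feature-gates=Accelerators=true profiling"
# is parsed into {'v': '4', 'feature-gates': 'Accelerators=true', 'profiling': 'true'},
# i.e. bare tokens without '=' become boolean-style 'true' flags.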
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
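# Illustrative example (not part of the charm): if a previous run had set
# {'v': '0'} and the current call passes base_args={'port': '10250'} with no
# extra args, the resulting command is roughly
#   snap set kubelet v=null port=10250
# so options dropped since the last run are explicitly cleared on the snap.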
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
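# Illustrative example (not part of the charm; the server address and token are
# hypothetical): a token-based call such as
#   create_kubeconfig('/root/cdk/kubeconfig', 'https://10.0.0.1:6443', ca,
#                     token='abc123', user='kubelet')
# runs `kubectl config set-cluster`, unsets any old users, then
# `set-credentials --token=...`, `set-context` and `use-context`
# against that kubeconfig file.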
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.4"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
else:
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
    '''Return the list of kubernetes api server addresses (https://host:port)
    for this relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
            # creation already succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
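# Hypothetical example (not in the original source): applying the ingress
# manifest rendered by launch_default_ingress_controller through this wrapper
# would look like
#
#   kubectl_manifest('create', '/root/cdk/addons/ingress-daemon-set.yaml')
#
# and the 'create' is skipped when 'kubectl get -f <manifest>' already succeeds.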
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(get_node_name().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(get_node_name().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
        # master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if cloud_provider == 'aws':
return getfqdn()
else:
return gethostname()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
def set_label(label, value):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
|
humblec/external-storage
|
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 37,444
|
[
"CDK"
] |
3777b1e775713877c45a47189836735907c90680354b4ca32e9707b961abdfff
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'SunSoft (BSD like)',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere from the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD MIT/X11 (BSD like)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSD (4 clause) ISC',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
'MPL (v1.1) LGPL (unversioned/unknown version)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/trace-viewer/tracing/third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jsoncpp/source': [
# https://github.com/open-source-parsers/jsoncpp/issues/234
'UNKNOWN',
],
'third_party/junit/src': [
# https://github.com/junit-team/junit/issues/1132
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# BSD 2-clause license.
'third_party/pycoverage': [
'UNKNOWN',
],
'third_party/pyelftools': [ # http://crbug.com/222831
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# http://crbug.com/471372
# BSD
'tools/swarming_client/third_party/pyasn1': [
'UNKNOWN',
],
# http://crbug.com/471372
# Apache v2.0.
'tools/swarming_client/third_party/rsa': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
# Not shipped, downloaded on trybots sometimes.
'tools/telemetry/third_party/gsutil': [
'BSD MIT/X11 (BSD like)',
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'v8/src/third_party/kernel/tools/perf/util/jitdump.h': [ # http://crbug.com/391716
'UNKNOWN',
],
}
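# Illustrative example (hypothetical path): a licensecheck line such as
#   third_party/ffmpeg/libavcodec/foo.c: GPL (v2)
# is suppressed because the path starts with the 'third_party/ffmpeg' prefix
# and 'GPL (v2)' is whitelisted for that prefix above.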
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
hgl888/chromium-crosswalk
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 16,880
|
[
"Galaxy"
] |
a6026fb90b6b3e86cdf99e34dd4f4191af0491a773edce7973b96b18db128e9b
|
#!/usr/bin/env python
#python 3.3; requires biopython
#Version 1. Darcy Jones, January 2014.
#Contact Darcy Jones, darcy.ab.jones@gmail.com
#----------------------------------- LICENSE ---------------------------------------#
# mcl2genes - takes gene family output information from MCL analysis and writes #
# output appropriate for use with ParaAT and KaKs_Calculator #
# #
# Copyright (C) 2014 Darcy Jones #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#-----------------------------------------------------------------------------------#
###Import modules
import sys;
import argparse;
import re;
import os;
from Bio import SeqIO;
from difflib import SequenceMatcher, get_close_matches;
###Function definitions.
def debug(string):
pass;
#sys.stderr.write("DEBUG: {}\n".format(string));
def parseMCL(file_handle):
"""
returns the information held in the mcl file as a dictionary
format: mcl_dict := {key := group_name, value := {key := File/genome, value := {set of genes} } };
    also returns a set of fasta nicknames found in the mcl file.
"""
mcl_dict = dict();
fasta_nicknames = set()
for group in file_handle:
group_name = group.split(':')[0];
group_values_list = group.rstrip().split(': ')[1].split(' ');
group_values_dict = dict()
for value in group_values_list:
key = value.split('|')[0];
if key not in fasta_nicknames:
fasta_nicknames.add(key);
val = value.split('|')[1];
if key in group_values_dict:
group_values_dict[key].add(val);
else:
group_values_dict[key] = {val};
mcl_dict[group_name] = group_values_dict;
return mcl_dict, fasta_nicknames;
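# Illustrative example (made-up identifiers): given an MCL line such as
#   "group_1: genomeA|gene001 genomeA|gene002 genomeB|gene010"
# parseMCL would return
#   mcl_dict        == {'group_1': {'genomeA': {'gene001', 'gene002'},
#                                   'genomeB': {'gene010'}}}
#   fasta_nicknames == {'genomeA', 'genomeB'}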
def indexFastaFiles(fasta_nicknames):
"""
returns a dictionary of nicknames and indexed fasta files
format: output := {key: nickname, value := indexed fasta file (a dictionary keyed by id)};
"""
output = dict()
for nickname in fasta_nicknames:
possible_files = get_close_matches(nickname, fasta_path, cutoff=0.3)
if len(possible_files) > 0:
debug('Possible matches for {}:'.format(nickname));
debug('\t{}'.format(" ".join(possible_files)));
debug('\tUsing: {}'.format(possible_files[0]));
output[nickname] = SeqIO.index(possible_files[0], 'fasta', key_function= lambda key: re.sub(':|;|\||_|\s|\*|\.|-|,|\[|\]|\{|\}|\(|\)|\'|\"', '', key)); # removing all punctuation to improve hits.
else:
debug('Could not find a file corresponding to {} in {}'.format(nickname, fasta_path));
debug('Members of groups coming from this race will not be included in the sequences');
return output;
def findSequencesToWrite(group):
"""
figures out which sequences we want to write to the group fasta.
returns a list of biopython seqRecords
"""
to_write = [];
for file_ in mcl_dict[group]:
if file_ in fasta_files:
for transcript in mcl_dict[group][file_]:
possible_sequences = get_close_matches(re.sub(':|;|\||_|\s|\*|\.|-|,|\[|\]|\{|\}|\(|\)|\'|\"', '', transcript), fasta_files[file_], cutoff=0.3);
if len(possible_sequences)>0:
to_write.append(fasta_files[file_][possible_sequences[0]]);
return to_write;
def findSequencesToWriteFaster(group):
"""
figures out which sequences we want to write to the group fasta.
returns a list of biopython seqRecords
"""
to_write = [];
for file_ in mcl_dict[group]:
if file_ in fasta_files:
for transcript in mcl_dict[group][file_]:
sub_transcript = re.sub(':|;|\||_|\s|\*|\.|-|,|\[|\]|\{|\}|\(|\)|\'|\"', '', transcript);
if sub_transcript in fasta_files[file_]:
sequence = fasta_files[file_][sub_transcript];
to_write.append(sequence);
return to_write;
###Code.
def main(input_path, fasta_path, output_dir='./', match = False, write_output = False):
debug(input_path)
debug(fasta_path)
if output_dir != None:
if not os.path.isdir(output_dir):
os.makedirs(output_dir);
global mcl_dict, fasta_files;
# Find which sequences we want to write to a group
with open(input_path, 'rU') as MCL_file_handle:
mcl_dict, fasta_nicknames = parseMCL(MCL_file_handle);
# Prepare the fasta files. Index them and remove punctuation from names. files are accessible via nicknames.
fasta_files = indexFastaFiles(fasta_nicknames);
# Write each group to a multifasta file.
with open(os.path.join(output_dir, 'mcl2genes_groups.txt'), 'w') as group_summary:
for group in sorted(mcl_dict):
if match: # Use the slower but easier method
sequences = findSequencesToWrite(group);
else: # treat ids in mcl as literal match to fasta ids.
sequences = findSequencesToWriteFaster(group);
if len(sequences)>1: # We can't align 0 or 1 sequences
print("{}\t{}\t{}\n".format(group, len(sequences),';'.join([seq.id for seq in sequences])));
group_summary.write('{}\n'.format('\t'.join([re.sub(':|;|\||_|\s|\*|\.|-|,|\[|\]|\{|\}|\(|\)|\'|\"', '', seq.id)for seq in sequences])));
if write_output == True:
SeqIO.write(sequences, os.path.join(output_dir, '{}.fa'.format(group)), 'fasta');
# Combine CDS files and translate.
with open(os.path.join(output_dir, 'master_cds_nuc.fa'), 'w') as master_cds_nuc, open(os.path.join(output_dir, 'master_cds_pep.fa'), 'w') as master_cds_pep:
for cds_file in fasta_path:
if os.path.isfile(cds_file):
sequences = SeqIO.parse(cds_file, 'fasta');
write_list_nuc = [];
                write_list_pep = []; # Yes, I could do one list but I'd rather use more memory and save the time it would take to loop through a second time.
for sequence in sequences:
sequence.id = re.sub(':|;|\||_|\s|\*|\.|-|,|\[|\]|\{|\}|\(|\)|\'|\"', '', sequence.id);
sequence.name = '';
sequence.description = '';
if len(sequence.seq)%3!=0:
debug('added N to {}'.format(sequence.id));
                        sequence.seq = sequence.seq + 'N' * (3 - len(sequence.seq) % 3); # pad to a multiple of three, matching the 'added N' debug message above
SeqIO.write(sequence, master_cds_nuc, 'fasta');
#write_list_nuc.append(sequence);
                    sequence.seq = sequence.seq.translate(stop_symbol=''); # I'll assume standard codon usage; note that ParaAT doesn't handle * as a stop codon.
#write_list_pep.append(sequence);
SeqIO.write(sequence, master_cds_pep, 'fasta');
# DONE :)
if __name__== '__main__':
###Argument handling.
arg_parser = argparse.ArgumentParser(description='Takes gene family output information from MCL analysis and writes output appropriate for use with ParaAT and KaKs_Calculator ');
arg_parser.add_argument("input_path", help="Directory to MCL file containing gene family information file. To use stdin (for piped input) enter '-'");
arg_parser.add_argument("-f", "--fasta_path", nargs='*', default=None, help="Directory to fasta files references in MCF file (usually genomes). Multiple files can be specified with a space.");
arg_parser.add_argument("-o", "--output_dir", default='./', help="Directory for all output to be written to. Default = current Directory");
    arg_parser.add_argument("-m", "--match", default=False, action='store_true', help="Boolean toggle to search for closest matches to sequence names rather than using mcl gene names as literal gene names. THIS IS SLOW, consider renaming names if possible.");
    arg_parser.add_argument("-w", "--write_output", default=False, action='store_true', help="Boolean toggle to write each gene family group to its own multifasta file in the output directory.");
args = arg_parser.parse_args();
###Variable Definitions
input_path=args.input_path;
output_dir=args.output_dir;
fasta_path=args.fasta_path;
write_output=args.write_output;
match=args.match;
main(input_path, fasta_path, output_dir, match, write_output);
|
darcyabjones/mcl-to-kaks
|
mcl2genes.py
|
Python
|
gpl-3.0
| 9,682
|
[
"Biopython"
] |
e75832acfc32011f21bec6f76c54871de32b264480162211b75edbeb3b5a4c19
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import sys
import os
from PyQt4 import QtCore, QtGui
from ui.Ui_exterminator import Ui_MainWindow
from ui.Ui_finalize import Ui_FinalImage
from ui.Ui_export_to_hdf5 import Ui_ExportHdf5
from etc import changelog
from lxml import objectify
import codecs
import numpy as np
#import time
#from pyqtgraph.flowchart import Flowchart, Node
#import pyqtgraph.flowchart.library as fclib
#from pyqtgraph.flowchart.library.common import CtrlNode
import pyqtgraph as pg
from pyqtgraph.parametertree import Parameter
import re
import tifffile as tf
from scipy import misc
#from memory_profiler import profile
from copy import deepcopy
import cv2
version = '0.1-beta'
#starttime = time.time()
#background colors used in lists in gui:
col_good = QtGui.QBrush(QtGui.QColor(185, 255, 155))
col_good.setStyle(QtCore.Qt.SolidPattern)
col_not_so_good = QtGui.QBrush(QtGui.QColor(255, 255, 140))
col_not_so_good.setStyle(QtCore.Qt.SolidPattern)
col_bad = QtGui.QBrush(QtGui.QColor(255, 180, 155))
col_bad.setStyle(QtCore.Qt.SolidPattern)
pen_tile = QtGui.QPen(QtGui.QColor(100, 200, 100))
brush_tile = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
pen_selection = QtGui.QPen(QtGui.QColor(100, 200, 100))
brush_selection = QtGui.QBrush(QtGui.QColor(255, 255, 150, 175))
pen_hover = QtGui.QPen(QtGui.QColor(255, 255, 255))
#below could be in a class, but it works faster as separate dictionaries
mapping_list = {} # initiate global dictionary of tile files
final_list = {} # initiate global dictionary of finalization tree
xstage = {} # dict of global x coordinate of the stage
ystage = {} # dict of global y -//-
image_size = {}
max_tiles = {}
tile_index = {}
pts = {} # tile selected in vector overview of samples
stitchy = {} # 3x3 tile selection of samples
filter_parameter_list = {}
base_image = {} # detector type used as image in stitching (i.e. BSE images)
#####################
#helper functions:
#####################
def image2numpy(filename):
if filename.rsplit('.', 1)[-1] == 'txt':
data = np.loadtxt(filename, delimiter=';', dtype='float32')
else:
data = misc.imread(filename)
    # if the signal fits into 8 bits, downcast to unsigned 8-bit;
    # otherwise keep the higher bit depth (32bit float):
if data.max() <= 255:
data = data.astype(np.uint8)
return data
def waiting_effects(function):
def new_function(self):
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(
QtCore.Qt.WaitCursor))
function(self)
QtGui.QApplication.restoreOverrideCursor()
return new_function
def cos_(angle):
return np.cos(angle * np.pi / 180)
def sin_(angle):
return np.sin(angle * np.pi / 180)
def fill_mapping_dict(directory):
    global mapping_list
    """ function populates the global dictionary of mapping tiles,
    function returns the count of appropriate files.
    Function requires these arguments:
    Args:
        directory -- where to search for files
    function uses standard python methods for splitting and finding
    the basic metadata saved in file names or
    (TO_BE_DEVELOPED) in tags of TIFF """
z = 0
re_sample = re.compile('[\s](?=\(\d)')
re_signal = re.compile('(?<=\d\))[_]')
for i in next(os.walk(directory))[2]:
header, ft = i.rsplit('.', 1)
if np.shape(re.findall(r'\(\d+\,\d+\)', i))[0] == 1 and ft != 'bcf':
z += 1
j = re_sample.split(header)[0]
#j = i.rsplit(" ", 1)[0]
#k, ft = i.rsplit("_", 1)[1].rsplit(".", 1)
#l = i.rsplit("_", 1)[0]
l, k = re_signal.split(header)
if j in mapping_list:
if k in mapping_list[j]:
mapping_list[j][k][l] = directory + "/" + i
else:
mapping_list[j][k] = {l: directory + "/" + i}
final_list[j][k] = {}
else:
mapping_list[j] = {k: {l: directory + "/" + i}}
final_list[j] = {k: {}}
elif np.shape(re.findall(r'\(\d+\,\d+\)', i))[0] == 2 and\
i.rsplit('.', 1)[1] != 'txt':
z += 1
smpl = i.split(" ")[0]
tile = ' '.join([smpl, i.rsplit(" ", 1)[-1].split('_')[0]])
data_type = i.rsplit(" ", 1)[1].split('_')[1].split('.')[0]
if smpl in mapping_list:
if data_type in mapping_list[smpl]:
mapping_list[smpl][data_type][tile] = directory + "/" + i
else:
mapping_list[smpl][data_type] = {tile: directory + "/" + i}
final_list[smpl][data_type] = {}
else:
mapping_list[smpl] = {data_type: {tile: directory + "/" + i}}
final_list[smpl] = {data_type: {}}
return z
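# Illustrative example (hypothetical file name): a tile file called
#   "sampleA (2,3)_BSE.tif"
# in the chosen directory would be indexed as
#   mapping_list['sampleA']['BSE']['sampleA (2,3)'] = '<directory>/sampleA (2,3)_BSE.tif'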
def fill_tile_index():
    """function which fills the global tile_index
    using the global mapping_list of filenames"""
global tile_index
for i in mapping_list:
for j in list(mapping_list[i][base_image[i]].keys()): # eddited
k = j.rsplit("(", 1)[1].strip(')').split(",")
if i in tile_index:
tile_index[i][j] = {'x': int(k[1]), 'y': int(k[0])}
else:
tile_index[i] = {j: {'x': int(k[1]), 'y': int(k[0])}}
#idea is good, but it doesn't work...:
# why?
#def cleanXML(xml):
#roman = ['nulla', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX']
#for i in range(9):
#xml = xml.replace(''.join(['<', str(i)]), ''.join(['<', roman[i]]))
#xml = xml.replace(''.join(["<\", str(i)]), ''.join(["<\\", roman[i]]))
#return xml
def parseXML(xmlFile):
""" parse the broken bruker xml (.rtj) into python objects"""
with codecs.open(xmlFile, encoding='cp1252') as f:
next(f) # skip the xml header
#####################################################################
        #Why skipping? Because Bruker is not using and will not use Unicode
        # in the near future.
        # Additionally Bruker xmls are always broken:
        #tags contain or begin with forbidden chars.
        # So instead of directly opening
        # the xml with lxml.objectify, the file is opened in python as a text
        # file, and then cleaned up (invalid tags are replaced with something
        # legal)
####################################################################
xml = f.read()
#xml = codecs.encode(xml, 'utf-8')
#xml = xml.replace("WINDOWS-1252", "UTF-8")
xml = xml.replace(":x", "x") # clean da bruker shit up
xml = xml.replace(":y", "y") # clean da bruker shit up
xml = xml.replace("2ndTier", "IIndTier") # clean da bruker shit up
#xml = cleanXML(xml) # there would come handy function....
#re.sub("<.?[0-9]",'</nr',xml)
return objectify.fromstring(xml)
def imageSize():
global image_size
for sample in mapping_list:
for i in mapping_list[sample]:
for j in mapping_list[sample][i]:
if mapping_list[sample][i][j].rsplit('.', 1)[-1] == 'txt':
tile = np.loadtxt(mapping_list[sample][i][j], delimiter=';')
else:
tile = misc.imread(mapping_list[sample][i][j])
break
break
image_size[sample] = np.shape(tile)
return image_size
def filterImage(img, fltr, *args):
if fltr == 'blur':
out_img = cv2.blur(img, (args[0], args[0]))
elif fltr == 'median':
out_img = cv2.medianBlur(img, *args)
elif fltr == 'gaussian':
out_img = cv2.GaussianBlur(img, (args[0], args[0]), args[1])
elif fltr == 'bilateral':
out_img = cv2.bilateralFilter(img, *args)
return out_img
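# Hypothetical usage sketch: the parameter lists in initial_filter_parameters
# below map positionally onto *args, e.g.
#   filterImage(img, 'median', 5)          # 5x5 median filter
#   filterImage(img, 'gaussian', 5, 1.5)   # ksize=5, sigma=1.5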
def tile_name(sample, x, y):
return sample + ' (' + str(y) + ',' + str(x) + ')'
def stitching_list(sample, tile):
global stitchy
stitchy[sample] = []
x = tile_index[sample][tile]['x']
y = tile_index[sample][tile]['y']
for n in (x - 1, x, x + 1):
for m in (y - 1, y, y + 1):
stitchy[sample].append(tile_name(sample, n, m))
def maxTileNumberBr():
    """returns the maximum x and y tile counts of the mosaic
    requires the dictionary with filenames per tile; data files
    must have the tile position organized in the file name as:
    'sample detector/element_line (y,x)'"""
global max_tiles
for h in mapping_list:
max_tiles[h] = {}
a = []
for j in mapping_list[h][base_image[h]]:
a.append(j.rsplit(" ", 1)[1].strip("(").strip(")").split(","))
x = []
y = []
for k in a:
x.append(int(k[1]))
y.append(int(k[0]))
maxx = max(x)
maxy = max(y)
max_tiles[h]['maxx'] = maxx
max_tiles[h]['maxy'] = maxy
return max_tiles
class selectableRect(QtGui.QGraphicsRectItem):
def __init__(self, name, *args):
QtGui.QGraphicsRectItem.__init__(self, *args)
self.setAcceptHoverEvents(True)
self.name = name
def hoverEnterEvent(self, ev):
self.savedPen = self.pen()
self.setPen(QtGui.QPen(QtGui.QColor(255, 255, 255)))
ev.ignore()
def hoverLeaveEvent(self, ev):
self.setPen(self.savedPen)
ev.ignore()
def mousePressEvent(self, ev):
if ev.button() == QtCore.Qt.LeftButton:
ev.accept()
myapp.reset_tile_graph_color()
self.setBrush(QtGui.QBrush(brush_selection))
sample = self.name.split()[0]
pts[sample] = self.name
stitching_list(sample, self.name)
myapp.populate_img_dict(sample)
try:
if myapp.ui.stitchWidget.isVisible():
myapp.tweeke.vb.clear()
for i in myapp.img[sample]:
myapp.tweeke.vb.addItem(myapp.img[sample][i])
myapp.tweeke.vb.autoRange()
if myapp.ui.filterDockWidget.isVisible():
myapp.set_filter_images()
except:
pass
else:
ev.ignore()
class BSEMissing(QtGui.QDialog):
def __init__(self, sample, parent=None):
QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Houston, we have a problem...')
self.verticalLayout = QtGui.QVBoxLayout(self)
self.label = QtGui.QLabel(
            'No image from the chosen folder has the "BSE", "AsB" or "BEI" part '
            'in the filename. Please choose from the list the detector/element to be '
            'the base image (the image shown while tweaking stitching parameters)')
self.label.setWordWrap(True)
self.verticalLayout.addWidget(self.label)
self.listView = QtGui.QListView()
self.verticalLayout.addWidget(self.listView)
self.model = QtGui.QStringListModel(list(final_list[sample].keys()))
self.listView.setModel(self.model)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.setEnabled(False)
self.listView.clicked.connect(self.enableTheButton)
self.buttonBox.accepted.connect(self.accept)
def enableTheButton(self):
self.buttonBox.setEnabled(True)
self.listView.clicked.disconnect(self.enableTheButton)
def returnSample(self):
return self.model.itemData(self.listView.selectedIndexes()[0])[0]
class AboutDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("About")
self.resize(600, 450)
self.verticalLayout = QtGui.QVBoxLayout(self)
#self.dalek = QtGui.QLabel(parent=self)
#sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,
#QtGui.QSizePolicy.MinimumExpanding)
#sizePolicy.setHorizontalStretch(1)
#sizePolicy.setVerticalStretch(1)
#self.dalek.setSizePolicy(sizePolicy)
#self.dalek.setMinimumSize(QtCore.QSize(140, 50))
#self.dalek.setBaseSize(QtCore.QSize(140, 50))
#self.dalek.setPixmap(QtGui.QPixmap('dalek.png'))
#self.dalek.setScaledContents(True)
#self.verticalLayout.addWidget(self.dalek)
self.textBrowser = QtGui.QTextBrowser(self)
self.verticalLayout.addWidget(self.textBrowser)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setCenterButtons(True)
self.verticalLayout.addWidget(self.buttonBox)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"),
self.reject)
        self.textBrowser.setHtml("<html><head></head>\n"
            "<body>\n"
            "<p align=\"center\">\n"
            "<span style=\" font-size:16pt;\">Qstitch\n"
            "</span></p>\n"
            "<p> This software can be used for stitching tiled images and element\n"
            " mapping data from SEM or similar equipment.\n"
            "<ul>Copyright © 2015 Petras Jokubauskas klavishas@gmail.com</p>\n"
            "<p>This program is free software: you can redistribute it and/or\n"
            " modify it \n"
            "under the terms of the GNU General Public License as published by\n"
            " the Free \n"
            "Software Foundation, version 3 of the License, or any\n"
            " later version.</p>\n"
            "<p>This program is distributed in the hope that it will be useful,\n"
            " but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
            " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
            " General Public License for more details.</p>\n"
            "<p>You should have received a copy of the GNU General Public License\n"
            " along with this program. If not, see http://www.gnu.org/licenses/.\n"
            "</p>\n"
            "<p> If code is not available in repositories such as github,\n"
            " you can ask the author</p>"
            "<p>Program uses <span style=\" font-weight:600;\">pyqtgraph</span> \n"
            "(for graphical representation and graph visualisation\n"
            "),<span style=\" font-weight:600;\">os and \n"
            "sys</span> to deal with files; \n"
            "<span style=\" font-weight:600;\">pytables</span> are planned to be \n"
            "implemented for really huge elemental mappings and hdf5 support;\n"
            "<span style=\" font-weight:600;\">lxml</span> to parse xml-like, \n"
            "<span style=\" color:#ff0000;\">BROKEN</span> by design \n"
            "BRUKER™©® files (*.rtj) generated for jobs and \n"
            "project files (*.rtx);<span style=\" font-weight:600;\">numpy</span>\n"
            "is used for generating a 2-dimensional array where all stitching\n"
            " happens by \n"
            "populating separate tiles into slices of the array.</p>\n"
            "<ul><li>At the beginning the selected directory is searched for\n"
            " appropriate \n"
            "tiling files and indexed into a hierarchical list/tree: \n"
            "sample/element/tile/filename.</li>\n"
            "<li><span style=\" font-weight:600;\">2nd</span> the shitty bruker\n"
            " jobs \n"
            "file is opened, cleaned up from the incompetent\n"
            " bruker™©® \n"
            ""engineers" crap to comply with the xml standard, and then \n"
            "objectified with lxml.objectify.</li>\n"
            "<li><span style=\" font-weight:600;\">3rd</span> from the objectified\n"
            " xml, \n"
            "the main parameters (leaving behind the shitload™ of \n"
            "clusterfuck™ of the rest of 99.9% bruker\'s™©® \n"
            "bullshit™) are extracted.</li>\n"
            "<li><span style=\" font-weight:600;\">4th</span> from the gathered\n"
            " information \n"
            "a dummy array is generated </li>\n"
            "<li><span style=\" font-weight:600;\">5th</span> using a 3x3 grid\n"
            " the main \n"
            "parameters of the image stitching can be set up.</li>\n"
            "<li><span style=\" font-weight:600;\">6th</span> after that huge\n"
            " numpy \n"
            "arrays can be generated and saved to an appropriate \n"
            "format</li></ul></body></html>")
class ChangeLogDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("ChangeLog")
self.resize(600, 450)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.textBrowser = QtGui.QTextBrowser(self)
self.verticalLayout.addWidget(self.textBrowser)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setCenterButtons(True)
self.verticalLayout.addWidget(self.buttonBox)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"),
self.reject)
self.textBrowser.setText(changelog.changelog)
def filter_basic(sample):
a_list = []
temp_list = deepcopy(final_list[sample])
for i in temp_list:
a_list.append({'name': i,
'type': 'list',
'values': ['None',
'median',
'bilateral',
'blur',
'gaussian'],
'value': 'None'})
return a_list
initial_filter_parameters = {'bilateral':
[{'name': 'd',
'type': 'int',
'value': 1},
{'name': 'sigmaColor',
'type': 'float',
'value': 1.0},
{'name': 'sigmaSpace',
'type': 'float',
'value': 1.0}
],
'median':
[{'name': 'ksize',
'type': 'int',
'value': 1,
'step': 2,
'limits': (1, 255)
}],
'gaussian':
[{'name': 'ksize',
'type': 'int',
'value': 1,
'step': 2,
'limits': (1, 255)},
{'name': 'sigma',
'type': 'float',
'value': 1.0,
'limits': (0.0, 255.0)}],
'blur':
[{'name': 'ksize',
'type': 'int',
'value': 1,
'limits': (1, 255)}]}
def filter_parameters():
global filter_parameter_list
initial_list = deepcopy(final_list) # copy dict
for sample in initial_list:
for detector in initial_list[sample]:
initial_list[sample][detector] = deepcopy(initial_filter_parameters)
filter_parameter_list = initial_list
def param_basic(sample):
tree = [
{'name': 'Image', 'type': 'group', 'children': [
{'name': 'height', 'type': 'int', 'value': image_size[sample][0],
'suffix': 'px', 'readonly': True},
{'name': 'width', 'type': 'int', 'value': image_size[sample][1],
'suffix': 'px', 'readonly': True},
{'name': 'dx', 'type': 'float', 'value': 0.000001, 'suffix': 'm/px',
'siPrefix': True, 'dec': True, 'step': 0.01},
{'name': 'dy', 'type': 'float', 'value': 0.000001, 'suffix': 'm/px',
'siPrefix': True, 'dec': True, 'step': 0.01},
{'name': 'rotation', 'type': 'float', 'value': 0,
'suffix': ' degree', 'step': 0.01},
{'name': 'cutoff', 'type': 'group', 'children': [
{'name': 'left', 'type': 'int', 'value': 0, 'suffix': 'px',
'limits': (0, image_size[sample][1] / 4)},
{'name': 'right', 'type': 'int', 'value': 0, 'suffix': 'px',
'limits': (0, image_size[sample][1] / 4)},
{'name': 'top', 'type': 'int', 'value': 0, 'suffix': 'px',
'limits': (0, image_size[sample][0] / 4)},
{'name': 'bottom', 'type': 'int', 'value': 0, 'suffix': 'px',
'limits': (0, image_size[sample][0] / 4)},
]},
]},
{'name': 'microscope', 'type': 'group', 'children': [
{'name': 'magnification', 'type': 'float', 'value': 0},
{'name': 'WD', 'type': 'float', 'value': 0, 'suffix': 'mm'},
{'name': 'HV', 'type': 'float', 'value': 0, 'suffix': 'V',
'siPrefix': True, 'dec': True, 'step': 1},
]},
{'name': 'stage', 'type': 'group', 'children': [
{'name': 'stage step x', 'type': 'float',
'value': image_size[sample][1] / 1000000, 'suffix': 'm',
'siPrefix': True, 'dec': True, 'step': 0.1
},
{'name': 'stage step y', 'type': 'float',
'value': image_size[sample][0] / 1000000, 'suffix': 'm',
'siPrefix': True, 'dec': True, 'step': 0.1
},
]},
{'name': 'mosaic tiles', 'type': 'group', 'children': [
{'name': 'horizontal tiles', 'type': 'int',
'value': max_tiles[sample]['maxx'], 'readonly': True},
{'name': 'vertical tiles', 'type': 'int',
'value': max_tiles[sample]['maxy'], 'readonly': True},
]},
]
return tree
def param_first(sample, basic_part):
tree = [
{'name': sample, 'type': 'group', 'children': basic_part},
]
return tree
def param_branch(sample, basic_part):
tree = [
{'name': sample, 'type': 'group', 'children': basic_part},
]
return Parameter(name=sample, type='group', children=tree)
class StartQT4(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Qstitch v" + version)
# signal connections:
self.ui.actionImportDataFolder.triggered.connect(self.import_the_data)
self.ui.actionImportMetadata.triggered.connect(self.import_the_jobs)
self.ui.actionAbout.triggered.connect(self.help_about)
self.ui.actionAbout_Qt.triggered.connect(self.help_about_Qt)
self.ui.actionChangelog.triggered.connect(self.help_changelog)
self.ui.actionExportImages.triggered.connect(self.export_image)
self.ui.actionExportHdf5.triggered.connect(self.export_hdf5)
self.ui.actionClear.triggered.connect(self.clear_the_data)
self.ui.overviewComboBox.currentIndexChanged.connect(self.draw_tiles)
self.ui.overviewComboBox.currentIndexChanged.connect(
self.change_nineImg_sample)
self.ui.setAsDefault.clicked.connect(self.save_parameter_state)
#dynamic widget hiding/showing trigger:
self.ui.actionDynamicWidgets.triggered.connect(
self.toggle_toggling_of_widgets)
        # initial enable/disable of gui parts
self.ui.tabWidget.setTabEnabled(1, False)
self.ui.tabWidget.setTabEnabled(2, False)
self.ui.tabWidget.setTabEnabled(3, False)
self.ui.overviewWidget.setVisible(False)
self.ui.filterDockWidget.setVisible(False)
self.toggle_filter_dock_action =\
self.ui.filterDockWidget.toggleViewAction()
self.toggle_overview_dock_action =\
self.ui.overviewWidget.toggleViewAction()
self.toggle_stitch_dock_action = self.ui.stitchWidget.toggleViewAction()
self.toggle_stitch_dock_action.setText('Show/Hide 3x3 stitching view')
self.toggle_overview_dock_action.setText('Show/Hide tile overview')
self.toggle_filter_dock_action.setText('Show/Hide filtering preview')
self.ui.menuView.addAction(self.toggle_overview_dock_action)
self.ui.menuView.addAction(self.toggle_stitch_dock_action)
self.ui.menuView.addAction(self.toggle_filter_dock_action)
self.toggle_overview_dock_action.setShortcut("Ctrl+Alt+O")
self.toggle_stitch_dock_action.setShortcut("Ctrl+Alt+T")
self.toggle_filter_dock_action.setShortcut("Ctrl+Alt+F")
self.toggle_debug = self.ui.consoleWidget.toggleViewAction()
self.ui.menuView.addAction(self.toggle_debug)
self.toggle_debug.setShortcut("Ctrl+Alt+D")
self.ui.pythonConsole.localNamespace = globals()
self.ui.consoleWidget.setVisible(False)
self.ui.stitchWidget.setVisible(False)
self.ov = self.ui.graphicalOverview
self.vb = self.ov.addViewBox(0, 1)
self.vb.setAspectLocked(True)
xScale = pg.AxisItem(orientation='bottom', linkView=self.vb)
self.ov.addItem(xScale, 1, 1)
yScale = pg.AxisItem(orientation='left', linkView=self.vb)
self.ov.addItem(yScale, 0, 0)
self.op = pg.PlotDataItem()
xScale.setLabel(units="m")
yScale.setLabel(units='m')
self.vb.addItem(self.op)
self.vb.invertY()
self.parameters = {}
# setting part for filter visualisation:
self.oi = self.ui.originalView
self.fi = self.ui.filteredView
self.fi.view.setXLink(self.oi.view)
self.fi.view.setYLink(self.oi.view)
self.fpt = self.ui.filtersTreeView
self.fpt.itemSelectionChanged.connect(self.set_filter_images)
# setting dictionaries for data for visualisation
self.filters = {}
self.rect = {}
self.img = {}
self.data1 = {}
#initiation of simple models used in the Q*views:
self.sampleListModel = QtGui.QStandardItemModel()
# initiation of flags for import/append data
self.data_append_flag = 0 # 0 import, 1 for append
self.metadata_append_flag = 0 # same as above
# icons for changing function of button (import/append)
self.icon = QtGui.QIcon()
self.icon.addPixmap(
QtGui.QPixmap(":/exterminator/icons/import_from_dir.svg"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.icon1 = QtGui.QIcon()
self.icon1.addPixmap(
QtGui.QPixmap(":/exterminator/icons/import_rtj.svg"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.icon_a = QtGui.QIcon()
self.icon_a.addPixmap(
QtGui.QPixmap(":/exterminator/icons/append_from_dir.svg"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.icon1_a = QtGui.QIcon()
self.icon1_a.addPixmap(
QtGui.QPixmap(":/exterminator/icons/append_rtj.svg"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
        #initially triggered functions:
self.toggle_toggling_of_widgets()
self.ui.tabWidget.currentChanged.connect(self.unhide_tabs)
def unhide_tabs(self):
tab_name = self.ui.tabWidget.currentWidget().objectName()
if tab_name == 'tab_param':
self.ui.tabWidget.setTabEnabled(2, True)
elif tab_name == 'tab_filters':
self.ui.tabWidget.setTabEnabled(3, True)
self.ui.tabWidget.currentChanged.disconnect(self.unhide_tabs)
def toggle_toggling_of_widgets(self):
if self.ui.actionDynamicWidgets.isChecked():
self.ui.tabWidget.currentChanged.connect(self.toggle_widgets)
else:
try:
self.ui.tabWidget.currentChanged.disconnect(self.toggle_widgets)
except:
pass
def toggle_widgets(self):
tab_name = self.ui.tabWidget.currentWidget().objectName()
if (tab_name == 'tab_tiles') or (tab_name == 'tab_finish'):
self.ui.overviewWidget.setVisible(False)
self.ui.stitchWidget.setVisible(False)
self.ui.filterDockWidget.setVisible(False)
elif tab_name == 'tab_param':
self.ui.overviewWidget.setVisible(True)
self.ui.stitchWidget.setVisible(True)
self.ui.filterDockWidget.setVisible(False)
elif tab_name == 'tab_filters':
self.ui.overviewWidget.setVisible(True)
self.ui.stitchWidget.setVisible(False)
self.ui.filterDockWidget.setVisible(True)
def tweek_filter(self):
if self.fpt.selectedItems() != [] and\
self.fpt.selectedItems()[0].depth == 1:
sample = self.fpt.selectedItems()[0].parent().param.name()
detector = self.fpt.selectedItems()[0].param.name()
a_filter = self.fpt.selectedItems()[0].param.value()
self.ui.filterParamView.clear()
if a_filter != 'None':
temp_param = Parameter(name=a_filter,
type='group',
children=filter_parameter_list[sample][detector][a_filter])
self.ui.filterParamView.setParameters(temp_param)
#temp_parent = self.ui.filterParamView.listAllItems()[0].param
temp_param.sigTreeStateChanged.connect(self.filterParam2dict)
temp_param.sigTreeStateChanged.connect(self.updateFilteredImage)
#self.ui.filterParamView.setParameters(Parameter(name=a_filter,
#type='group',
#children=filter_parameter_list[sample][detector][a_filter]))
else:
try:
self.filtered_image *= 0
self.fi.updateImage()
except:
pass
def updateFilteredImage(self):
if self.fpt.selectedItems() != [] and\
self.fpt.selectedItems()[0].depth == 1:
sample = self.fpt.selectedItems()[0].parent().param.name()
detector = self.fpt.selectedItems()[0].param.name()
a_filter = self.fpt.selectedItems()[0].param.value()
fp = []
if a_filter != 'None':
for i in range(0,
len(filter_parameter_list[sample][detector][a_filter])):
fp.append(
self.ui.filterParamView.listAllItems()[i + 1].param.value())
try:
self.filtered_image[::] = filterImage(self.original_image,
a_filter,
*fp)
self.fi.updateImage()
                # the two below are required especially when jumping
                # from 16-bit to 8-bit images or vice versa.
self.fi.setLevels(0, np.max(self.filtered_image))
self.fi.setHistogramRange(0, np.max(self.filtered_image))
except (AttributeError, ValueError):
self.filtered_image = filterImage(self.original_image,
a_filter,
*fp)
self.fi.setImage(np.swapaxes(self.filtered_image, 0, 1))
def filterParam2dict(self):
if self.fpt.selectedItems() != [] and\
self.fpt.selectedItems()[0].depth == 1:
sample = self.fpt.selectedItems()[0].parent().param.name()
detector = self.fpt.selectedItems()[0].param.name()
a_filter = self.fpt.selectedItems()[0].param.value()
for i in range(0,
len(filter_parameter_list[sample][detector][a_filter])):
filter_parameter_list[sample][detector][a_filter][i]['value'] =\
self.ui.filterParamView.listAllItems()[i + 1].param.value()
def set_filter_images(self):
if self.fpt.selectedItems() != [] and\
self.fpt.selectedItems()[0].depth == 1:
sample = self.fpt.selectedItems()[0].parent().param.name()
detector = self.fpt.selectedItems()[0].param.name()
a_filter = self.fpt.selectedItems()[0].param.value()
tile = pts[sample]
self.original_image =\
image2numpy(mapping_list[sample][detector][tile])
if self.original_image.dtype == 'float32':
filter_parameter_list[sample][detector]['median'][0]['limits']\
= (1, 5)
self.oi.setImage(np.swapaxes(self.original_image, 0, 1))
self.tweek_filter()
#self.ui.filterParamView.clear()
if a_filter != 'None':
fp = [] # temporary list of filter parameters for given sample
for i in range(0,
len(filter_parameter_list[sample][detector][a_filter])):
fp.append(
self.ui.filterParamView.listAllItems()[i + 1].param.value())
self.filtered_image = filterImage(self.original_image,
a_filter,
*fp)
self.fi.setImage(np.swapaxes(self.filtered_image, 0, 1))
else:
try:
self.filtered_image *= 0
                self.filtered_image = self.filtered_image.astype(np.uint8)
self.fi.setImage(np.swapaxes(self.filtered_image, 0, 1))
except:
pass
#temp_param = Parameter(name=a_filter,
#type='group',
#children=filter_parameter_list[sample][detector][a_filter])
#self.ui.filterParamView.setParameters(temp_param)
##temp_parent = self.ui.filterParamView.listAllItems()[0].param
#temp_param.sigTreeStateChanged.connect(self.filterParam2dict)
else:
self.ui.filterParamView.clear()
try:
self.filtered_image *= 0
self.fi.updateImage()
except:
pass
def help_about_Qt(self):
self.about_Qt = QtGui.QMessageBox.aboutQt(self, "About Qt")
def help_about(self):
self.about = AboutDialog()
self.about.exec_()
def help_changelog(self):
self.changelog = ChangeLogDialog()
self.changelog.exec_()
def export_image(self):
self.update_final_list()
self.save_images = ExportImageWindow()
self.save_images.exec_()
def update_final_list(self):
for sample in final_list:
for detector in final_list[sample]:
final_list[sample][detector] = self.filters[sample].\
param(sample).param(detector).value()
def export_hdf5(self):
self.save_hdf5 = ExportHdf5Window()
self.save_hdf5.exec_()
def fill_item(self, item, value):
"""helper function dic -> QTreeWidget"""
item.setExpanded(True)
if type(value) is dict:
for key, val in sorted(value.items()):
child = QtGui.QTreeWidgetItem()
child.setText(0, str(key))
item.addChild(child)
self.fill_item(child, val)
elif type(value) is list:
for val in value:
child = QtGui.QTreeWidgetItem()
item.addChild(child)
if type(val) is dict:
child.setText(0, '[dict]')
self.fill_item(child, val)
elif type(val) is list:
child.setText(0, '[list]')
self.fill_item(child, val)
else:
                    child.setText(0, str(val))
child.setExpanded(True)
else:
child = QtGui.QTreeWidgetItem()
child.setText(0, str(value))
child.setToolTip(0, str(value))
item.addChild(child)
def fill_ft_widget(self, value):
self.ui.treeDataTiles.clear() # clear tile files tree widget
self.fill_item(self.ui.treeDataTiles.invisibleRootItem(), value)
def fill_final_widget(self, value):
self.ui.treeFinalWidget.clear() # clear tile files tree widget
for i in final_list:
item_0 = QtGui.QTreeWidgetItem(self.ui.treeFinalWidget)
item_0.setCheckState(0, QtCore.Qt.Unchecked)
item_0.setFlags(QtCore.Qt.ItemIsUserCheckable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsTristate)
item_0.setText(0, i)
item_0.setText(1, i)
for j in final_list[i]:
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1.setCheckState(0, QtCore.Qt.Unchecked)
item_1.setFlags(QtCore.Qt.ItemIsUserCheckable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsEnabled)
item_1.setText(0, j)
item_1.setText(1, j)
def exportFinalTree(self):
mapping = {}
root = self.ui.treeFinalWidget.invisibleRootItem()
for index in range(root.childCount()):
parent = root.child(index)
            if parent.checkState(0) >= 1:  # checked or partially checked
mapping[parent.text(0)] = {'name': parent.text(1),
'signals': {}}
for row in range(parent.childCount()):
child = parent.child(row)
if child.checkState(0) == QtCore.Qt.Checked:
mapping[parent.text(0)]['signals'][child.text(0)] =\
child.text(1)
return mapping
def data_overview(self):
samples = 0
thingy = ""
self.maxtile = maxTileNumberBr()
for i in mapping_list:
det = 0
detectors = []
for j in mapping_list[i]:
detectors.append(j)
det += 1
samples += 1
detectors.sort()
dimensions = str(self.maxtile[i]['maxx']) + 'x' + \
str(self.maxtile[i]['maxy'])
thingy += i + "\n " + "elements(+detectors): (" + str(det) + \
")\n " + (", ".join(detectors)) + "\n " + \
"tiles: " + dimensions + "\n\n "
thingy = "samples: (" + str(samples) + ")\n\n " + thingy
self.ui.plainTextEdit.setPlainText(thingy)
def reset_tile_graph_color(self):
for i in self.vb.allChildItems()[2:-1]:
i.setBrush(brush_tile)
def clear_the_data(self):
global mapping_list
global tile_index
global pts
global stitchy
global xstage
global ystage
global image_size
global max_tiles
global final_list
global filter_parameter_list
global base_image
clear_msg = "Are you sure you want to clear all the progress?"
reply = QtGui.QMessageBox.question(self, 'Message',
clear_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
mapping_list = {}
xstage = {}
ystage = {}
tile_index = {}
pts = {}
stitchy = {}
image_size = {}
max_tiles = {}
final_list = {}
filter_parameter_list = {}
base_image = {}
self.rect = {}
self.img = {}
self.data1 = {}
self.ui.treeDataTiles.clear()
self.ui.treeFinalWidget.clear()
self.data_append_flag = 0 # 0 import, 1 for append
self.metadata_append_flag = 0 # same as above
self.ui.actionImportDataFolder.setIcon(self.icon)
self.ui.actionImportDataFolder.setText('import data folder')
self.ui.actionImportMetadata.setIcon(self.icon1)
self.ui.overviewComboBox.setModel(QtGui.QStandardItemModel())
self.parameters = {}
self.ui.treeParameters.clear()
self.ui.plainTextEdit.setPlainText('')
self.vb.clear()
self.fpt.clear()
try:
self.tweeke.vb.clear()
self.oi.setImage((self.original_image * 0).astype(np.uint8))
self.fi.setImage((self.original_image * 0).astype(np.uint8))
except:
pass
self.ui.actionImportMetadata.setEnabled(False)
self.ui.actionExportImages.setEnabled(False)
def import_the_data(self):
"""method of importing tiles from the directory and
populating the dictionary with information and
copying dictionary into QTreeWidget for displaying file hierarchy"""
self.directory = QtGui.QFileDialog.getExistingDirectory(
None,
'Select a folder:',
'/mnt/bulk-data/etc/extra_large_mapping/AL20-1',
QtGui.QFileDialog.ShowDirsOnly)
self.start_importing_data()
for sample in self.filters:
self.filters[sample].sigTreeStateChanged.connect(self.tweek_filter)
self.filters[sample].sigTreeStateChanged.connect(
self.updateFilteredImage)
def getBaseImageName(self):
for sample in mapping_list:
a = list(mapping_list[sample].keys())
b = ['BSE', 'AsB', 'BEI']
#b = ['Signal A']
overlap = [x for x in a if x in b]
if overlap != []:
base_image[sample] = overlap[0]
else:
dlg = BSEMissing(sample)
if dlg.exec_():
base_image[sample] = dlg.returnSample()
@waiting_effects
def start_importing_data(self):
if self.directory != '':
self.data_dir = self.directory
            # scanning the given directory, initialising the data dictionary,
            # and returning the count of objects in the dictionary
z = fill_mapping_dict(self.data_dir)
#check for BSE, AsB, BEI or custom images to use as the base images
self.getBaseImageName()
fill_tile_index()
imageSize()
if z > 0:
self.ui.actionImportMetadata.setEnabled(True)
self.fill_ft_widget(mapping_list)
self.fill_final_widget(final_list)
self.data_overview()
self.ui.tabWidget.setTabEnabled(1, True)
#self.ui.tabWidget.setTabEnabled(2, True)
self.sampleListModel = QtGui.QStandardItemModel()
for r in list(mapping_list.keys()):
item = QtGui.QStandardItem(r)
item.setEditable(False)
                if (self.maxtile[r]['maxx'] < 3) or (self.maxtile[r]['maxy'] < 3):
item.setBackground(col_bad)
else:
item.setBackground(col_not_so_good)
self.sampleListModel.appendRow(item)
if self.data_append_flag == 0:
self.ui.actionImportDataFolder.setText(
'append the data folder')
self.ui.actionImportDataFolder.setIcon(self.icon_a)
self.data_append_flag = 1
self.parameters[r] = param_branch(r, param_basic(r))
self.filters[r] = param_branch(r, filter_basic(r))
self.ui.treeParameters.setParameters(
self.parameters[r], showTop=False)
self.ui.filtersTreeView.setParameters(
self.filters[r], showTop=False)
elif r not in self.parameters:
self.parameters[r] = param_branch(r, param_basic(r))
self.filters[r] = param_branch(r, filter_basic(r))
self.ui.treeParameters.addParameters(
self.parameters[r], showTop=False)
self.ui.filtersTreeView.addParameters(
self.filters[r], showTop=False)
elif self.parameters[r].param(r).param('mosaic tiles').\
param('horizontal tiles').value() <\
max_tiles[r]['maxx'] and self.parameters[r].\
param(r).param('mosaic tiles').\
param('vertical tiles').value() <\
max_tiles[r]['maxy']:
self.parameters[r].param(r).\
param('mosaic tiles').\
param('horizontal tiles').setValue(
max_tiles[r]['maxx'])
self.parameters[r].param(r).\
param('mosaic tiles').\
param('vertical tiles').setValue(
max_tiles[r]['maxy'])
self.rect[r] = {}
self.img[r] = {}
self.data1[r] = {}
pts[r] = tile_name(r, int(max_tiles[r]['maxx'] / 2 + 1),
int(max_tiles[r]['maxy'] / 2 + 1))
stitching_list(r, pts[r])
self.populate_rect_dict(r)
self.populate_img_dict(r)
self.parameters[r].sigTreeStateChanged.connect(
self.redraw_tiles)
self.parameters[r].sigTreeStateChanged.connect(
self.update_stitch)
self.ui.overviewComboBox.setModel(self.sampleListModel)
self.ui.treeFinalWidget.clicked.\
connect(self.toggle_export_buttons)
self.ui.overviewWidget.setVisible(True)
self.ui.stitchWidget.setVisible(True)
# create stitching preview (3x3) instance:
self.tweeke = NineImg(self.ui.overviewComboBox.currentText())
filter_parameters()
else:
self.dialog = QtGui.QMessageBox(
'No Data',
"no sufficient tile data found in:\n" + str(self.data_dir),
QtGui.QMessageBox.Icon(1),
1, 0, 0) # Ok button
self.dialog.show()
@waiting_effects
def save_parameter_state(self):
for sample in self.parameters:
stage_delta_x = self.parameters[sample].param(sample).\
param('stage').param('stage step x')
stage_delta_x.setDefault(stage_delta_x.value())
stage_delta_y = self.parameters[sample].param(sample).\
param('stage').param('stage step y')
stage_delta_y.setDefault(stage_delta_y.value())
width = self.parameters[sample].param(sample).param('Image').\
param('width')
width.setDefault(width.value())
height = self.parameters[sample].param(sample).param('Image').\
param('height')
height.setDefault(height.value())
dx = self.parameters[sample].param(sample).param('Image').\
param('dx')
dx.setDefault(dx.value())
dy = self.parameters[sample].param(sample).param('Image').\
param('dy')
dy.setDefault(dy.value())
angle = self.parameters[sample].param(sample).param('Image').\
param('rotation')
angle.setDefault(angle.value())
left_cut = self.parameters[sample].param(sample).param('Image').\
param('cutoff').param('left')
left_cut.setDefault(left_cut.value())
right_cut = self.parameters[sample].param(sample).param('Image').\
param('cutoff').param('right')
right_cut.setDefault(right_cut.value())
top_cut = self.parameters[sample].param(sample).param('Image').\
param('cutoff').param('top')
top_cut.setDefault(top_cut.value())
bottom_cut = self.parameters[sample].param(sample).param('Image').\
param('cutoff').param('bottom')
bottom_cut.setDefault(bottom_cut.value())
mag = self.parameters[sample].param(sample).param('microscope').\
param('magnification')
mag.setDefault(mag.value())
WD = self.parameters[sample].param(sample).param('microscope').\
param('WD')
WD.setDefault(WD.value())
HV = self.parameters[sample].param(sample).param('microscope').\
param('HV')
HV.setDefault(HV.value())
def populate_rect_dict(self, sample):
root = self.parameters[sample].param(sample)
stage_delta_x = root.param('stage').param('stage step x').value()
stage_delta_y = root.param('stage').param('stage step y').value()
width = root.param('Image').param('width').value()
height = root.param('Image').param('height').value()
dx = root.param('Image').param('dx').value()
dy = root.param('Image').param('dy').value()
angle = root.param('Image').\
param('rotation').value()
left_cut = root.param('Image').\
param('cutoff').param('left').value()
right_cut = root.param('Image').\
param('cutoff').param('right').value()
top_cut = root.param('Image').\
param('cutoff').param('top').value()
bottom_cut = root.param('Image').\
param('cutoff').param('bottom').value()
for i in tile_index[sample]:
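            # place each tile: rotate its integer grid position by `angle`
            # (a standard 2x2 rotation matrix), scale by the stage steps and
            # shift by the cutoff margins expressed in image coordinates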
x1 = cos_(angle) * stage_delta_x *\
(tile_index[sample][i]['x'] - 1) +\
sin_(angle) * stage_delta_y *\
(tile_index[sample][i]['y'] - 1) + dx * left_cut
y1 = -sin_(angle) * stage_delta_x *\
(tile_index[sample][i]['x'] - 1) +\
cos_(angle) * stage_delta_y *\
(tile_index[sample][i]['y'] - 1) + dy * top_cut
self.rect[sample][i] = selectableRect(i,
x1,
y1,
(width - left_cut - right_cut) * dx,
(height - top_cut - bottom_cut) * dy
)
self.rect[sample][i].setPen(pen_tile)
self.rect[sample][i].setToolTip(i)
if sample in pts:
if i in pts[sample]:
self.rect[sample][i].setBrush(brush_selection)
else:
self.rect[sample][i].setBrush(pg.mkBrush(None))
def populate_img_dict(self, sample):
self.img[sample] = {}
root = self.parameters[sample].param(sample)
width = root.param('Image').param('width').value()
height = root.param('Image').param('height').value()
left_cut = root.param('Image').param('cutoff').param('left').value()
right_cut = root.param('Image').param('cutoff').param('right').value()
top_cut = root.param('Image').param('cutoff').param('top').value()
bottom_cut = root.param('Image').param('cutoff').param('bottom').value()
for i in stitchy[sample]:
try:
if mapping_list[sample][base_image[sample]][i].\
rsplit('.', 1)[-1] == 'txt':
self.data1[sample][i] = np.loadtxt(
mapping_list[sample][base_image[sample]][i],
delimiter=';').transpose()
else:
self.data1[sample][i] = misc.imread(
mapping_list[sample][base_image[sample]][i]).transpose()
self.img[sample][i] = pg.ImageItem(
self.data1[sample][i][left_cut:width - right_cut,
top_cut:height - bottom_cut])
self.img[sample][i].setRect(self.rect[sample][i].rect())
self.img[sample][i].setOpacity(0.7)
self.img[sample][i].setToolTip(i)
except:
pass
def draw_tiles(self):
if str(self.ui.overviewComboBox.currentText()) != '':
self.vb.clear()
sample = self.ui.overviewComboBox.currentText()
for i in self.rect[sample]:
self.vb.addItem(self.rect[sample][i])
self.vb.autoRange()
# first time it will fail, so it should try and pass...
# should be cleaned in future
#try:
#self.tweeke.vb.clear()
#for i in self.img[sample]:
#self.tweeke.vb.addItem(myapp.img[sample][i])
#self.tweeke.vb.autoRange()
#except:
#pass
def redraw_stitch(self):
if str(self.ui.overviewComboBox.currentText()) != '':
sample = self.ui.overviewComboBox.currentText()
self.populate_img_dict(sample)
def change_nineImg_sample(self):
try:
sample = self.ui.overviewComboBox.currentText()
self.tweeke.vb.clear()
for i in self.img[sample]:
self.tweeke.vb.addItem(myapp.img[sample][i])
self.tweeke.vb.autoRange()
except:
pass
def update_stitch(self):
if str(self.ui.overviewComboBox.currentText()) != '':
sample = self.ui.overviewComboBox.currentText()
root = self.parameters[sample].param(sample)
width = root.param('Image').param('width').value()
height = root.param('Image').param('height').value()
left_cut = root.param('Image').param('cutoff').param('left').value()
right_cut = root.param('Image').\
param('cutoff').param('right').value()
top_cut = root.param('Image').param('cutoff').param('top').value()
bottom_cut = root.param('Image').\
param('cutoff').param('bottom').value()
for i in stitchy[sample]:
try:
self.img[sample][i].setImage(
self.data1[sample][i][left_cut:width - right_cut,
top_cut:height - bottom_cut])
self.img[sample][i].setRect(self.rect[sample][i].rect())
except:
pass
@waiting_effects
def redraw_tiles(self):
if str(self.ui.overviewComboBox.currentText()) != '':
self.vb.clear()
sample = self.ui.overviewComboBox.currentText()
self.populate_rect_dict(sample)
self.draw_tiles()
self.tweeke.vb.clear()
for i in self.img[sample]:
self.tweeke.vb.addItem(myapp.img[sample][i])
def toggle_export_buttons(self):
if not self.exportFinalTree():
self.ui.actionExportImages.setEnabled(False)
#self.ui.actionExportHdf5.setEnabled(False)
else:
self.ui.actionExportImages.setEnabled(True)
#self.ui.actionExportHdf5.setEnabled(True)
def import_the_jobs(self):
global xstage
global ystage
fd = QtGui.QFileDialog(self)
self.da_file = fd.getOpenFileName(None,
'Select the bruker jobs file',
self.data_dir,
'Bruker jobs file (*.rtj)')
self.update_with_jobs()
self.tweeke.vb.autoRange()
@waiting_effects
def update_with_jobs(self):
from os.path import isfile
thingy = {}
for sample_ in mapping_list: # ??
thingy[sample_] = 0 # ??
if isfile(self.da_file):
if self.metadata_append_flag == 0:
self.ui.actionImportMetadata.setText('append metadata(*.rtj)')
self.ui.actionImportMetadata.setIcon(self.icon1_a)
self.metadata_append_flag = 1
root = parseXML(self.da_file)
for i in root.ClassInstance.ChildClassInstances.ClassInstance:
if i.TRTJobEntry.JobType.text == 'jtMapping':
#TBD add condition here for tiling (one signal)
#jobs in the future
sample = i.TRTJobEntry.RootName.text
if i.TRTJobEntry.RootName.text not in xstage:
xstage[sample] = {}
ystage[sample] = {}
for j in i.TRTJobEntry.ClassInstance:
if j.attrib['Type'] == 'TRTJobSEMSettings':
for k in j.ClassInstance:
if (k.attrib['Type'] == 'TRTSEMData') and\
(thingy[sample] == 0):
self.parameters[sample].param(sample).\
param('microscope').\
param('HV').setValue(float(k.HV.text))
self.parameters[sample].param(sample).\
param('microscope').\
param('WD').setValue(float(k.WD.text))
self.parameters[sample].param(sample).\
param('microscope').\
param('magnification').\
setValue(float(k.Mag.text))
self.parameters[sample].param(sample).\
param('Image').\
param('dx').setValue(
float(k.DX.text) / 1000000)
self.parameters[sample].param(sample).\
param('Image').\
param('dy').setValue(
float(k.DY.text) / 1000000)
thingy[sample] = 1 # ???
elif k.attrib['Type'] == 'TRTSEMStageData':
xstage[sample][i.TRTJobEntry.JobName.text]\
= float(k.X.text)
ystage[sample][i.TRTJobEntry.JobName.text]\
= float(k.Y.text)
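        # derive the stage step from the spread of the recorded stage
        # positions: (max - min) / (tile count - 1); the 1e6 divisor converts
        # the stage units stored in the .rtj file (unit assumption) to metres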
for sample in xstage:
if max_tiles[sample]['maxx'] > 1:
deltax = ((max(xstage[sample].values()) -
min(xstage[sample].values())) /
(max_tiles[sample]['maxx'] - 1)) / 1000000
else:
deltax = 0
self.parameters[sample].param(sample).param('stage').\
param('stage step x').setValue(deltax)
if max_tiles[sample]['maxy'] > 1:
deltay = ((max(ystage[sample].values()) -
min(ystage[sample].values())) /
(max_tiles[sample]['maxy'] - 1)) / 1000000
else:
deltay = 0
self.parameters[sample].param(sample).param('stage').\
param('stage step y').setValue(deltay)
self.populate_rect_dict(sample)
self.populate_img_dict(sample)
self.save_parameter_state()
self.sampleListModel.findItems(sample)[0].\
setBackground(col_good)
def estimate_final_array(self, sample):
"""function which iterates throught allready calculated
bounding rectangles of tiles, and by finding min of (left,top)
and max of (right,bottom), returns those values where first two
can be used as offset populating numpy array
"""
min_left, min_top = float('inf'), float('inf')
max_right, max_bottom = float('-inf'), float('-inf')
for i in self.rect[sample]:
stuff = self.rect[sample][i]
if stuff.rect().left() < min_left:
min_left = stuff.rect().left()
if stuff.rect().top() < min_top:
min_top = stuff.rect().top()
if stuff.rect().right() > max_right:
max_right = stuff.rect().right()
if stuff.rect().bottom() > max_bottom:
max_bottom = stuff.rect().bottom()
root = self.parameters[sample].param(sample)
dx = root.param('Image').param('dx').value()
dy = root.param('Image').param('dy').value()
left = min_left / dx
top = min_top / dy
width = (max_right - min_left) / dx
height = (max_bottom - min_top) / dy
return round(left), round(top), round(width), round(height)
class NineImg():
"""canvas, the pyqtgraph plot where 3x3 images are placed and updated
with basic parameters changed with UI"""
def __init__(self, sample):
v = myapp.ui.graphicsView
self.vb = pg.ViewBox()
self.vb.setAspectLocked()
self.vb.invertY()
v.setCentralItem(self.vb)
for i in myapp.img[sample]:
self.vb.addItem(myapp.img[sample][i])
self.vb.autoRange()
        #### below has to be moved into the main class, before the
        #### instantiation of this class, to connect all samples and not just
        #### the one selected at the beginning
#myapp.parameters[sample].sigTreeStateChanged.\
#connect(myapp.update_stitch)
exitFlag = 1
class ExportHdf5Window(QtGui.QDialog, Ui_ExportHdf5):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setupUi(self)
self.browseButton.pressed.connect(self.open_the_hdf5)
self.startButton.pressed.connect(self.start_sequence)
self.abortButton.pressed.connect(self.abort_sequence)
def open_the_hdf5(self):
pass
def start_sequence(self):
pass
def abort_sequence(self):
pass
class ExportImageWindow(QtGui.QDialog, Ui_FinalImage):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setupUi(self)
self.browseButton.pressed.connect(self.get_the_dir)
self.startButton.pressed.connect(self.start_sequence)
self.abortButton.pressed.connect(self.abort_sequence)
self.directory = ''
def get_the_dir(self):
self.directory = QtGui.QFileDialog.getExistingDirectory(
None,
'Select a folder:',
'/mnt/bulk-data/etc/extra_large_mapping/AL20-1',
QtGui.QFileDialog.ShowDirsOnly)
self.statusText.setText(self.directory)
def stitch_the_dimention(self, sample, plane):
        #left offset, top offset, width, height:
lo, to, w, h = myapp.estimate_final_array(sample)
mapping = np.zeros((h + 1, w + 1), dtype='uint8')
root = myapp.parameters[sample].param(sample)
width = root.param('Image').param('width').value()
height = root.param('Image').param('height').value()
dx = root.param('Image').param('dx').value()
dy = root.param('Image').param('dy').value()
left_cut = root.param('Image').param('cutoff').param('left').value()
right_cut = root.param('Image').param('cutoff').param('right').value()
top_cut = root.param('Image').param('cutoff').param('top').value()
bottom_cut = root.param('Image').param('cutoff').param('bottom').value()
val1 = 0 # initial progress bar value
step1 = 1 / len(tile_index[sample]) * 100 # progress bar step
self.progressBar_1.setValue(0) # we like progress... not bars... :P
for i in mapping_list[sample][plane]:
fn = mapping_list[sample][plane][i]
left = round(myapp.rect[sample][i].rect().left() / dx) - lo
top = round(myapp.rect[sample][i].rect().top() / dy) - to
right = left + width - left_cut - right_cut
bottom = top + height - top_cut - bottom_cut
if fn.rsplit('.', 1)[-1] == 'txt':
data = np.loadtxt(fn, delimiter=';')
else:
data = misc.imread(fn, flatten=1)
#for those nasty scaled bruker pngs...
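            # assumption about the Bruker export: element maps are stretched
            # to the full 0-255 range, so dividing by 255 and multiplying by
            # the number of distinct grey levels roughly restores the counts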
if plane != base_image[sample]:
scaledown = len(np.unique(data))
data = data * scaledown / 255
            # in case of the signal being more than 8 bit, increase the canvas
# bit depth:
if data.max() > 255 and mapping.dtype == 'uint8':
mapping = mapping.astype(np.float32, copy=False)
# copy the tile into canvas at the calculated place
mapping[top:bottom, left:right] =\
data[top_cut:height - bottom_cut, left_cut:width - right_cut]
self.statusText.setText(fn)
val1 += step1
self.progressBar_1.setValue(int(val1))
if final_list[sample][plane] != 'None':
a_filter = final_list[sample][plane]
self.statusText.setText('applying ' + a_filter + ' filter')
fp = []
for i in filter_parameter_list[sample][plane][final_list[sample][plane]]:
fp.append(i['value'])
mapping[:] = filterImage(mapping, a_filter, *fp)
if mapping.dtype == 'float32':
mapping = mapping.astype(np.uint16, copy=False)
return mapping
def start_sequence(self):
global abort
self.startButton.setEnabled(False)
self.closeButton.setEnabled(False)
self.abortButton.setEnabled(True)
self.imageFormat.setEnabled(False)
self.browseButton.setEnabled(False)
abort = 0
self.progressBar_3.setValue(0)
finito_list = myapp.exportFinalTree()
step3 = 1 / len(finito_list) * 100
val3 = 0
        im_format = self.imageFormat.currentText().strip('*')
for sample in finito_list:
step2 = 1 / len(finito_list[sample]['signals']) * 100
val2 = 0
self.progressBar_2.setValue(0)
for plane in finito_list[sample]['signals']:
thingy = self.stitch_the_dimention(sample, plane)
root = myapp.parameters[sample].param(sample).param('Image')
name_wo_suffix = '/'.join([self.directory,
'_'.join([finito_list[sample]['name'],
finito_list[sample]['signals'][plane]])])
if im_format == '.tif':
tf.imsave(''.join([name_wo_suffix, im_format]), thingy)
else:
misc.imsave(''.join([name_wo_suffix, im_format]), thingy)
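            # write a world file next to the image (pixel size dx and -dy,
            # zero rotation terms, origin 1,1) so the mosaic can be
            # georeferenced; this appears to follow the world-file convention
            # (an assumption based on the values written below)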
world = open(''.join([name_wo_suffix,
''.join([im_format, 'w'])]),
"w")
world.write(str(root.param('dx').value()))
world.write('\n0\n0\n')
world.write(str(-root.param('dy').value()))
world.write('\n1\n1')
world.close()
val2 += step2
self.progressBar_2.setValue(int(val2))
if abort == 1:
break
val3 += step3
self.progressBar_3.setValue(int(val3))
self.progressBar_1.setValue(100)
self.progressBar_2.setValue(100)
self.progressBar_3.setValue(100)
self.statusText.setText('done!')
self.unfreeze_buttons()
    def abort_sequence(self):
        global abort
        # start_sequence() polls this flag between planes; 1 requests an abort
        abort = 1
def unfreeze_buttons(self):
self.startButton.setEnabled(True)
self.closeButton.setEnabled(True)
self.abortButton.setEnabled(False)
self.imageFormat.setEnabled(True)
self.browseButton.setEnabled(True)
class GenericThread(QtCore.QThread):
def __init__(self, function, *args, **kwargs):
QtCore.QThread.__init__(self)
self.function = function
self.args = args
self.kwargs = kwargs
def __del__(self):
self.wait()
def run(self):
self.function(*self.args, **self.kwargs)
return
if __name__ == "__main__":
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
QtGui.QApplication.setPalette(QtGui.QApplication.style().standardPalette())
app = QtGui.QApplication(sys.argv)
myapp = StartQT4()
myapp.show()
sys.exit(app.exec_())
|
sem-geologist/Qstitch
|
Qstitch.py
|
Python
|
gpl-2.0
| 70,991
|
[
"Gaussian"
] |
01988b4e6192df10ed2f5b98bfc0d5284e5bc8b528edfdd835565c11759bd599
|
"""Module make_residimage.
It calculates residual image from the list of gaussians and shapelets
"""
import numpy as N
from scipy import stats # for skew and kurtosis
from image import *
from shapelets import *
import mylogger
### Insert attribute into Image class for model image
Image.resid_gaus = NArray(doc="Residual image calculated from " \
"extracted gaussians")
Image.resid_shap = NArray(doc="Residual image calculated from " \
"shapelet coefficient")
Image.model_gaus = NArray(doc="Model image calculated from " \
"extracted gaussians")
Image.model_shap = NArray(doc="Model image calculated from " \
"shapelet coefficient")
class Op_make_residimage(Op):
"""Creates an image from the fitted gaussians
or shapelets.
The resulting model image is stored in the
resid_gaus or resid_shap attribute.
Prerequisites: module gausfit or shapelets should
be run first.
"""
def __call__(self, img):
import functions as func
from copy import deepcopy as cp
import os
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"ResidImage")
mylog.info("Calculating residual image after subtracting reconstructed gaussians")
shape = img.ch0_arr.shape
thresh= img.opts.fittedimage_clip
resid_gaus = cp(img.ch0_arr)
model_gaus = N.zeros(shape, dtype=N.float32)
for g in img.gaussians:
C1, C2 = g.centre_pix
if hasattr(g, 'wisland_id') and img.waveletimage:
isl = img.islands[g.wisland_id]
else:
isl = img.islands[g.island_id]
b = self.find_bbox(thresh*isl.rms, g)
bbox = N.s_[max(0, int(C1-b)):min(shape[0], int(C1+b+1)),
max(0, int(C2-b)):min(shape[1], int(C2+b+1))]
x_ax, y_ax = N.mgrid[bbox]
ffimg = func.gaussian_fcn(g, x_ax, y_ax)
resid_gaus[bbox] = resid_gaus[bbox] - ffimg
model_gaus[bbox] = model_gaus[bbox] + ffimg
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(img.mask_arr, N.ndarray):
pix_masked = N.where(img.mask_arr == True)
model_gaus[pix_masked] = N.nan
resid_gaus[pix_masked] = N.nan
img.model_gaus_arr = model_gaus
img.resid_gaus_arr = resid_gaus
if img.opts.output_all:
if img.waveletimage:
resdir = img.basedir + '/wavelet/residual/'
moddir = img.basedir + '/wavelet/model/'
else:
resdir = img.basedir + '/residual/'
moddir = img.basedir + '/model/'
if not os.path.exists(resdir): os.makedirs(resdir)
if not os.path.exists(moddir): os.makedirs(moddir)
func.write_image_to_file(img.use_io, img.imagename + '.resid_gaus.fits', resid_gaus, img, resdir)
mylog.info('%s %s' % ('Writing', resdir+img.imagename+'.resid_gaus.fits'))
            func.write_image_to_file(img.use_io, img.imagename + '.model_gaus.fits', (img.ch0_arr - resid_gaus), img, moddir)
mylog.info('%s %s' % ('Writing', moddir+img.imagename+'.model_gaus.fits'))
### residual rms and mean per island
for isl in img.islands:
resid = resid_gaus[isl.bbox]
self.calc_resid_mean_rms(isl, resid, type='gaus')
# Calculate some statistics for the Gaussian residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_gaus[non_masked], axis=None)
std_dev = N.std(resid_gaus[non_masked], axis=None)
skew = stats.skew(resid_gaus[non_masked], axis=None)
kurt = stats.kurtosis(resid_gaus[non_masked], axis=None)
stat_msg = "Statistics of the Gaussian residual image:\n"
stat_msg += " mean: %.3e (Jy/beam)\n" % mean
stat_msg += " std. dev: %.3e (Jy/beam)\n" % std_dev
stat_msg += " skew: %.3f\n" % skew
stat_msg += " kurtosis: %.3f" % kurt
mylog.info(stat_msg)
# Now residual image for shapelets
if img.opts.shapelet_do:
mylog.info("Calculating residual image after subtracting reconstructed shapelets")
shape = img.ch0_arr.shape
fimg = N.zeros(shape, dtype=N.float32)
for isl in img.islands:
if isl.shapelet_beta > 0: # make sure shapelet has nonzero scale for this island
mask=isl.mask_active
cen=isl.shapelet_centre-N.array(isl.origin)
basis, beta, nmax, cf = isl.shapelet_basis, isl.shapelet_beta, \
isl.shapelet_nmax, isl.shapelet_cf
image_recons=reconstruct_shapelets(isl.shape, mask, basis, beta, cen, nmax, cf)
fimg[isl.bbox] += image_recons
model_shap = fimg
resid_shap = img.ch0_arr - fimg
# Apply mask to model and resid images
if hasattr(img, 'rms_mask'):
mask = img.rms_mask
else:
mask = img.mask_arr
if isinstance(mask, N.ndarray):
pix_masked = N.where(mask == True)
model_shap[pix_masked] = N.nan
resid_shap[pix_masked] = N.nan
img.model_shap_arr = model_shap
img.resid_shap_arr = resid_shap
if img.opts.output_all:
func.write_image_to_file(img.use_io, img.imagename + '.resid_shap.fits', resid_shap, img, resdir)
mylog.info('%s %s' % ('Writing ', resdir+img.imagename+'.resid_shap.fits'))
### shapelet residual rms and mean per island
for isl in img.islands:
resid = resid_shap[isl.bbox]
self.calc_resid_mean_rms(isl, resid, type='shap')
# Calculate some statistics for the Shapelet residual image
non_masked = N.where(~N.isnan(img.ch0_arr))
mean = N.mean(resid_shap[non_masked], axis=None)
std_dev = N.std(resid_shap[non_masked], axis=None)
skew = stats.skew(resid_shap[non_masked], axis=None)
kurt = stats.kurtosis(resid_shap[non_masked], axis=None)
mylog.info("Statistics of the Shapelet residual image:")
mylog.info(" mean: %.3e (Jy/beam)" % mean)
mylog.info(" std. dev: %.3e (Jy/beam)" % std_dev)
mylog.info(" skew: %.3f" % skew)
mylog.info(" kurtosis: %.3f" % kurt)
img.completed_Ops.append('make_residimage')
return img
def find_bbox(self, thresh, g):
"""Calculate bounding box for gaussian.
This function calculates size of the box for evaluating
gaussian, so that value of gaussian is smaller than threshold
outside of the box.
Parameters:
        thresh: threshold
g: Gaussian object
"""
from math import ceil, sqrt, log
A = g.peak_flux
S = g.size_pix[0]
if A == 0.0:
return ceil(S*1.5)
if thresh/A >= 1.0 or thresh/A <= 0.0:
return ceil(S*1.5)
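        # solve thresh = A*exp(-r**2/(2*S**2)) for the radius r at which the
        # gaussian drops to the threshold: r = S*sqrt(-2*log(thresh/A))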
return ceil(S*sqrt(-2*log(thresh/A)))
def calc_resid_mean_rms(self, isl, resid, type):
"""Inserts mean and rms of residual image into isl, src, and gaussians
type - specifies 'gaus' or 'shap'
"""
if len(isl.gaul) == 0:
resid = N.zeros(isl.shape, dtype=N.float32)
ind = N.where(~isl.mask_active)
resid = resid[ind]
if type == 'gaus':
isl.gresid_rms = N.std(resid)
isl.gresid_mean = N.mean(resid)
else:
isl.sresid_rms = N.std(resid)
isl.sresid_mean = N.mean(resid)
if hasattr(isl, 'sources'):
for src in isl.sources:
if type == 'gaus':
src.gresid_rms = N.std(resid)
src.gresid_mean = N.mean(resid)
else:
src.sresid_rms = N.std(resid)
src.sresid_mean = N.mean(resid)
for g in src.gaussians:
if type == 'gaus':
g.gresid_rms = N.std(resid)
g.gresid_mean = N.mean(resid)
else:
g.sresid_rms = N.std(resid)
g.sresid_mean = N.mean(resid)
if hasattr(isl, 'dsources'):
for dsrc in isl.dsources: # Handle dummy sources (if any)
if type == 'gaus':
dsrc.gresid_rms = N.std(resid)
dsrc.gresid_mean = N.mean(resid)
else:
dsrc.sresid_rms = N.std(resid)
dsrc.sresid_mean = N.mean(resid)
|
jjdmol/LOFAR
|
CEP/PyBDSM/src/python/make_residimage.py
|
Python
|
gpl-3.0
| 8,960
|
[
"Gaussian"
] |
582a8cbc5e8fbf036443e5bf02ca93ad2f7514e44eeff24c14355a0de6e417b1
|
"""
.. module:: XCScan
   :platform: Windows
.. moduleauthor:: Daniel R. Dietze <daniel.dietze@berkeley.edu>
Measure a 2D contour map consisting of camera signal vs delay time. Can be used to measure Kerr cross-correlation,
TA-maps, FSRS-maps or dT/T-maps. Allows fitting of the data columnwise to a Gaussian to determine probe chirp and IRF.
Data are saved as TAB-delimited (N+1)-column ASCII files (time, N-frequency columns), where the frequency columns
depend on the measurement mode.
..
This file is part of the pyFSRS app.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014-2016 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import wx
import numpy as np
import time
import os
import scipy.optimize as spo
import itertools
import core.FSRSModule as module
import core.FSRSPlot as FSRSplot
import core.FSRSutils as cutils
# ##########################################################################################################################
# the XC scan experiment (derives from the generic Experiment base class)
class XCScan(module.Experiment):
def __init__(self):
module.Experiment.__init__(self)
self.name = "XC Scan"
        # stores the camera, axis and shutter modules
self.cameras = []
self.axes = []
self.shutters = []
# stores the 2d data, each column is a new timepoint
self.data = []
self.points = []
self.bg = []
# stores the plotting window stuff
self.plotWnd = None
self.plotInit = False
self.plotID = 0
self.running = False
# when creating the properties, you should create a start/stop button with the label "Start"
prop = []
prop.append({"label": "Camera", "type": "choice", "choices": [], "value": 0})
prop.append({"label": "Type", "type": "choice", "choices": ["FSRS", "TA", "T/T0", "Kerr"], "value": 3})
prop.append({"label": "# of Frames", "type": "spin", "value": 100, "info": (2, 20000)})
prop.append({"label": "Axis", "type": "choice", "choices": [], "value": 0})
prop = cutils.appendStageParameters(prop, -300, 300, 20)
prop.append({"label": "Shutter", "type": "choice", "choices": [], "value": 0})
prop.append({"label": "Save Last", "type": "button", "value": "Save", "event": "onSave"})
prop.append({"label": "Progress", "type": "progress", "value": 0})
prop.append({"label": "Start", "type": "button", "value": "Scan", "event": "onStart"})
prop.append({"label": "Fit", "type": "button", "value": "Fit", "event": "onFit"})
self.parsePropertiesDict(prop)
self.data = np.array([])
def initialize(self, others=[]):
module.Experiment.initialize(self, others)
# look for input modules
self.cameras = []
self.axes = []
self.shutters = []
axeschoices = []
ccdchoices = []
shutterchoices = []
for m in others:
if m.type == "input" and hasattr(m, "readNframes"):
self.cameras.append(m)
ccdchoices.append(str(m.name))
if m.type == "output":
self.shutters.append(m)
shutterchoices.append(str(m.name))
if m.type == "axis":
self.axes.append(m)
axeschoices.append(str(m.name))
self.getPropertyByLabel("camera").setChoices(ccdchoices)
self.getPropertyByLabel("axis").setChoices(axeschoices)
self.getPropertyByLabel("shutter").setChoices(shutterchoices)
def onFit(self, event):
if len(self.data) == 0:
wx.MessageBox("Nothing to fit yet!", "Fit Last Scan", style=wx.OK)
return
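        # gaussian parameterised by its FWHM: 16**(-u**2) with u = (x-x0)/dx
        # equals 1/2 at |x - x0| = dx/2, so the fitted dx is directly the FWHM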
gauss = lambda x, y0, A, x0, dx: y0 + A * np.power(16.0, -(x - x0)**2 / dx**2)
self.getPropertyByLabel("progress").setValue(0)
x = self.points
dtmp = self.data.T
pos = []
width = []
i = 0
errors = False
for d in dtmp:
try:
popt, pcov = spo.curve_fit(gauss, x, d, [d[0], np.amax(d), x[np.argmax(d)], (x[-1] - x[0]) / 5])
pos.append(popt[2])
width.append(popt[3])
i += 1
self.getPropertyByLabel("progress").setValue(i * 100 / len(dtmp))
except:
pos.append(0.0)
width.append(0.0)
errors = True
pos = np.array(pos)
width = np.array(width)
self.getPropertyByLabel("progress").setValue(0)
if errors:
print "There were errors during the fitting. Could not determine parameters."
else:
print "Dispersion: ", np.amax(pos) - np.amin(pos), "fs"
print "Mean Width: ", np.mean(width), "+-", np.std(width), "fs"
if np.argmax(pos) - np.argmin(pos) > 0:
print "You should remove prism from probe."
else:
print "You should add prism to probe."
plframe = FSRSplot.FSRSDualPlotFrame(None, title="Fit Results", size=(640, 480))
plframe.upperPlotCanvas.setYLabel("Position (fs)")
plframe.lowerPlotCanvas.setYLabel("Width (fs)")
plframe.lowerPlotCanvas.setXLabel("Wavelength (px)")
plframe.upperPlotCanvas.addLine(np.arange(len(pos)), pos)
plframe.lowerPlotCanvas.addLine(np.arange(len(pos)), width)
plframe.Show()
def onSave(self, event):
if len(self.data) == 0:
wx.MessageBox("Nothing to save yet!", "Save Last Scan", style=wx.OK)
return
dlg = wx.FileDialog(None, "Save Last Scan", os.getcwd(), "", "*.*", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# set new working directory
directory = os.path.split(filename)
if not os.path.isdir(filename):
os.chdir(directory[0])
cutils.saveXC(filename, self.points, self.data)
dlg.Destroy()
def onAxisRangeChange(self, event):
cutils.onAxisRangeChange(self, event)
def onStart(self, event=None):
if self.running:
module.Experiment.stop(self)
else:
if self.plotWnd is not None:
self.plotWnd.Destroy()
self.plotInit = False
self.plotID = 0
self.plotWnd = FSRSplot.PlotFrame(None, title=time.strftime("XC Scan"), size=(640, 640))
self.plotWnd.plotCanvas.tightx = True
self.plotWnd.plotCanvas.tighty = True
self.plotWnd.Show()
self.data = []
self.points = []
self.bg = []
s_type = self.getPropertyByLabel("type").getValue()
s_ccd = self.cameras[self.getPropertyByLabel("camera").getValue()]
s_axis = self.axes[self.getPropertyByLabel("axis").getValue()]
s_shutter = self.shutters[self.getPropertyByLabel("shutter").getValue()]
s_frames = int(self.getPropertyByLabel("frames").getValue())
self.points = cutils.prepareScanPoints(self)
self.s_points_iterator = itertools.cycle(self.points)
self.progress_iterator = itertools.cycle(np.arange(len(self.points) * 1 + 1) * 100 / (len(self.points) * 1))
self.getPropertyByLabel("progress").setValue(next(self.progress_iterator))
self.running = True
module.Experiment.start(self, ScanThread, type=s_type, ccd=s_ccd, axis=s_axis, shutter=s_shutter, frames=s_frames, points=self.points, sets=1)
def onFinished(self):
# wait for thread to exit cleanly
module.Experiment.onFinished(self)
self.plotWnd = None
self.plotInit = False
self.plotID = 0
self.running = False
def onUpdate(self, val):
# prepare data
A, B, C = val
mode = self.getPropertyByLabel("type").getValue()
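        # convert the raw camera ratio into the usual observable per mode:
        # FSRS gain uses -ln(ratio), TA uses -log10(ratio) (absorbance in OD)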
if mode == 0:
A = -np.log(A)
elif mode == 1:
A = -np.log10(A)
if mode != 3 or self.bg != []:
if mode == 3:
A = 0.5 * (B + C) - self.bg
if len(self.data) == 0:
self.data = np.array([A])
else:
self.data = np.vstack([self.data, A])
# update progress bar
self.getPropertyByLabel("progress").setValue(next(self.progress_iterator))
# plot in window
if isinstance(self.plotWnd, wx.Frame):
if self.plotInit:
self.plotWnd.plotCanvas.setImage(self.plotID, np.arange(len(A)), self.points[0: len(self.data)], self.data)
else:
self.plotInit = True
self.plotID = self.plotWnd.plotCanvas.addImage(np.arange(len(A)), self.points[0: len(self.data)], self.data)
else:
# user closed the plotWindow -> stop thread
self.onStart()
else:
self.bg = 0.5 * (B + C)
# ################################################################################
# helper class for experiment providing the actual scan thread
class ScanThread(module.ExperimentThread):
def __init__(self, parent, **argv):
module.ExperimentThread.__init__(self, parent)
self.ccd = argv['ccd']
self.frames = argv['frames']
self.shutter = argv['shutter']
self.axis = argv['axis']
self.points = argv['points']
self.sets = argv['sets']
self.type = argv['type']
# this is the actual scan routine
def run(self):
# send started-Event
wx.CallAfter(self.parent.onStarted)
cset = 0
        # wait 100 ms before starting
        time.sleep(0.1)
# background correction for Kerr
if self.type == 3:
# close shutter
self.shutter.write(0)
# read background frame
if self.canQuit.isSet() == 0:
val = self.ccd.readNframes(self.frames, self.canQuit)
# send to gui
wx.CallAfter(self.parent.onUpdate, val)
# open shutter
self.shutter.write(1)
# enter main loop
while(self.canQuit.isSet() == 0 and cset < self.sets):
cpoint = 0
# move to first point
self.axis.goto(self.points[cpoint])
while(cpoint < len(self.points) and self.canQuit.isSet() == 0):
# wait for axis to finish moving
while self.axis.is_moving() and self.canQuit.isSet() == 0:
time.sleep(0.1)
# read
if self.canQuit.isSet() == 0:
val = self.ccd.readNframes(self.frames, self.canQuit)
# send data to main GUI
wx.CallAfter(self.parent.onUpdate, val)
cpoint += 1
# move to next point
self.axis.goto(self.points[cpoint % len(self.points)])
cset += 1
# return axis to zero
self.axis.goto(0.0)
# close shutter
self.shutter.write(0)
# send terminated-Event
wx.CallAfter(self.parent.onFinished)
|
ddietze/pyFSRS
|
installed_modules/xFSRS/XCScan.py
|
Python
|
gpl-3.0
| 12,082
|
[
"Gaussian"
] |
8477f590b5a42d87c291c63830b2c457ed2460494aa73db9cb2c4e646c0fb4da
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
.. currentmodule:: pylayers.simul.simultraj
.. autosummary::
"""
import doctest
import numpy as np
import copy
import matplotlib.pylab as plt
import pylayers.util.pyutil as pyu
import pylayers.signal.waveform as wvf
from pylayers.signal.device import Device
# Handle Layout
from pylayers.gis.layout import Layout
# Handle VectChannel and ScalChannel
from pylayers.antprop import antenna
from pylayers.network.network import Network
from pylayers.simul.link import *
from pylayers.measures.cormoran import *
# Handle directory hierarchy
from pylayers.util.project import *
# Handle UWB measurements
import pylayers.mobility.trajectory as tr
from pylayers.mobility.ban.body import *
from pylayers.antprop.statModel import *
import pandas as pd
import csv
class Simul(PyLayers):
"""
Link oriented simulation
A simulation requires :
+ A Layout
+ A Person
+ A Trajectory
or a CorSer instance
Members
-------
    dpersons : dictionary of persons (agents)
    dap : dictionary of access points
Methods
-------
load_simul : load configuration file
    load_CorSer : load a CorSer file
    _gen_net : generate network and associated links
show : show layout and network
evaldeter : run simulation over time
"""
def __init__(self, source ='simulnet_TA-Office.h5',verbose=False):
""" object constructor
Parameters
----------
source : string
h5 trajectory file default simulnet_TA-Office.h5
verbose : boolean
Notes
-----
        The Simul object stores its per-link results in a pandas DataFrame
        (self.data), appended to an HDF5 store as the simulation runs.
"""
# self.progress = -1 # simulation not loaded
self.verbose = verbose
self.cfield = []
self.dpersons = {}
self.dap = {}
self.Nag = 0
self.Nap = 0
        # source analysis
        cutoff = 2  # default DLink cutoff; the CorSer branch sets it explicitly
        if isinstance(source,str):
self.filetraj = source
self.load_simul(source)
self.source = 'simul'
elif 'pylayers' in source.__module__:
self.filetraj = source._filename
self.load_CorSer(source)
cutoff=2
self.source = 'CorSer'
# generate the Network
# The wireless standard and frequency is fixed in this function
#
self._gen_net()
# initialize Stochastic Link
self.SL = SLink()
# initialize Deterministic Link
self.DL = DLink(L=self.L,verbose=self.verbose)
self.DL.cutoff=cutoff
self.filename = 'simultraj_' + self.filetraj + '.h5'
# data is a panda container which is initialized
#
# We do not save all the simulation in a DataFRame anymore
#
#self.data = pd.DataFrame(columns=['id_a', 'id_b',
# 'x_a', 'y_a', 'z_a',
# 'x_b', 'y_b', 'z_b',
# 'd', 'eng', 'typ',
# 'wstd', 'fcghz',
# 'fbminghz', 'fbmaxghz', 'fstep', 'aktk_id',
# 'sig_id', 'ray_id', 'Ct_id', 'H_id'
# ])
#self.data.index.name='t'
self._filecsv = self.filename.split('.')[0] + '.csv'
self.todo = {'OB': True,
'B2B': True,
'B2I': True,
'I2I': False}
filenameh5 = pyu.getlong(self.filename,pstruc['DIRLNK'])
if os.path.exists(filenameh5) :
self.loadpd()
self.settime(0.)
# self._saveh5_init()
def __repr__(self):
s = 'Simul trajectories class\n'
s = s + '------------------------\n'
s = s +'\n'
s = s + 'Used layout: ' + self.L.filename + '\n'
s = s + 'Number of Agents: ' + str(self.Nag) + '\n'
s = s + 'Number of Access Points: ' + str(self.Nap) + '\n'
s = s + 'Link to be evaluated: ' + str(self.todo) + '\n'
s = s + 'tmin: ' + str(self._tmin) + '\n'
s = s + 'tmax: ' + str(self._tmax) + '\n'
s = s +'\n'
# network info
s = s + 'self.N :\n'
s = s + self.N.__repr__() + '\n'
s = s + 'CURRENT TIME: ' + str(self.ctime) + '\n'
return s
def load_simul(self, source):
""" load a simultraj configuration file
Parameters
----------
source : string
name of simulation file to be loaded
"""
self.filetraj = source
if not os.path.isfile(source):
            raise AttributeError('Trajectory file ' + source + ' has not been found. '
                                 'Please make sure you have run a simulnet simulation '
                                 'before running simultraj.')
# get the trajectory
traj = tr.Trajectories()
traj.loadh5(self.filetraj)
# get the layout
self.L = Layout(traj.Lfilename)
# resample trajectory
for ut, t in enumerate(traj):
if t.typ == 'ag':
person = Body(t.name + '.ini')
tt = t.time()
self.dpersons.update({t.name: person})
self._tmin = tt[0]
self._tmax = tt[-1]
self.time = tt
else:
pos = np.array([t.x[0], t.y[0], t.z[0]])
self.dap.update({t.ID: {'pos': pos,
'ant': antenna.Antenna(),
'name': t.name
}
})
self.ctime = np.nan
self.Nag = len(self.dpersons.keys())
self.Nap = len(self.dap.keys())
self.traj = traj
def load_CorSer(self,source):
""" load CorSer file for simulation
Parameters
----------
        source : CorSer
            CorSer instance to load the simulation from
"""
if isinstance(source.B,Body):
B = [source.B]
elif isinstance(source.B,list):
B = source.B
elif isinstance(source.B,dict):
B=source.B.values()
else:
raise AttributeError('CorSer.B must be a list or a Body')
self.L = source.L
self.traj = tr.Trajectories()
self.traj.Lfilename=self.L._filename
for b in B:
self.dpersons.update({b.name: b})
self._tmin = b.time[0]
self._tmax = b.time[-1]
self.time = b.time
self.traj.append(b.traj)
for ap in source.din:
techno,ID=ap.split(':')
if techno == 'HKB':
techno = 'hikob'
if techno == 'TCR':
techno = 'tcr'
if techno == 'BS':
techno = 'bespoon'
self.dap.update({ap: {'pos': source.din[ap]['p'],
'ant': source.din[ap]['ant'],
'T': source.din[ap]['T'],
'name': techno
}
})
self.ctime = np.nan
self.Nag = len(B)
self.Nap = len(source.din)
self.corser = source
def _gen_net(self):
""" generate Network and associated links
Notes
-----
Create self.N : Network object
See Also
--------
pylayers.network.network
"""
#
# Create Network
#
N = Network()
#
# get devices on bodies
#
# forall person
# forall device
for p in self.dpersons:
D = []
for dev in self.dpersons[p].dev:
aDev = Device(self.dpersons[p].dev[dev]['name'], ID = dev)
D.append(aDev)
D[-1].ant['A1']['name'] = self.dpersons[p].dev[dev]['file']
D[-1].ant['antenna'] = self.dpersons[p].dev[dev]['ant']
N.add_devices(D, grp=p)
#
# get access point devices
#
for ap in self.dap:
D = Device(self.dap[ap]['name'], ID = ap)
D.ant['antenna'] = self.dap[ap]['ant']
N.add_devices(D, grp = 'ap', p = self.dap[ap]['pos'])
N.update_orient(ap, self.dap[ap]['T'], now = 0.)
# create Network
#
# _get_wstd
# _get_grp
# _connect
# _init_PN
#
N.create()
self.N = N
def show(self):
""" show actual simlulation configuration
"""
fig, ax = self.L.showGs()
fig, ax = self.N.show(fig=fig, ax=ax)
return fig, ax
def evaldeter(self, na, nb, wstd, fmod='force',nf=10,fGHz=[], **kwargs):
""" deterministic evaluation of a link
Parameters
----------
na : string:
node a id in self.N (Network)
nb : string:
node b id in self.N (Network)
wstd : string:
            wireless standard used for communication between na and nb
        fmod : string ('center'|'band'|'force')
mode of frequency evaluation
center : single frequency (center frequency of a channel)
band : nf points on the whole band
force : takes directly fGHz
nf : int:
number of frequency points (if fmode = 'band')
**kwargs : argument of DLink
Returns
-------
(a, t )
a : ndarray
alpha_k
t : ndarray
tau_k
See Also
--------
pylayers.simul.link.DLink
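        Examples
        --------
        Sketch only; the node ids and wstd below are illustrative, not taken
        from a real Network:
        >>> # a, t = S.evaldeter('HKB:1', 'HKB:2', 'ieee802154', fmod='center')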
"""
# todo in network :
        # take into consideration the position and rotation of the antenna, not the device
self.DL.Aa = self.N.node[na]['ant']['antenna']
self.DL.a = self.N.node[na]['p']
self.DL.Ta = self.N.node[na]['T']
self.DL.Ab = self.N.node[nb]['ant']['antenna']
self.DL.b = self.N.node[nb]['p']
self.DL.Tb = self.N.node[nb]['T']
#
# The frequency band is chosen from the selected standard
# if fmode == 'center'
# only center frequency is calculated
#
#'
if fmod == 'center':
self.DL.fGHz = self.N.node[na]['wstd'][wstd]['fcghz']
if fmod == 'band':
fminGHz = self.N.node[na]['wstd'][wstd]['fbminghz']
fmaxGHz = self.N.node[na]['wstd'][wstd]['fbmaxghz']
self.DL.fGHz = np.linspace(fminGHz, fmaxGHz, nf)
if fmod == 'force':
assert len(fGHz)>0,"fGHz has not been defined"
self.DL.fGHz = fGHz
a, t = self.DL.eval(**kwargs)
return a, t
def evalstat(self, na, nb):
""" statistical evaluation of a link
Parameters
----------
        na : string:
            node a id in self.N (Network)
        nb : string:
            node b id in self.N (Network)
Returns
-------
(a, t, eng)
a : ndarray
alpha_k
t : ndarray
tau_k
eng : float
engagement
"""
pa = self.N.node[na]['p']
pb = self.N.node[nb]['p']
if self.source == 'simul':
dida, name = na.split('_')
didb, name = nb.split('_')
elif self.source =='CorSer':
bpa,absolutedida,dida,name,technoa = self.corser.devmapper(na)
bpb,absolutedidb,didb,name,technob = self.corser.devmapper(nb)
ak, tk, eng = self.SL.onbody(self.dpersons[name], dida, didb, pa, pb)
return ak, tk, eng
def settime(self,t):
""" set current time
"""
self.ctime = t
self._traj=copy.copy(self.traj)
self.update_pos(t)
def run(self, **kwargs):
""" run the link evaluation along a trajectory
Parameters
----------
OB: boolean
perform on body statistical link evaluation
B2B: boolean
perform body to body deterministic link evaluation
B2I: boolean
perform body to infrastructure deterministic link evaluation
I2I: boolean
perform infrastructure to infrastructure deterministic link eval.
links: dict
            dictionary of links to be evaluated (key is wstd and value is a list of links)
(if [], all link are considered)
wstd: list
list of wstd to be evaluated
(if [], all wstd are considered)
t: np.array
list of timestamp to be evaluated
(if [], all timestamps are considered)
        btr : boolean
            time in bit-reversed order; t is then (tmin, tmax, N) with Npoints = 2**N
replace_data: boolean (True)
if True , reference id of all already simulated link will be erased
and replace by new simulation id
fGHz : np.array
frequency in GHz
Examples
--------
>>> from pylayers.simul.simultraj import *
>>> from pylayers.measures.cormoran import *
>>> C=CorSer(layout=True)
>>> S=Simul(C,verbose=True)
>>> link={'ieee802154':[]}
>>> link['ieee802154'].append(S.N.links['ieee802154'][0])
>>> lt = [0,0.2,0.3,0.4,0.5]
>>> S.run(links=link,t=lt)
"""
defaults = {'OB': True,
'B2B': True,
'B2I': True,
'I2I': False,
'links': {},
'wstd': [],
't': np.array([]),
'btr':True,
'DLkwargs':{},
'replace_data':True,
'fmod':'force',
'fGHz':np.array([2.45])
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
DLkwargs = kwargs.pop('DLkwargs')
links = kwargs.pop('links')
wstd = kwargs.pop('wstd')
OB = kwargs.pop('OB')
B2B = kwargs.pop('B2B')
B2I = kwargs.pop('B2I')
I2I = kwargs.pop('I2I')
fmod = kwargs.pop('fmod')
self.fGHz = kwargs.pop('fGHz')
self.todo.update({'OB':OB,'B2B':B2B,'B2I':B2I,'I2I':I2I})
# Check link attribute
if links == {}:
links = self.N.links
elif not isinstance(links, dict):
raise AttributeError('links is {wstd:[list of links]}, see self.N.links')
for k in links.keys():
checkl = [l in self.N.links[k] for l in links[k]]
            if not all(checkl):
                # if sum(checkl) != len(self.N.links):
                uwrong = np.where(~np.array(checkl))[0]
                raise AttributeError(str(np.array(links[k])[uwrong])
                                     + ' links do not exist in Network')
wstd = links.keys()
# # Check wstd attribute
# if wstd == []:
# wstd = self.N.wstd.keys()
# elif not isinstance(wstd, list):
# wstd = [wstd]
checkw = [w in self.N.wstd.keys() for w in wstd]
if sum(checkw) != len(wstd):
            uwrong = np.where(~np.array(checkw))[0]
raise AttributeError(str(np.array(wstd)[uwrong])
+ ' wstd are not in Network')
# force time attribute compliant
if not isinstance(kwargs['t'],np.ndarray):
if isinstance(kwargs['t'],list):
lt = np.array(kwargs['t'])
elif (isinstance(kwargs['t'], int)
or isinstance(kwargs['t'],float)):
lt = np.array([kwargs['t']])
else :
lt = kwargs['t']
#if len(lt) == 0:
# lt = self.time
# check time attribute
if kwargs['btr']:
if (lt[0] < self._tmin) or\
(lt[1] > self._tmax) :
raise AttributeError('Requested time range not available')
# self._traj is a copy of self.traj, which is affected by resampling.
# it is only a temporary attribute for a given run
# if len(lt) > 1:
# sf = 1/(1.*lt[1]-lt[0])
# self._traj = self.traj.resample(sf=sf, tstart=lt[0])
# else:
# self._traj = self.traj.resample(sf=1.0, tstart=lt[0])
# self._traj.time()
# self.time = self._traj.t
# self._time = pd.to_datetime(self.time,unit='s')
#
# Nested Loops
#
# time
# standard
# links
# evaldeter &| evalstat
#
#lt = self.get_sim_time(lt)
#self._time=self.get_sim_time(lt)
init = True
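        # bit-reversed ordering walks the time axis coarse-to-fine, so partial
        # results already sample the whole [tmin, tmax] interval evenly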
if kwargs['btr']:
tmin = lt[0]
tmax = lt[1]
Nt = int(2**lt[2])
ta = np.linspace(tmin,tmax,Nt)
it = np.hstack((np.r_[0],np.r_[pyu.bitreverse(Nt,int(lt[2]))]))
#trev = t[it]
else:
ta = kwargs['t']
it = range(len(ta))
## Start to loop over time
## ut : counter
## t : time value (s)
#for ut, t in enumerate(lt):
for ks,ut in enumerate(it):
t = ta[ut]
self.ctime = t
# update spatial configuration of the scene for time t
self.update_pos(t)
# print self.N.__repr__()
## Start to loop over available Wireless standard
##
for w in wstd:
## Start to loop over the chosen links stored in links
##
for na, nb, typ in links[w]:
# If type of link is valid (Body 2 Body,...)
#
if self.todo[typ]:
if self.verbose:
print('-'*30)
print('time:', t, '/', lt[-1] ,' time idx:', ut, '/',len(ta),'/',ks)
print('processing: ',na, ' <-> ', nb, 'wstd: ', w)
print('-'*30)
eng = 0
#
# Invoque link deterministic simulation
#
# node : na
# node : nb
# wstd : w
#
self.evaldeter(na, nb,
w,
applywav=False,
fmod = fmod,
fGHz = self.fGHz,
**DLkwargs)
# if typ == 'OB':
# self.evalstat(na, nb)
# eng = self.SL.eng
# L = self.DL + self.SL
# self._ak = L.H.ak
# self._tk = L.H.tk
# else :
# Get alphak an tauk
self._ak = self.DL.H.ak
self._tk = self.DL.H.tk
aktk_id = str(ut) + '_' + na + '_' + nb + '_' + w
# this is a fragile way to proceed!
# the id is padded to a fixed number of characters
while len(aktk_id)<40:
aktk_id = aktk_id + ' '
df = pd.DataFrame({ 'id_a': na,
'id_b': nb,
'x_a': self.N.node[na]['p'][0],
'y_a': self.N.node[na]['p'][1],
'z_a': self.N.node[na]['p'][2],
'x_b': self.N.node[nb]['p'][0],
'y_b': self.N.node[nb]['p'][1],
'z_b': self.N.node[nb]['p'][2],
'd': self.N.edge[na][nb]['d'],
'eng': eng,
'typ': typ,
'wstd': w,
'fcghz': self.N.node[na]['wstd'][w]['fcghz'],
'fbminghz': self.fGHz[0],
'fbmaxghz': self.fGHz[-1],
'nf': len(self.fGHz),
'aktk_id':aktk_id,
'sig_id': self.DL.dexist['sig']['grpname'],
'ray_id': self.DL.dexist['ray']['grpname'],
'Ct_id': self.DL.dexist['Ct']['grpname'],
'H_id': self.DL.dexist['H']['grpname'],
},columns=['id_a', 'id_b',
'x_a', 'y_a', 'z_a',
'x_b', 'y_b', 'z_b',
'd', 'eng', 'typ',
'wstd', 'fcghz',
'fbminghz', 'fbmaxghz', 'nf', 'aktk_id',
'sig_id', 'ray_id', 'Ct_id', 'H_id'
],index= [t]) #self._time[ut]])
self.savepd(df)
def replace_data(self, df):
"""check if a dataframe df already exists in self.data
Parameters
----------
df : pd.DataFrame
Returns
-------
boolean
True if already exists
False otherwise
"""
self.data[(self.data.index == df.index) &
(self.data['id_a'] == df['id_a'].values[0]) &
(self.data['id_b'] == df['id_b'].values[0]) &
(self.data['wstd'] == df['wstd'].values[0])]=df.values
def check_exist(self, df):
"""check if a dataframe df already exists in self.data
Parameters
----------
df : pd.DataFrame
Returns
-------
boolean
True if already exists
False otherwise
"""
# check init case
if not len(self.data.index) == 0:
ud = self.data[(self.data.index == df.index) &
(self.data['id_a'] == df['id_a'].values[0]) &
(self.data['id_b'] == df['id_b'].values[0]) &
(self.data['wstd'] == df['wstd'].values[0])]
if len(ud) == 0:
return False
else :
return True
else :
return False
def savepd(self,df):
""" save data information of a simulation
Parameters
----------
df : pd.DataFrame
single-row dataframe to append to the HDF5 store
Notes
-----
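The dataframe is appended to the HDF5 store of the simulation under
the key 'df'. A sketch of manual inspection (path resolution as in
the method body):

store = pd.HDFStore(pyu.getlong(self.filename, pstruc['DIRLNK']))
store['df'].head()
store.close()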
"""
filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
store = pd.HDFStore(filenameh5)
#self.data=self.data.sort()
store.append('df',df)
store.close()
def loadpd(self):
""" load data from previous simulations
"""
filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
store = pd.HDFStore(filenameh5)
#self.data = pd.read_hdf(filenameh5,'df')
self.data = store.get('df')
self.data.index.name='t'
self.data = self.data.sort_index()
def get_sim_time(self,t):
""" retrieve closest time value in regard of passed t value in parameter
"""
if not isinstance(t,list) and not isinstance(t,np.ndarray):
return np.array([self.time[np.where(self.time <=t)[0][-1]]])
else :
return np.array([self.get_sim_time(tt) for tt in t])[:,0]
def get_df_from_link(self,id_a,id_b,wstd=''):
""" Return a restricted data frame for a specific link
Parameters
----------
id_a : str
node id a
id_b: str
node id b
wstd: str
optional : wireless standard
"""
if wstd == '':
return self.data[(self.data['id_a']==id_a) &
(self.data['id_b']==id_b)]
else :
return self.data[(self.data['id_a']==id_a) &
(self.data['id_b']==id_b) &
(self.data['wstd']==wstd)]
def update_pos(self, t):
""" update positions of devices and bodies for a given time index
Parameters
----------
t : float
time value (s)
"""
# if bodies are involved in the simulation
if ((self.todo['OB']) or (self.todo['B2B']) or (self.todo['B2I'])):
nodeid = []
pos = []
devlist = []
orient = []
for up, person in enumerate(self.dpersons.values()):
person.settopos(self._traj[up], t=t, cs=True)
name = person.name
dev = person.dev.keys()
devlist.extend(dev)
#nodeid.extend([n + '_' + name for n in dev])
pos.extend([person.dcs[d][:, 0] for d in dev])
orient.extend([person.acs[d] for d in dev])
# TODO !!!!!!!!!!!!!!!!!!!!
# in a future version , the network update must also update
# antenna position in the device coordinate system
self.N.update_pos(devlist, pos, now=t)
self.N.update_orient(devlist, orient, now=t)
self.N.update_dis()
def get_value(self,**kwargs):
""" retrieve output parameter at a specific time
Parameters
----------
typ : list
list of parameters to be retrieved
(R | C | H | ak | tk | rss)
links: list
dictionary of links to be evaluated (key is wstd and value is a list of links)
(if empty, all links are considered)
t: int or np.array
list of timestamps to be evaluated | single time instant
Returns
-------
output: dict
[link_key]['t']
['ak']
...
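Examples
--------
A minimal sketch following the `get_link` example below (assumes the
CORMORAN measurement dataset is available and the simulation has
been run):
>>> from pylayers.simul.simultraj import *
>>> from pylayers.measures.cormoran import *
>>> C = CorSer(serie=6, day=11, layout=True)
>>> S = Simul(C, verbose=False)
>>> out = S.get_value(typ=['ak', 'tk'], links=S.N.links, t=0)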
"""
# get time
defaults = {'t': 0,
'typ':['ak'],
'links': {},
'wstd':[],
'angles':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
# allocate an empty dictionary for the selected output
output={}
# manage time t can be a list or a float
t = kwargs['t']
t = self.get_sim_time(t)
dt = self.time[1]-self.time[0]
# manage links
plinks = kwargs['links']
links=[]
if isinstance(plinks,dict):
for l in plinks.keys():
links.extend(plinks[l])
if len(links) == 0:
raise AttributeError('Please give valid links to get values')
# output['t']=[]
# output['time_to_simul']=[]
# for each requested time step
for tt in t :
# for each requested links
for link in links:
linkname=link[0]+'-'+link[1]
if linkname not in output:
output[linkname] = {}
if 't' not in output[linkname]:
output[linkname]['t'] = []
# restrict global dataframe self.data to the specific link
df = self.get_df_from_link(link[0],link[1])
# restrict global dataframe self.data to the specific z
df = df[(df.index > tt-dt) & (df.index <= tt+dt)]
if len(df) != 0:
output[linkname]['t'].append(tt)
if len(df)>1:
print('Warning possible issue in self.get_value')
line = df.iloc[-1]
# # get info of the corresponding timestamp
# line = df[(df['id_a'] == link[0]) & (df['id_b'] == link[1])].iloc[-1]
# if len(line) == 0:
# line = df[(df['id_b'] == link[0]) & (df['id_a'] == link[1])]
# if len(line) == 0:
# raise AttributeError('invalid link')
#retrieve correct position and orientation given the time
#self.update_pos(t=tt)
# antennas positions
#self.DL.a = self.N.node[link[0]]['p']
#self.DL.b = self.N.node[link[1]]['p']
# antennas orientation
#self.DL.Ta = self.N.node[link[0]]['T']
#self.DL.Tb = self.N.node[link[1]]['T']
# antennas object
#self.DL.Aa = self.N.node[link[0]]['ant']['antenna']
#self.DL.Ab = self.N.node[link[1]]['ant']['antenna']
# get the antenna index
#uAa_opt, uAa = self.DL.get_idx('A_map',self.DL.Aa._filename)
#uAb_opt, uAb = self.DL.get_idx('A_map',self.DL.Ab._filename)
if 'ak' in kwargs['typ'] or 'tk' in kwargs['typ'] or 'rss' in kwargs['typ']:
H_id = line['H_id'].decode('utf8')
# load the proper link
# parse index
lid = H_id.split('_')
#if (lid[5]==str(uAa))&(lid[6]==str(uAb)):
self.DL.load(self.DL.H,H_id)
if 'ak' in kwargs['typ']:
if 'ak' not in output[linkname]:
output[linkname]['ak']=[]
output[linkname]['ak'].append(copy.deepcopy(self.DL.H.ak))
if 'tk' in kwargs['typ']:
if 'tk' not in output[linkname]:
output[linkname]['tk']=[]
output[linkname]['tk'].append(copy.deepcopy(self.DL.H.tk))
if 'rss' in kwargs['typ']:
if 'rss' not in output[linkname]:
output[linkname]['rss']=[]
output[linkname]['rss'].append(copy.deepcopy(self.DL.H.rssi()))
if 'R' in kwargs['typ']:
if 'R' not in output[linkname]:
output[linkname]['R']=[]
ray_id = line['ray_id']
self.DL.load(self.DL.R,ray_id)
output[linkname]['R'].append(copy.deepcopy(self.DL.R))
if 'C' in kwargs['typ']:
if 'C' not in output[linkname]:
output[linkname]['C']=[]
Ct_id = line['Ct_id']
self.DL.load(self.DL.C,Ct_id)
if kwargs['angles']:
self.DL.C.islocal=False
self.DL.C.locbas(Tt=self.DL.Ta, Tr=self.DL.Tb)
#T channel
output[linkname]['C'].append(copy.deepcopy(self.DL.C))
if 'H' in kwargs['typ']:
if 'H' not in output[linkname]:
output[linkname]['H']=[]
H_id = line['H_id']
lid = H_id.split('_')
#if (lid[5]==str(uAa))&(lid[6]==str(uAb)):
self.DL.load(self.DL.H,H_id)
output[linkname]['H'].append(copy.deepcopy(self.DL.H))
# if time value not found in dataframe
else:
if 'time_to_simul' not in output[linkname]:
output[linkname]['time_to_simul'] = []
output[linkname]['time_to_simul'].append(tt)
for l in output.keys():
if 'time_to_simul' in output[l]:
print('link', l , 'require simulation for timestamps', output[l]['time_to_simul'])
return(output)
def get_link(self,**kwargs):
""" retrieve a Link specific time from a simultraj
Parameters
----------
typ : list
list of parameters to be retrieved
(ak | tk | R | C)
links: list
dictionary of links to be evaluated (key is wstd and value is a list of links)
(if empty, all links are considered)
t: int or np.array
list of timestamps to be evaluated | single time instant
Returns
-------
DL : DLink
Examples
--------
>>> from pylayers.simul.simultraj import *
>>> from pylayers.measures.cormoran import *
>>> C=CorSer(serie=6,day=11,layout=True)
>>> S = Simul(C,verbose=False)
>>> DL = S.get_link(typ=['R','C','H'])
"""
# get time
defaults = {'t': 0,
'typ':['ak'],
'links': {},
'wstd':[],
'angles':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
output={}
# manage time
t = kwargs['t']
t = self.get_sim_time(t)
dt = self.time[1]-self.time[0]
# manage links
plinks = kwargs['links']
links=[]
if isinstance(plinks,dict):
for l in plinks.keys():
links.extend(plinks[l])
if len(links) == 0:
raise AttributeError('Please give valid links to get values')
# output['t']=[]
# output['time_to_simul']=[]
# for each requested time step
for tt in t :
# for each requested links
for link in links:
linkname=link[0]+'-'+link[1]
if linkname not in output:
output[linkname] = {}
if 't' not in output[linkname]:
output[linkname]['t'] = []
# restrict global dataframe self.data to the specific link
df = self.get_df_from_link(link[0],link[1])
# restrict global dataframe self.data to the specific z
df = df[(df.index > tt-dt) & (df.index <= tt+dt)]
if len(df) != 0:
output[linkname]['t'].append(tt)
if len(df)>1:
print('Warning possible issue in self.get_link')
line = df.iloc[-1]
# # get info of the corresponding timestamp
# line = df[(df['id_a'] == link[0]) & (df['id_b'] == link[1])].iloc[-1]
# if len(line) == 0:
# line = df[(df['id_b'] == link[0]) & (df['id_a'] == link[1])]
# if len(line) == 0:
# raise AttributeError('invalid link')
#retrieve correct position and orientation given the time
self.update_pos(t=tt)
self.DL.a = self.N.node[link[0]]['p']
self.DL.b = self.N.node[link[1]]['p']
self.DL.Ta = self.N.node[link[0]]['T']
self.DL.Tb = self.N.node[link[1]]['T']
#self.DL.Aa = self.N.node[link[0]]['ant']['antenna']
#self.DL.Ab = self.N.node[link[1]]['ant']['antenna']
#H_id = line['H_id'].decode('utf8')
#self.DL.load(self.DL.H,H_id)
if 'R' in kwargs['typ']:
ray_id = line['ray_id']
self.DL.load(self.DL.R,ray_id)
if 'C' in kwargs['typ']:
Ct_id = line['Ct_id']
self.DL.load(self.DL.C,Ct_id)
if kwargs['angles']:
self.DL.C.islocal=False
self.DL.C.locbas(Tt=self.DL.Ta, Tr=self.DL.Tb)
if 'H' in kwargs['typ']:
H_id = line['H_id']
self.DL.load(self.DL.H,H_id)
return(self.DL)
def _show3(self, **kwargs):
""" 3D show using Mayavi
Parameters
----------
t: float
time index
link: list
[id_a, id_b]
id_a : node id a
id_b : node id b
'lay': bool
show layout
'net': bool
show net
'body': bool
show bodies
'rays': bool
show rays
"""
defaults = {'t': 0,
'link': [],
'wstd':[],
'lay': True,
'net': True,
'body': True,
'rays': True,
'ant': False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
link = kwargs['link']
self.update_pos(kwargs['t'])
if len(self.data) != 0:
df = self.data[self.data.index == pd.to_datetime(kwargs['t'])]
if len(df) == 0:
raise AttributeError('invalid time')
# default
if link ==[]:
line = df[df.index<=pd.to_datetime(0)]
link = [line['id_a'].values[0],line['id_b'].values[0]]
else :
# get info of the corresponding timestamp
line = df[(df['id_a'] == link[0]) & (df['id_b'] == link[1])]
if len(line) == 0:
line = df[(df['id_b'] == link[0]) & (df['id_a'] == link[1])]
if len(line) == 0:
raise AttributeError('invalid link')
rayid = line['ray_id'].values[0]
self.DL.a = self.N.node[link[0]]['p']
self.DL.b = self.N.node[link[1]]['p']
self.DL.Ta = self.N.node[link[0]]['T']
self.DL.Tb = self.N.node[link[1]]['T']
self.DL.load(self.DL.R,rayid)
self.DL._show3(newfig= False,
lay= kwargs['lay'],
rays= kwargs['rays'],
ant=False)
else :
self.DL._show3(newfig= False,
lay= True,
rays= False,
ant=False)
if kwargs['net']:
self.N._show3(newfig=False)
if kwargs['body']:
for p in self.dpersons:
self.dpersons[p]._show3(newfig=False,
topos=True,
pattern=kwargs['ant'])
# def _saveh5_init(self):
# """ initialization of the h5py file
# """
# filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
# import ipdb
# try:
# f5 = h5py.File(filenameh5, 'w')
# f5.create_dataset('time', shape=self.time.shape, data=self.time)
# f5.close()
# except:
# f5.close()
# raise NameError('simultra.saveinit: \
# issue when writting h5py file')
def _saveh5(self, ut, ida, idb, wstd):
""" Save in h5py format
Parameters
----------
ut : int
time index in self.time
ida : string
node a index
idb : string
node b index
wstd : string
wireless standard of the link
Notes
-----
Dataset organisation:
simultraj_<trajectory_filename.h5>.h5
|
|time
| ...
|
|/<tidx_ida_idb_wstd>/ |attrs
| |a_k
| |t_k
Root dataset :
time : array
range of simulation time
Group identifier :
tidx : index in time dataset
ida : node a index in Network
idb : node b index in Network
wstd : wireless standard of the link of interest
Inside group:
a_k : alpha_k values
t_k : tau_k values
See Also
--------
pylayers.simul.links
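A sketch of reading one group back directly with h5py (the group
name below is a placeholder built as <tidx_ida_idb_wstd>):

fh5 = h5py.File(filenameh5, 'r')
ak = fh5['0_ida_idb_wstd']['alphak'][:]
tk = fh5['0_ida_idb_wstd']['tauk'][:]
fh5.close()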
"""
filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
grpname = str(ut) + '_' + ida + '_' + idb + '_' + wstd
# try/except to avoid losing the h5 file in case of a
# read/write error
try:
fh5 = h5py.File(filenameh5, 'a')
if grpname not in fh5.keys():
fh5.create_group(grpname)
f = fh5[grpname]
# for k in kwargs:
# f.attrs[k] = kwargs[k]
f.create_dataset('alphak',
shape=self._ak.shape,
maxshape=(None,),
data=self._ak)
f.create_dataset('tauk',
shape=self._tk.shape,
maxshape=(None,),
data=self._tk)
else:
pass  # print(grpname + ' already exists in ' + filenameh5)
fh5.close()
except:
fh5.close()
raise NameError('Simultraj._saveh5: issue when writing h5py file')
def _loadh5(self, grpname):
""" Load in h5py format
Parameters
----------
grpname : string
group name, as found in the aktk_id column of self.data
Returns
-------
(ak, tk)
ak : ndarray
alpha_k
tk : ndarray
tau_k
"""
filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
# try/except to avoid losing the h5 file in case of a
# read/write error
try:
fh5 = h5py.File(filenameh5, 'r')
if grpname not in fh5.keys():
fh5.close()
raise NameError(grpname + ' cannot be reached in ' + self.filename)
f = fh5[grpname]
# for k in f.attrs.keys():
# conf[k]=f.attrs[k]
ak = f['alphak'][:]
tk = f['tauk'][:]
fh5.close()
return ak, tk
except:
fh5.close()
raise NameError('Simultraj._loadh5: issue when reading h5py file')
def tocsv(self, ut, ida, idb, wstd,init=False):
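""" append the last row of self.data, together with the ak/tk vectors,
to the csv file self._filecsv
Parameters
----------
ut : int
time index in self.time
ida : string
node a index
idb : string
node b index
wstd : string
wireless standard of the link
init : bool
if True, first write a header row with the column names
"""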
filecsv = pyu.getlong(self._filecsv,pstruc['DIRLNK'])
with open(filecsv, 'a') as csvfile:
fil = csv.writer(csvfile, delimiter=';',
quoting=csv.QUOTE_MINIMAL)
if init:
keys = self.data.iloc[-1].keys()
data = [k for k in keys]
data.append('ak')
data.append('tk')
fil.writerow(data)
values = self.data.iloc[-1].values
data = [v for v in values]
sak = str(self._ak.tolist())
stk = str(self._tk.tolist())
data.append(sak)
data.append(stk)
fil.writerow(data)
if __name__ == "__main__":
#plt.ion()
doctest.testmod()
|
pylayers/pylayers
|
pylayers/simul/simultraj.py
|
Python
|
mit
| 43,608
|
[
"Mayavi"
] |
26c53bb20f07461e4cee52b8d4faa9595008d493445fec790413e52b8180eafa
|
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "Colour Developers" # (translatable)
BLOG_TITLE = "Colour Science" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "https://www.colour-science.org/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
BASE_URL = "https://www.colour-science.org/"
BLOG_EMAIL = "colour-developers@colour-science.org"
BLOG_DESCRIPTION = "Colour: Colour Science for Python" # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# az Azerbaijani
# bg Bulgarian
# bs Bosnian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# gl Galician
# he Hebrew
# hi Hindi
# hr Croatian
# hu Hungarian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# lt Lithuanian
# nb Norwegian (Bokmål)
# nl Dutch
# pa Punjabi
# pl Polish
# pt Portuguese
# pt_br Portuguese (Brazil)
# ru Russian
# sk Slovak
# sl Slovene
# sq Albanian
# sr Serbian (Cyrillic)
# sr_latin Serbian (Latin)
# sv Swedish
# te Telugu
# th Thai
# tr Turkish [NOT tr_TR]
# uk Ukrainian
# ur Urdu
# zh_cn Chinese (Simplified)
# zh_tw Chinese (Traditional)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('https://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('https://apple.com/', 'Apple'),
# ('https://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 4 theme,
# may present issues if the menu is too large.
# (in Bootstrap, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
(
(
(
"/apps",
'<span class="font-weight-bold">Apps (Colour - Dash)</span>',
),
(
"http://awesome-colour.org/",
'<span class="font-weight-bold">Awesome Colour</span>',
),
("/colour", '<span class="font-weight-bold">Colour</span>'),
(
"/colour-datasets",
'<span class="font-weight-bold">Colour - Datasets</span>',
),
(
"/colour-demosaicing",
'<span class="font-weight-bold">Colour - Demosaicing</span>',
),
(
"/colour-hdri",
'<span class="font-weight-bold">Colour - HDRI</span>',
),
(
"/colour-checker-detection",
'<span class="font-weight-bold">Colour - Checker Detection</span>',
),
("/colour-maya", "Colour - Maya"),
("/colour-nuke", "Colour - Nuke"),
("/colour-playground", "Colour - Playground"),
("/colour-spectroscope", "Colour - Spectroscope"),
("/experiments", "Experiments"),
),
"Projects",
),
(
(
(
"/installation-guide",
'<span class="font-weight-bold">Installation Guide</span>',
),
(
"https://colour.readthedocs.io/en/develop/tutorial.html",
'<span class="font-weight-bold">Static Tutorial</span>',
),
(
"https://colab.research.google.com/notebook#fileId="
"1Im9J7or9qyClQCv5sPHmKdyiQbG4898K&"
"offline=true&sandboxMode=true",
'<span class="font-weight-bold">Interactive Tutorial</span>',
),
(
"https://colab.research.google.com/notebook#fileId="
"1NRcdXSCshivkwoU2nieCvC3y14fx1X4X&"
"offline=true&sandboxMode=true",
'<span class="font-weight-bold">How-To Guide</span>',
),
(
"/api-reference",
'<span class="font-weight-bold">API Reference</span>',
),
(
"/code-of-conduct",
'<span class="font-weight-bold">Code of Conduct</span>',
),
(
"/contributing",
'<span class="font-weight-bold">Contributing</span>',
),
(
"/contributors",
'<span class="font-weight-bold">Contributors</span>',
),
("/features", "Features"),
("/history", "History"),
(
"https://colour.readthedocs.io/en/develop/bibliography.html",
"Bibliography",
),
("https://doi.org/10.5281/zenodo.4445350", "Cite Us"),
("/cited-by", "Cited By"),
("https://opensource.org/licenses/BSD-3-Clause", "License"),
("/search", "Search"),
),
"Documentation",
),
(
(
("/blog", '<span class="font-weight-bold">Posts</span>'),
("/archive.html", "Archive"),
("/categories/", "Tags"),
),
"Blog",
),
(
"mailto:colour-developers@colour-science.org",
'<i class="fas fa-envelope"></i>',
),
(
"https://github.com/colour-science/colour",
'<i class="fab fa-github"></i>',
),
(
"https://www.facebook.com/python.colour.science",
'<i class="fab fa-facebook"></i>',
),
(
"https://gitter.im/colour-science/colour",
'<i class="fab fa-gitter"></i>',
),
(
"https://twitter.com/colour_science",
'<i class="fab fa-twitter"></i>',
),
),
}
# Alternative navigation links. Works the same way NAVIGATION_LINKS does,
# although themes may not always support them. (translatable)
# (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title)
NAVIGATION_ALT_LINKS = {DEFAULT_LANG: {}}
# Name of the theme to use.
THEME = "colour-science"
# Primary color of your theme. This will be used to customize your theme.
# Must be a HEX value.
THEME_COLOR = "#191919"
# Theme configuration. Fully theme-dependent. (translatable)
# Examples below are for bootblog4.
THEME_CONFIG = {
DEFAULT_LANG: {
# Show the latest featured post in a large box, with the previewimage as its background.
# 'featured_large': False,
# Show the first (remaining) two featured posts in small boxes.
# 'featured_small': False,
# Show featured posts on mobile.
# 'featured_on_mobile': True,
# Show image in `featured_large` on mobile.
# `featured_small` displays them only on desktop.
# 'featured_large_image_on_mobile': True,
# Strip HTML from featured post text.
# 'featured_strip_html': False,
# Contents of the sidebar. If empty, the sidebar is not displayed.
# 'sidebar': ''
}
}
# POSTS and PAGES contains (wildcard, destination, template) tuples.
# (translatable)
#
# The wildcard is used to generate a list of source files
# (whatever/thing.rst, for example).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.rst and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output/TRANSLATIONS[lang]/destination/pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
# The page might also be placed in /destination/pagename/index.html
# if PRETTY_URLS are enabled.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds, indexes, tag lists and archives and are considered part
# of a blog, while PAGES are just independent HTML pages.
#
# Finally, note that destination can be translated, i.e. you can
# specify a different translation folder per language. Example:
# PAGES = (
# ("pages/*.rst", {"en": "pages", "de": "seiten"}, "page.tmpl"),
# ("pages/*.md", {"en": "pages", "de": "seiten"}, "page.tmpl"),
# )
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
("posts/*.ipynb", "posts", "post.tmpl"),
)
PAGES = (
("pages/*.rst", "", "page.tmpl"),
("pages/*.md", "", "page.tmpl"),
("pages/*.txt", "", "page.tmpl"),
("pages/*.html", "", "page.tmpl"),
("pages/*.ipynb", "", "page.tmpl"),
)
# Below this point, everything is optional
# Posts' dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "Europe/London"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates. (translatable)
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used. (translatable)
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, Bootstrap already does.
# DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# LOCALES = {}
# LOCALE_FALLBACK = None
# LOCALE_DEFAULT = None
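# A minimal sketch (values are placeholders, adjust to your OS):
# LOCALES = {"en": "en_US.utf8"}
# LOCALE_FALLBACK = "en_US.utf8"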
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": (".rst", ".txt"),
"markdown": (".md", ".mdown", ".markdown"),
"textile": (".textile",),
"txt2tags": (".t2t",),
"bbcode": (".bb",),
"wiki": (".wiki",),
"ipynb": (".ipynb",),
"html": (".html", ".htm"),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": (".php",),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# Preferred metadata format for new posts
# "Nikola": reST comments, wrapped in a HTML comment if needed (default)
# "YAML": YAML wrapped in "---"
# "TOML": TOML wrapped in "+++"
# "Pelican": Native markdown metadata or reST docinfo fields. Nikola style for other formats.
# METADATA_FORMAT = "Nikola"
# Use date-based path when creating posts?
# Can be enabled on a per-post basis with `nikola new_post -d`.
# The setting is ignored when creating pages.
# NEW_POST_DATE_PATH = False
# What format to use when creating posts with date paths?
# Default is '%Y/%m/%d', other possibilities include '%Y' or '%Y/%m'.
# NEW_POST_DATE_PATH_FORMAT = '%Y/%m/%d'
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
WRITE_TAG_CLOUD = False
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag RSS_EXTENSION (RSS feed for a tag)
# (translatable)
# TAG_PATH = "categories"
# By default, the list of tags is stored in
# output / TRANSLATION[lang] / TAG_PATH / index.html
# (see explanation for TAG_PATH). This location can be changed to
# output / TRANSLATION[lang] / TAGS_INDEX_PATH
# with an arbitrary relative path TAGS_INDEX_PATH.
# (translatable)
# TAGS_INDEX_PATH = "tags.html"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for tag pages. The default is "Posts about TAG".
# TAG_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page, the tag cloud and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ["mathjax"]
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS posts with that tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# A list of dictionaries specifying tags which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# For example:
# [
# {'en': 'private', 'de': 'Privat'},
# {'en': 'work', 'fr': 'travail', 'de': 'Arbeit'},
# ]
# TAG_TRANSLATIONS = []
# If set to True, a tag in a language will be treated as a translation
# of the literally same tag in all other languages. Enable this if you
# do not translate tags, for example.
# TAG_TRANSLATIONS_ADD_DEFAULTS = True
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category RSS_EXTENSION (RSS feed for a category)
# (translatable)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# By default, the list of categories is stored in
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html
# (see explanation for CATEGORY_PATH). This location can be changed to
# output / TRANSLATION[lang] / CATEGORIES_INDEX_PATH
# with an arbitrary relative path CATEGORIES_INDEX_PATH.
# (translatable)
# CATEGORIES_INDEX_PATH = "categories.html"
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for category pages. The default is "Posts about CATEGORY".
# CATEGORY_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# A list of dictionaries specifying categories which translate to each other.
# Format: a list of dicts {language: translation, language2: translation2, …}
# See TAG_TRANSLATIONS example above.
# CATEGORY_TRANSLATIONS = []
# If set to True, a category in a language will be treated as a translation
# of the literally same category in all other languages. Enable this if you
# do not translate categories, for example.
# CATEGORY_TRANSLATIONS_ADD_DEFAULTS = True
# If no category is specified in a post, the destination path of the post
# can be used in its place. This replaces the sections feature. Using
# category hierarchies is recommended.
# CATEGORY_DESTPATH_AS_DEFAULT = False
# If True, the prefix will be trimmed from the category name, eg. if the
# POSTS destination is "foo/bar", and the path is "foo/bar/baz/quux",
# the category will be "baz/quux" (or "baz" if only the first directory is considered).
# Note that prefixes coming from translations are always ignored.
# CATEGORY_DESTPATH_TRIM_PREFIX = False
# If True, only the first directory of a path will be used.
# CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY = True
# Map paths to prettier category names. (translatable)
# CATEGORY_DESTPATH_NAMES = {
# DEFAULT_LANG: {
# 'webdev': 'Web Development',
# 'webdev/django': 'Web Development/Django',
# 'random': 'Odds and Ends',
# },
# }
# By default, category indexes will appear in CATEGORY_PATH and use
# CATEGORY_PREFIX. If this is enabled, those settings will be ignored (except
# for the index) and instead, they will follow destination paths (eg. category
# 'foo' might appear in 'posts/foo'). If the category does not come from a
# destpath, first entry in POSTS followed by the category name will be used.
# For this setting, category hierarchies are required and cannot be flattened.
# CATEGORY_PAGES_FOLLOW_DESTPATH = False
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
# ENABLE_AUTHOR_PAGES = True
# Path to author pages. Final locations are:
# output / TRANSLATION[lang] / AUTHOR_PATH / index.html (list of authors)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.html (list of posts by an author)
# output / TRANSLATION[lang] / AUTHOR_PATH / author RSS_EXTENSION (RSS feed for an author)
# (translatable)
# AUTHOR_PATH = "authors"
# If AUTHOR_PAGES_ARE_INDEXES is set to True, each author's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# AUTHOR_PAGES_ARE_INDEXES = False
# Set descriptions for author pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the author list or index page’s title.
# AUTHOR_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "Juanjo Conti": "Python coder and writer.",
# "Roberto Alsina": "Nikola father."
# },
# }
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ["Guest"]
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# (translatable)
INDEX_PATH = "blog"
# Optional HTML that is displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {DEFAULT_LANG: ""}
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Create previous, up, next navigation links for archives
# CREATE_ARCHIVE_NAVIGATION = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Extension for RSS feed files
# RSS_EXTENSION = ".xml"
# RSS filename base (without extension); used for indexes and galleries.
# (translatable)
# RSS_FILENAME_BASE = "rss"
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / RSS_FILENAME_BASE RSS_EXTENSION
# (translatable)
# RSS_PATH = ""
# Final location for the blog main Atom feed is:
# output / TRANSLATION[lang] / ATOM_PATH / ATOM_FILENAME_BASE ATOM_EXTENSION
# (translatable)
# ATOM_PATH = ""
# Atom filename base (without extension); used for indexes.
# (translatable)
ATOM_FILENAME_BASE = "feed"
# Extension for Atom feed files
# ATOM_EXTENSION = ".atom"
# Slug the Tag URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# Slug the Author URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_AUTHOR_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
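# A sketch (paths are placeholders):
# REDIRECTIONS = [("blog/old-post.html", "/posts/old-post/")]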
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = "src"
GITHUB_DEPLOY_BRANCH = "master"
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = "origin"
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = True
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
#
from nikola import filters # noqa
FILTERS = { # noqa
# ".html": [filters.add_header_permalinks],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
}
# Executable for the "yui_compressor" filter (defaults to 'yui-compressor').
# YUI_COMPRESSOR_EXECUTABLE = 'yui-compressor'
# Executable for the "closure_compiler" filter (defaults to 'closure-compiler').
# CLOSURE_COMPILER_EXECUTABLE = 'closure-compiler'
# Executable for the "optipng" filter (defaults to 'optipng').
# OPTIPNG_EXECUTABLE = 'optipng'
# Executable for the "jpegoptim" filter (defaults to 'jpegoptim').
# JPEGOPTIM_EXECUTABLE = 'jpegoptim'
# Executable for the "html_tidy_withconfig", "html_tidy_nowrap",
# "html_tidy_wrap", "html_tidy_wrap_attr" and "html_tidy_mini" filters
# (defaults to 'tidy5').
# HTML_TIDY_EXECUTABLE = 'tidy5'
# List of XPath expressions which should be used for finding headers
# ({hx} is replaced by headers h1 through h6).
# You must change this if you use a custom theme that does not use
# "e-content entry-content" as a class for post and page contents.
# HEADER_PERMALINKS_XPATH_LIST = ['*//div[@class="e-content entry-content"]//{hx}']
# Include *every* header (not recommended):
# HEADER_PERMALINKS_XPATH_LIST = ['*//{hx}']
# File blacklist for header permalinks. Contains output path
# (eg. 'output/index.html')
# HEADER_PERMALINKS_FILE_BLACKLIST = []
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.atom', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# If set to True, EXIF data will be copied when an image is thumbnailed or
# resized. (See also EXIF_WHITELIST)
# PRESERVE_EXIF_DATA = False
# If you have enabled PRESERVE_EXIF_DATA, this option lets you choose EXIF
# fields you want to keep in images. (See also PRESERVE_EXIF_DATA)
#
# For a full list of field names, please see here:
# http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
#
# This is a dictionary of lists. Each key in the dictionary is the
# name of an IFD, and each list item is a field you want to preserve.
# If you have an IFD with only a '*' item, *EVERY* item in it will be
# preserved. If you don't want to preserve anything in an IFD, remove it
# from the setting. By default, no EXIF information is kept.
# Setting the whitelist to anything other than {} implies
# PRESERVE_EXIF_DATA is set to True
# To preserve ALL EXIF data, set EXIF_WHITELIST to {"*": "*"}
# EXIF_WHITELIST = {}
# Some examples of EXIF_WHITELIST settings:
# Basic image information:
# EXIF_WHITELIST['0th'] = [
# "Orientation",
# "XResolution",
# "YResolution",
# ]
# If you want to keep GPS data in the images:
# EXIF_WHITELIST['GPS'] = ["*"]
# Embedded thumbnail information:
# EXIF_WHITELIST['1st'] = ["*"]
# If set to True, any ICC profile will be copied when an image is thumbnailed or
# resized.
# PRESERVE_ICC_PROFILES = False
# Folders containing images to be used in normal posts or pages.
# IMAGE_FOLDERS is a dictionary of the form {"source": "destination"},
# where "source" is the folder containing the images to be published, and
# "destination" is the folder under OUTPUT_PATH containing the images copied
# to the site. Thumbnail images will be created there as well.
# To reference the images in your posts, include a leading slash in the path.
# For example, if IMAGE_FOLDERS = {'images': 'images'}, write
#
# .. image:: /images/tesla.jpg
#
# See the Nikola Handbook for details (in the “Embedding Images” and
# “Thumbnails” sections)
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {"images": "images"}
# IMAGE_THUMBNAIL_SIZE = 400
# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# prettier URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this setting are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
#
# If the following is true, a page range navigation will be inserted to indices.
# Please note that this will undo the effect of INDEXES_STATIC, as all index pages
# must be recreated whenever the number of pages changes.
# SHOW_INDEX_PAGE_NAVIGATION = False
# If the following is True, a meta name="generator" tag is added to pages. The
# generator tag is used to specify the software used to generate the page
# (it promotes Nikola).
# META_GENERATOR_TAG = True
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored. Leave empty to disable.
# Can be any of:
# algol, algol_nu, autumn, borland, bw, colorful, default, emacs, friendly,
# fruity, igor, lovelace, manni, monokai, murphy, native, paraiso-dark,
# paraiso-light, pastie, perldoc, rrt, tango, trac, vim, vs, xcode
# This list MAY be incomplete since pygments adds styles every now and then.
# Check with list(pygments.styles.get_all_styles()) in an interpreter.
# CODE_COLOR_SCHEME = 'default'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = (
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# )
# Show teasers (instead of full posts) in indexes? Defaults to False.
INDEX_TEASERS = True
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {post_title} The title of the post.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = (
'<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
)
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
# Minimum example for use with Piwik: "pk_campaign=feed"
# The following tags exist and are replaced for you:
# {feedRelUri} A relative link to the feed.
# {feedFormat} The name of the syndication format.
# Example using replacement for use with Google Analytics:
# "utm_source={feedRelUri}&utm_medium=nikola_feed&utm_campaign={feedFormat}_feed"
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = """
<ul class="nav justify-content-center">
<li class="nav-item mx-3">
<a href="mailto:colour-developers@colour-science.org">
<i class="fas fa-envelope text-light"></i>
</a>
</li>
<li class="nav-item mx-3">
<a href="https://github.com/colour-science/colour">
<i class="fab fa-github text-light"></i>
</a>
</li>
<li class="nav-item mx-3">
<a href="https://gitter.im/colour-science/colour">
<i class="fab fa-gitter text-light"></i>
</a>
</li>
<li class="nav-item mx-3">
<a href="https://twitter.com/colour_science">
<i class="fab fa-twitter text-light"></i>
</a>
</li>
<li class="nav-item mx-3">
<a href="https://www.facebook.com/python.colour.science">
<i class="fab fa-facebook text-light"></i>
</a>
</li>
</ul>
<div class="text-center text-light py-3"><span>Copyright © 2013-{date} – {author}</span></div>
"""
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# If you need to use the literal braces '{' and '}' in your footer text, use
# '{{' and '}}' to escape them (str.format is used)
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE,
},
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = (
'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
)
RSS_COPYRIGHT_PLAIN = "Contents © {date} {author} {license}"
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "colour-science"
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead do /foo/default.html => /foo)
STRIP_INDEXES = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = True
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, apply the scheduling rule to all posts (not pages!) by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\\[", right: "\\\\]", display: true},
# {left: "\\\\begin{equation*}", right: "\\\\end{equation*}", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\\(", right: "\\\\)", display: false}
# ]
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# The default is ['fenced_code', 'codehilite']
MARKDOWN_EXTENSIONS = [
"markdown.extensions.fenced_code",
"markdown.extensions.codehilite",
"markdown.extensions.extra",
]
# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)
# Default is {} (no config at all)
# MARKDOWN_EXTENSION_CONFIGS = {}
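# Illustrative sketch (assumed option names; see the python-markdown
# reference above; not part of the original configuration):
# MARKDOWN_EXTENSION_CONFIGS = {
#     DEFAULT_LANG: {
#         "markdown.extensions.codehilite": {"linenums": False},
#     },
# }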
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example:
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# Pandoc does not demote headers by default. To enable this, you can use, for example
# ['--base-header-level=2']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style"> # noqa
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of posts per index page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# By default, Nikola does not generate Atom files for indexes, nor link to
# them. Generate Atom for tags by setting TAG_PAGES_ARE_INDEXES to True.
# Atom feeds are built based on INDEX_DISPLAY_POST_COUNT and not FEED_LENGTH
# Switch between plain-text summaries and full HTML content using the
# FEED_TEASER option. FEED_LINKS_APPEND_QUERY is also respected. Atom feeds
# are generated even for old indexes and have pagination link relations
# between each other. Old Atom feeds with no changes are marked as archived.
# GENERATE_ATOM = False
# Only include teasers in Atom and RSS feeds. Disabling this includes the
# full content. Defaults to True.
# FEED_TEASERS = True
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
# FEED_PLAIN = False
# Number of posts in Atom and RSS feeds.
# FEED_LENGTH = 10
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# A search form to search this site, for the sidebar. You can use a Google
# custom search (https://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- DuckDuckGo custom search -->
# <form method="get" id="search" action="https://duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s">
# <input type="hidden" name="k8" value="#444444">
# <input type="hidden" name="k9" value="#D51920">
# <input type="hidden" name="kt" value="h">
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;">
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Google custom search -->
# <form method="get" action="https://www.google.com/search" class="navbar-form navbar-right" role="search">
# <div class="form-group">
# <input type="text" name="q" class="form-control" placeholder="Search">
# </div>
# <button type="submit" class="btn btn-primary">
# <span class="glyphicon glyphicon-search"></span>
# </button>
# <input type="hidden" name="sitesearch" value="%s">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages' HEAD tag. This will be added right
# before </head>
# (translatable)
EXTRA_HEAD_DATA = """
<script
src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
crossorigin="anonymous">
</script>
<link rel="stylesheet"
href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"
integrity="sha384-DNOHZ68U8hZfKXOrtjWvjxusGo9WQnrNx2sqG0tfsghAvtVlRW3tvkXWZh58N9jp"
crossorigin="anonymous">
<meta name="google-site-verification" content="CeWflIcbu-x7Ur7qbzTYJiRFvoGF5RljJyGZMg7H9co" />
"""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
BODY_END = """
<!-- >>> Gitter -->
<script>
((window.gitter = {}).chat = {}).options = {
room: 'colour-science/colour'
};
</script>
<script async src="https://sidecar.gitter.im/dist/sidecar.v1.js"></script>
<!-- <<< Gitter -->
<!-- >>> Google Analytics -->
<script async src="https://www.colour-science.org/assets/js/analytics.js"></script>
<!-- <<< Google Analytics -->
"""
# Nikola can extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '.*\/(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.rst'
# (Note the '.*\/' in the beginning -- matches source paths relative to conf.py)
# FILE_METADATA_REGEXP = None
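# Illustrative example (hypothetical filename): the regexp above would
# extract date='2018-06-01', slug='new-post', title='New Post' from a
# source file named 'posts/2018-06-01-new-post-New Post.rst'.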
# Should titles fetched from file metadata be unslugified (made prettier)?
# FILE_METADATA_UNSLUGIFY_TITLES = True
# If enabled, extract metadata from docinfo fields in reST documents
# USE_REST_DOCINFO_METADATA = False
# If enabled, hide docinfo fields in reST document output
# HIDE_REST_DOCINFO = False
# Map metadata from other formats to Nikola names.
# Supported formats: yaml, toml, rest_docinfo, markdown_metadata
# METADATA_MAPPING = {}
#
# Example for Pelican compatibility:
# METADATA_MAPPING = {
# "rest_docinfo": {"summary": "description", "modified": "updated"},
# "markdown_metadata": {"summary": "description", "modified": "updated"}
# }
# Other examples: https://getnikola.com/handbook.html#mapping-metadata-from-other-formats
# Map metadata between types/values. (Runs after METADATA_MAPPING.)
# Supported formats: nikola, yaml, toml, rest_docinfo, markdown_metadata
# The value on the right should be a dict of callables.
# METADATA_VALUE_MAPPING = {}
# Examples:
# METADATA_VALUE_MAPPING = {
# "yaml": {"keywords": lambda value: ', '.join(value)}, # yaml: 'keywords' list -> str
# "nikola": {
# "widgets": lambda value: value.split(', '), # nikola: 'widgets' comma-separated string -> list
# "tags": str.lower # nikola: force lowercase 'tags' (input would be string)
# }
# }
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS into single files to make
# site loading faster in an HTTP/1.1 environment; this is not recommended
# for HTTP/2.0 when caching is used. Defaults to True.
USE_BUNDLES = False
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin.
# Use with care.
# DISABLE_INDEXES = False
# DISABLE_MAIN_ATOM_FEED = False
# DISABLE_MAIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# Enabling hyphenation has been shown to break math support in some cases,
# use with caution.
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# Docutils, by default, will perform a transform in your documents
# extracting unique titles at the top of your document and turning
# them into metadata. This surprises a lot of people, and setting
# this option to True will prevent it.
# NO_DOCUTILS_TITLE_TRANSFORM = False
# If you don’t like slugified file names ([a-z0-9] and a literal dash)
# and would prefer to use all the characters your file system allows,
# set this to False.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# If set to True, the tags 'draft', 'mathjax' and 'private' have special
# meaning. If set to False, these tags are handled like regular tags.
USE_TAG_METADATA = False
# If set to True, a warning is issued if one of the 'draft', 'mathjax'
# or 'private' tags is found in a post. Useful for checking that
# migration was successful.
WARN_ABOUT_TAG_METADATA = False
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
# Compiler to process Sass files.
SASS_COMPILER = "sass"
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
SASS_OPTIONS = ["--style", "compressed"]
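# Illustrative sketch (hypothetical file name; not part of the original
# configuration): with the settings above, the resulting invocation is
#     sass --style compressed theme.scss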
|
colour-science/colour-science.org
|
conf.py
|
Python
|
bsd-3-clause
| 59,916
|
[
"VisIt"
] |
3225e94a6b9690dcda67b64e4023294b15c1b2daa4068f646f403844a9b61cf6
|
##
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide utilities for printing endnotes in text reports.
"""
from ..docgen import FontStyle, ParagraphStyle, FONT_SANS_SERIF
from ...lib import NoteType, Citation
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ...utils.string import confidence
from ...datehandler import displayer
def add_endnote_styles(style_sheet):
"""
Add paragraph styles to a style sheet to be used for displaying endnotes.
@param style_sheet: Style sheet
@type style_sheet: L{docgen.StyleSheet}
"""
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=14, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set_top_margin(0.2)
para.set_bottom_margin(0.2)
para.set_description(_('The style used for the generation header.'))
style_sheet.add_paragraph_style("Endnotes-Header", para)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_top_margin(0.2)
para.set_bottom_margin(0.0)
para.set_description(_('The basic style used for the endnotes source display.'))
style_sheet.add_paragraph_style("Endnotes-Source", para)
para = ParagraphStyle()
para.set(lmargin=.75)
para.set_top_margin(0.2)
para.set_bottom_margin(0.0)
para.set_description(_('The basic style used for the endnotes notes display.'))
style_sheet.add_paragraph_style("Endnotes-Source-Notes", para)
para = ParagraphStyle()
para.set(first_indent=-0.9, lmargin=1.9)
para.set_top_margin(0.2)
para.set_bottom_margin(0.0)
para.set_description(_('The basic style used for the endnotes reference display.'))
style_sheet.add_paragraph_style("Endnotes-Ref", para)
para = ParagraphStyle()
para.set(lmargin=1.9)
para.set_top_margin(0.2)
para.set_bottom_margin(0.0)
para.set_description(_('The basic style used for the endnotes reference notes display.'))
style_sheet.add_paragraph_style("Endnotes-Ref-Notes", para)
def cite_source(bibliography, database, obj):
"""
Cite any sources for the object and add them to the bibliography.
    @param bibliography: The bibliography to contain the citations.
    @type bibliography: L{Bibliography}
    @param database: The database that the sources come from.
    @type database: DbBase
    @param obj: An object with source references.
    @type obj: L{gen.lib.CitationBase}
    @return: The text of the citation reference(s), e.g. "1" or "1, 2a".
"""
txt = ""
slist = obj.get_citation_list()
if slist:
first = 1
for ref in slist:
if not first:
txt += ', '
first = 0
citation = database.get_citation_from_handle(ref)
(cindex, key) = bibliography.add_reference(citation)
txt += "%d" % (cindex + 1)
if key is not None:
txt += key
return txt
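# Illustrative sketch (hypothetical objects): for an object citing two
# sources,
#     txt = cite_source(bibliography, database, event)
# might return "1, 2a", where citation index 2 carries the key "a" that
# distinguishes multiple references to the same source.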
def write_endnotes(bibliography, database, doc, printnotes=False, links=False):
"""
Write all the entries in the bibliography as endnotes.
@param bibliography: The bibliography that contains the citations.
@type bibliography: L{Bibliography}
@param database: The database that the sources come from.
@type database: DbBase
@param doc: The document to write the endnotes into.
@type doc: L{docgen.TextDoc}
@param printnotes: Indicate if the notes attached to a source must be
written too.
@type printnotes: bool
    @param links: Indicate if URL links should be made 'clickable'.
@type links: bool
"""
if bibliography.get_citation_count() == 0:
return
doc.start_paragraph('Endnotes-Header')
doc.write_text(_('Endnotes'))
doc.end_paragraph()
cindex = 0
for citation in bibliography.get_citation_list():
cindex += 1
source = database.get_source_from_handle(citation.get_source_handle())
first = True
doc.start_paragraph('Endnotes-Source', "%d." % cindex)
doc.write_text(_format_source_text(source), links=links)
doc.end_paragraph()
if printnotes:
_print_notes(source, database, doc, 'Endnotes-Source-Notes', links)
for key, ref in citation.get_ref_list():
doc.start_paragraph('Endnotes-Ref', "%s:" % key)
doc.write_text(_format_ref_text(ref, key), links=links)
doc.end_paragraph()
if printnotes:
_print_notes(ref, database, doc, 'Endnotes-Ref-Notes', links)
def _format_source_text(source):
if not source: return ""
src_txt = ""
if source.get_author():
src_txt += source.get_author()
if source.get_title():
if src_txt:
src_txt += ", "
src_txt += '"%s"' % source.get_title()
if source.get_publication_info():
if src_txt:
src_txt += ", "
src_txt += source.get_publication_info()
if source.get_abbreviation():
if src_txt:
src_txt += ", "
src_txt += "(%s)" % source.get_abbreviation()
return src_txt
def _format_ref_text(ref, key):
if not ref: return ""
ref_txt = ""
datepresent = False
date = ref.get_date_object()
if date is not None and not date.is_empty():
datepresent = True
if datepresent:
if ref.get_page():
ref_txt = "%s - %s" % (ref.get_page(), displayer.display(date))
else:
ref_txt = displayer.display(date)
else:
ref_txt = ref.get_page()
# Print only confidence level if it is not Normal
if ref.get_confidence_level() != Citation.CONF_NORMAL:
ref_txt += " [" + confidence[ref.get_confidence_level()] + "]"
return ref_txt
def _print_notes(obj, db, doc, style, links):
note_list = obj.get_note_list()
ind = 1
for notehandle in note_list:
note = db.get_note_from_handle(notehandle)
contains_html = note.get_type() == NoteType.HTML_CODE
doc.write_styled_note(note.get_styledtext(), note.get_format(), style,
contains_html=contains_html, links=links)
ind += 1
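# Typical call sequence in a report (illustrative sketch; `bib`, `db` and
# `doc` stand for the report's Bibliography, database and TextDoc):
#     for event in events:
#         doc.write_text(cite_source(bib, db, event))
#     write_endnotes(bib, db, doc, printnotes=True, links=True)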
|
Forage/Gramps
|
gramps/gen/plug/report/endnotes.py
|
Python
|
gpl-2.0
| 7,028
|
[
"Brian"
] |
42f82df62e81fe0bb186a14b0772308efa38935fd31126ac4faa470f763a26b0
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import sqlite3
class FactoidsTestCase(ChannelPluginTestCase):
plugins = ('Factoids',)
def testRandomfactoid(self):
self.assertError('random')
self.assertNotError('learn jemfinch is my primary author')
self.assertRegexp('random', 'primary author')
def testLearn(self):
self.assertError('learn is my primary author')
self.assertError('learn jemfinch is')
self.assertNotError('learn jemfinch is my primary author')
self.assertNotError('info jemfinch')
self.assertRegexp('whatis jemfinch', 'my primary author')
self.assertRegexp('whatis JEMFINCH', 'my primary author')
self.assertRegexp('whatis JEMFINCH 1', 'my primary author')
self.assertNotError('learn jemfinch is a bad assembly programmer')
self.assertRegexp('whatis jemfinch 2', 'bad assembly')
self.assertNotRegexp('whatis jemfinch 2', 'primary author')
self.assertRegexp('whatis jemfinch', r'.*primary author.*assembly')
self.assertError('forget jemfinch')
self.assertError('forget jemfinch 3')
self.assertError('forget jemfinch 0')
self.assertNotError('forget jemfinch 2')
self.assertNotError('forget jemfinch 1')
self.assertError('whatis jemfinch')
self.assertError('info jemfinch')
self.assertNotError('learn foo bar is baz')
self.assertNotError('info foo bar')
self.assertRegexp('whatis foo bar', 'baz')
self.assertNotError('learn foo bar is quux')
self.assertRegexp('whatis foo bar', '.*baz.*quux')
self.assertError('forget foo bar')
self.assertNotError('forget foo bar 2')
self.assertNotError('forget foo bar 1')
self.assertError('whatis foo bar')
self.assertError('info foo bar')
self.assertError('learn foo bar baz') # No 'is'
self.assertError('learn foo bar') # No 'is'
with conf.supybot.plugins.Factoids.requireVoice.context(True):
self.assertError('learn jemfinch is my primary author')
self.irc.feedMsg(ircmsgs.mode(self.channel,
args=('+h', self.nick)))
self.assertNotError('learn jemfinch is my primary author')
def testChangeFactoid(self):
self.assertNotError('learn foo is bar')
self.assertNotError('change foo 1 s/bar/baz/')
self.assertRegexp('whatis foo', 'baz')
self.assertError('change foo 2 s/bar/baz/')
self.assertError('change foo 0 s/bar/baz/')
def testSearchFactoids(self):
self.assertNotError('learn jemfinch is my primary author')
self.assertNotError('learn strike is a cool person working on me')
self.assertNotError('learn inkedmn is another of my developers')
self.assertNotError('learn jamessan is jamessan is a developer of much python')
self.assertNotError('learn bwp is bwp is author of my weather command')
self.assertRegexp('factoids search --regexp /.w./', 'bwp')
self.assertRegexp('factoids search --regexp /^.+i/',
'jemfinch.*strike')
self.assertNotRegexp('factoids search --regexp /^.+i/', 'inkedmn')
self.assertRegexp('factoids search --regexp m/j/ --regexp m/ss/',
'jamessan')
self.assertRegexp('factoids search --regexp m/^j/ *ss*',
'jamessan')
self.assertRegexp('factoids search --regexp /^j/',
'jamessan.*jemfinch')
self.assertRegexp('factoids search j*', 'jamessan.*jemfinch')
self.assertRegexp('factoids search *ke*',
'inkedmn.*strike|strike.*inkedmn')
self.assertRegexp('factoids search ke',
'inkedmn.*strike|strike.*inkedmn')
self.assertRegexp('factoids search jemfinch',
'my primary author')
self.assertRegexp('factoids search --values primary author',
'my primary author')
def testWhatisOnNumbers(self):
self.assertNotError('learn 911 is emergency number')
self.assertRegexp('whatis 911', 'emergency number')
def testNotZeroIndexed(self):
self.assertNotError('learn foo is bar')
self.assertNotRegexp('info foo', '#0')
self.assertNotRegexp('whatis foo', '#0')
self.assertNotError('learn foo is baz')
self.assertNotRegexp('info foo', '#0')
self.assertNotRegexp('whatis foo', '#0')
def testInfoReturnsRightNumber(self):
self.assertNotError('learn foo is bar')
self.assertNotRegexp('info foo', '2 factoids')
def testInfoUsageCount(self):
self.assertNotError('learn moo is cow')
self.assertRegexp('info moo', 'recalled 0 times')
self.assertNotError('whatis moo')
self.assertRegexp('info moo', 'recalled 1 time')
def testLearnSeparator(self):
self.assertError('learn foo as bar')
self.assertNotError('learn foo is bar')
self.assertRegexp('whatis foo', 'bar')
orig = conf.supybot.plugins.Factoids.learnSeparator()
try:
conf.supybot.plugins.Factoids.learnSeparator.setValue('as')
self.assertError('learn bar is baz')
self.assertNotError('learn bar as baz')
self.assertRegexp('whatis bar', 'baz')
finally:
conf.supybot.plugins.Factoids.learnSeparator.setValue(orig)
def testShowFactoidIfOnlyOneMatch(self):
m1 = self.assertNotError('factoids search m/foo|bar/')
orig = conf.supybot.plugins.Factoids.showFactoidIfOnlyOneMatch()
try:
conf.supybot.plugins.Factoids. \
showFactoidIfOnlyOneMatch.setValue(False)
m2 = self.assertNotError('factoids search m/foo/')
self.failUnless(m1.args[1].startswith(m2.args[1]))
finally:
conf.supybot.plugins.Factoids. \
showFactoidIfOnlyOneMatch.setValue(orig)
def testInvalidCommand(self):
self.assertNotError('learn foo is bar')
self.assertRegexp('foo', 'bar')
self.assertNotError('learn mooz is cowz')
self.assertRegexp('moo', 'mooz')
self.assertRegexp('mzo', 'mooz')
self.assertRegexp('moz', 'mooz')
self.assertNotError('learn moped is pretty fast')
self.assertRegexp('moe', 'mooz.*moped')
self.assertError('nosuchthing')
def testWhatis(self):
self.assertNotError('learn foo is bar')
self.assertRegexp('whatis foo', 'bar')
self.assertRegexp('whatis foob', 'foo')
self.assertNotError('learn foob is barb')
self.assertRegexp('whatis foom', 'foo.*foob')
def testStandardSubstitute(self):
self.assertNotError('learn foo is this is $channel, and hour is $hour')
self.assertRegexp('whatis foo', 'this is #test, and hour is \d{1,2}')
self.assertRegexp('whatis --raw foo', 'this is \$channel, and hour is \$hour')
self.assertNotError('learn bar is this is $$channel escaped')
self.assertRegexp('whatis bar', 'this is \$channel')
self.assertNotError('learn bar is this is $minute')
self.assertRegexp('whatis bar', '\$channel.*\d{1,2}')
def testAlias(self):
self.assertNotError('learn foo is bar')
self.assertNotError('alias foo zoog')
self.assertRegexp('whatis zoog', 'bar')
self.assertNotError('learn foo is snorp')
self.assertError('alias foo gnoop')
self.assertNotError('alias foo gnoop 2')
self.assertRegexp('whatis gnoop', 'snorp')
def testRank(self):
self.assertNotError('learn foo is bar')
self.assertNotError('learn moo is cow')
self.assertRegexp('factoids rank', '#1 foo \(0\), #2 moo \(0\)')
self.assertRegexp('whatis moo', '.*cow.*')
self.assertRegexp('factoids rank', '#1 moo \(1\), #2 foo \(0\)')
self.assertRegexp('factoids rank 1', '#1 moo \(1\)')
self.assertNotRegexp('factoids rank 1', 'foo')
self.assertRegexp('factoids rank --plain', 'moo, foo')
self.assertRegexp('factoids rank --plain --alpha', 'foo, moo')
self.assertResponse('factoids rank --plain 1', 'moo')
def testQuoteHandling(self):
self.assertNotError('learn foo is "\\"bar\\""')
self.assertRegexp('whatis foo', r'"bar"')
def testLock(self):
self.assertNotError('learn foo is bar')
self.assertNotError('lock foo')
self.assertNotError('unlock foo')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
Ban3/Limnoria
|
plugins/Factoids/test.py
|
Python
|
bsd-3-clause
| 10,283
|
[
"MOE"
] |
90b7b19982ad59cdbdf4e1c40b9001fa65b851a42331bf8b3256f43a41956a9f
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
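# Doctest-style sketch (illustrative, not part of the original module;
# exact rows depend on the random state, the shape does not):
# >>> rng = np.random.RandomState(0)
# >>> _generate_hypercube(4, 3, rng).shape
# (4, 3)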
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
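# Doctest-style usage sketch (illustrative, not part of the original module):
# >>> X, y = make_classification(n_samples=100, n_features=5, n_informative=3,
# ...                            n_redundant=1, random_state=0)
# >>> X.shape, y.shape
# ((100, 5), (100,))
# >>> np.unique(y)
# array([0, 1])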
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
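# Doctest-style usage sketch (illustrative, not part of the original module;
# the default return_indicator='dense' yields a binary label matrix):
# >>> X, Y = make_multilabel_classification(n_samples=50, n_features=10,
# ...                                       n_classes=4, random_state=0)
# >>> X.shape, Y.shape
# ((50, 10), (50, 4))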
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
    # Generate a ground truth model with only n_informative features being
    # non-zero (the other features are not correlated with y and should be
    # ignored by a sparsifying regularizer such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
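# Doctest-style usage sketch (illustrative, not part of the original module;
# coef=True additionally returns the ground-truth linear model):
# >>> X, y, w = make_regression(n_samples=50, n_features=4, n_informative=2,
# ...                           coef=True, random_state=0)
# >>> X.shape, y.shape, w.shape
# ((50, 4), (50,), (4,))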
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
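# Doctest-style usage sketch (illustrative, not part of the original module):
# >>> X, y = make_circles(n_samples=100, factor=0.5, noise=0.05,
# ...                     random_state=0)
# >>> X.shape
# (100, 2)
# >>> np.bincount(y)
# array([50, 50])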
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
    algorithms.
    Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    # label counts must match the stacking order of X above: outer arc first
    # (n_samples_out points, class 0), then inner arc (n_samples_in, class 1)
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
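# Doctest-style usage sketch (illustrative, not part of the original module):
# >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
# >>> X.shape
# (100, 2)
# >>> np.bincount(y)
# array([50, 50])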
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
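# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_low_rank_spectrum`
# is a hypothetical helper. Because `u` and `v` have orthonormal columns, the
# singular values of the returned matrix equal the assembled bell + tail profile.
def _demo_low_rank_spectrum():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.5, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    i = np.arange(30, dtype=np.float64)
    expected = 0.5 * np.exp(-1.0 * (i / 5) ** 2) + 0.5 * np.exp(-0.1 * i / 5)
    # Both are sorted in decreasing order, so they can be compared directly.
    assert np.allclose(s, expected)
    return s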
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
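# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_sparse_coded_signal`
# is a hypothetical helper. It confirms Y = D X, unit-norm dictionary atoms and
# exactly n_nonzero_coefs active entries in each column of the code.
def _demo_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=20, n_components=15,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))
    assert np.allclose(np.sqrt((D ** 2).sum(axis=0)), 1.0)
    assert all(np.count_nonzero(X[:, i]) == 3 for i in range(X.shape[1]))
    return Y, D, X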
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
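# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_spd_matrix` is a
# hypothetical helper verifying symmetry and positive-definiteness.
def _demo_spd_matrix():
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)
    # All eigenvalues of a positive-definite matrix are strictly positive.
    assert np.all(np.linalg.eigvalsh(X) > 0)
    return X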
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
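# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_sparse_spd_matrix`
# is a hypothetical helper. The result is symmetric and positive-definite, and
# with norm_diag=True its diagonal is rescaled to ones.
def _demo_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=6, alpha=0.9, norm_diag=True, random_state=0)
    assert np.allclose(prec, prec.T)
    assert np.allclose(np.diag(prec), 1.0)
    assert np.all(np.linalg.eigvalsh(prec) > 0)
    return prec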
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
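# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_swiss_roll` is a
# hypothetical helper. With noise=0.0 the first/third coordinates satisfy
# x = t*cos(t), z = t*sin(t), so sqrt(x**2 + z**2) recovers the parameter t.
def _demo_swiss_roll():
    X, t = make_swiss_roll(n_samples=100, noise=0.0, random_state=0)
    assert np.allclose(np.sqrt(X[:, 0] ** 2 + X[:, 2] ** 2), t)
    return X, t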
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
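# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_gaussian_quantiles`
# is a hypothetical helper. With shuffle=False the samples are sorted by squared
# distance from the mean, so the labels are non-decreasing and the classes are
# (almost) equally sized.
def _demo_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   shuffle=False, random_state=0)
    assert np.all(np.diff(y) >= 0)
    assert np.bincount(y).tolist() == [30, 30, 30]
    return X, y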
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
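# --- Illustrative check (not part of the original scikit-learn source) ---
# A minimal sketch assuming numpy is imported as `np`; `_demo_biclusters` is a
# hypothetical helper. With noise=0.0 every (row, column) indicator pair selects
# a block of identical values.
def _demo_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=0.0,
                                       shuffle=False, random_state=0)
    for r, c in zip(rows, cols):
        block = data[np.outer(r, c)]
        assert np.allclose(block, block[0])
    return data, rows, cols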
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/datasets/samples_generator.py
|
Python
|
gpl-2.0
| 56,502
|
[
"Gaussian"
] |
9442f71b0fcf348848656dd2fa85178af4e9b2268ab4f52359b77c2cf2c8c74c
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_geodynamic_adiabat
----------------
This example script demonstrates how burnman can be used to
self-consistently calculate properties along 1D adiabatic profiles
for use in geodynamics simulations.
We interrogate a PerplexMaterial for material properties
as a function of pressure and temperature, and calculate
both unrelaxed (short timescale) and relaxed (long timescale)
properties. The latter are particularly important for
convection studies, where reactions are fast compared with
timescales of convection.
Finally, we show how burnman can be used to smooth entropy and
volume in order to create smoothly varying relaxed properties.
*Uses:*
* :doc:`mineral_database`
* :class:`burnman.perplex.PerplexMaterial`
* :func:`burnman.material.Material.evaluate`
* :func:`burnman.tools.interp_smoothed_array_and_derivatives`
*Demonstrates:*
* creation of a PerpleX material
* calculation of relaxed and unrelaxed thermodynamic properties
* smoothing of thermodynamic properties
* self consistent 1D geophysical profile generation
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from scipy.optimize import fsolve, brentq
from scipy.integrate import odeint
from scipy.interpolate import UnivariateSpline
# Define fitting function to find the temperature along the isentrope
def isentrope(rock, pressures, entropy, T_guess):
def _deltaS(T, S, P, rock):
rock.set_state(P, T)
return rock.S - S
sol = T_guess
temperatures = np.empty_like(pressures)
for i, P in enumerate(pressures):
sol = brentq(_deltaS, rock.bounds[1][0], rock.bounds[1][1], args=(entropy, P, rock))
temperatures[i] = sol
return temperatures
# Define function to find an isentrope given a
# 2D entropy interpolation function
def interp_isentrope(interp, pressures, entropy, T_guess):
def _deltaS(args, S, P):
T = args[0]
return interp(P, T)[0] - S
sol = [T_guess]
temperatures = np.empty_like(pressures)
for i, P in enumerate(pressures):
sol = fsolve(_deltaS, sol, args=(entropy, P))
temperatures[i] = sol[0]
return temperatures
# Define function to self consistently calculate depth and gravity profiles
# from pressure and density profiles.
def compute_depth_gravity_profiles(pressures, densities, surface_gravity, outer_radius):
gravity = [surface_gravity] * len(pressures) # starting guess
n_gravity_iterations = 5
for i in range(n_gravity_iterations):
# Integrate the hydrostatic equation
# Make a spline fit of densities as a function of pressures
rhofunc = UnivariateSpline(pressures, densities)
# Make a spline fit of gravity as a function of depth
gfunc = UnivariateSpline(pressures, gravity)
# integrate the hydrostatic equation
depths = np.ravel(odeint((lambda p, x: 1./(gfunc(x) * rhofunc(x))), 0.0, pressures))
radii = outer_radius - depths
rhofunc = UnivariateSpline(radii[::-1], densities[::-1])
poisson = lambda p, x: 4.0 * np.pi * burnman.constants.G * rhofunc(x) * x * x
gravity = np.ravel(odeint(poisson, surface_gravity*radii[0]*radii[0], radii))
gravity = gravity / radii / radii
return depths, gravity
# BEGIN USER INPUTS
# Declare the rock we want to use
rock=burnman.PerplexMaterial('../burnman/data/input_perplex/in23_1.tab')
# First we calculate the isentrope at a given potential temperature
potential_temperature = 1550. # K
max_pressure = 25.e9
outer_radius = 6371.e3
surface_gravity = 9.81
n_P_gridpoints = 501
n_T_gridpoints = 101
n_points = 301
pressure_std_dev = 5.e8
temperature_smoothing_factor = 0.5
truncate = 4.
save_output = False
# END USER INPUTS
min_grid_pressure = rock.bounds[0][0]
max_grid_pressure = rock.bounds[0][1]
min_grid_temperature = rock.bounds[1][0]
max_grid_temperature = rock.bounds[1][1]
rock.set_state(1.e5, potential_temperature)
entropy = rock.S
pressures = np.linspace(1.e5, max_pressure, n_points)
temperatures = isentrope(rock, pressures, entropy, potential_temperature)
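# Note: the name `isentrope` is re-bound on the next line from the fitting function
# defined above to a 1D spline of temperature as a function of pressure; the later
# calls isentrope(grid_pressures) and isentrope(pp) use this spline, not the function.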
isentrope = UnivariateSpline(pressures, temperatures)
# Properties can then be calculated along the isentrope
properties = rock.evaluate(['V', 'rho', 'molar_heat_capacity_p',
'thermal_expansivity', 'isothermal_compressibility',
'p_wave_velocity', 'shear_wave_velocity'],
pressures, temperatures)
volumes, densities, C_p, alphas, compressibilities, p_wave_velocities, s_wave_velocities = properties
specific_heats = C_p / rock.params['molar_mass']
depths, gravity = compute_depth_gravity_profiles(pressures, densities, surface_gravity, outer_radius)
x = pressures/1.e9
plt.rcParams['figure.figsize'] = 16, 8 # inches
fig = plt.figure()
ax_T = fig.add_subplot(2, 4, 1)
ax_T.plot(x, temperatures, label='unrelaxed')
ax_T.set_ylabel('Temperature (K)')
ax_T.set_xlabel('Pressures (GPa)')
ax_z = fig.add_subplot(2, 4, 2)
ax_z.plot(x, depths/1.e3)
ax_z.set_ylabel('Depths (km)')
ax_z.set_xlabel('Pressures (GPa)')
ax_g = fig.add_subplot(2, 4, 3)
ax_g.plot(x, gravity)
ax_g.set_ylabel('Gravity (m/s^2)')
ax_g.set_xlabel('Pressures (GPa)')
ax_rho = fig.add_subplot(2, 4, 4)
ax_rho.plot(x, densities)
ax_rho.set_ylabel('Density (kg/m^3)')
ax_rho.set_xlabel('Pressures (GPa)')
ax_cp = fig.add_subplot(2, 4, 5)
ax_cp.plot(x, specific_heats)
ax_cp.set_ylabel('Cp (J/K/kg)')
ax_cp.set_xlabel('Pressures (GPa)')
ax_alpha = fig.add_subplot(2, 4, 6)
ax_alpha.plot(x, alphas)
ax_alpha.set_ylabel('alpha (/K)')
ax_alpha.set_xlabel('Pressures (GPa)')
ax_beta = fig.add_subplot(2, 4, 7)
ax_beta.plot(x, compressibilities)
ax_beta.set_ylabel('compressibilities (/Pa)')
ax_beta.set_xlabel('Pressures (GPa)')
ax_vs = fig.add_subplot(2, 4, 8)
ax_vs.plot(x, p_wave_velocities, label='P')
ax_vs.plot(x, s_wave_velocities, label='S')
ax_vs.legend(loc='upper left')
ax_vs.set_ylabel('Velocities (km/s)')
ax_vs.set_xlabel('Pressures (GPa)')
# Now let's calculate some relaxed material properties.
# These are commonly used in geodynamic simulations, because
# advective velocities due to convection are slow compared
# with the velocity of reaction fronts at mantle temperatures
# Before computing relaxed properties, we can optionally choose
# to smooth the entropy and volume. In this way, we avoid peaks
# in the relaxed thermal expansivity and compressibility which
# tend to cause numerical problems for geodynamics software.
# Let's first define a grid to calculate properties.
# This grid needs to be reasonably densely sampled to
# capture all the gradients. Obviously, the density of
# sampling can be lower if we intend to smooth over long wavelengths
grid_pressures = np.linspace(min_grid_pressure, max_grid_pressure, n_P_gridpoints)
grid_temperatures = np.linspace(min_grid_temperature, max_grid_temperature, n_T_gridpoints)
# Here we choose to smooth by convolving the entropy and volume
# with a 2D Gaussian function with RMS widths of 0 or 0.5 GPa in pressure, and
# a temperature smoothing of f*P_stdev*max(dT/dP), where f is a factor > 0.25.
# The smoothing is truncated at the
# 4 sigma level.
for pressure_stdev in [0., pressure_std_dev]:
unsmoothed_isentrope_temperatures = isentrope(grid_pressures)
temperature_stdev = ( temperature_smoothing_factor * pressure_stdev *
np.max(np.abs( np.gradient(unsmoothed_isentrope_temperatures) )) /
(grid_pressures[1] - grid_pressures[0]) )
pp, TT = np.meshgrid(grid_pressures, grid_temperatures)
mesh_shape = pp.shape
pp = np.ndarray.flatten(pp)
TT = np.ndarray.flatten(TT)
# We could compute properties over the whole grid:
# grid_entropies, grid_volumes = rock.evaluate(['S', 'V'], pp, TT)
# However, we can save some time by computing only when temperature is close enough
# to the unsmoothed isentrope to affect the smoothing.
# The maximum temperature jump for this rock is about 50 K, so a reasonable Tmax is
# ~50 + 4.*temperature_stdev. We pad a bit more (an extra 30 K) just to be sure.
Tdiff_max = 50 + 30 + truncate*temperature_stdev
grid_entropies = np.zeros_like(pp)
grid_volumes = np.zeros_like(pp)
Tdiff = np.abs(isentrope(pp) - TT)
mask = [idx for idx, Td in enumerate(Tdiff) if Td < Tdiff_max]
grid_entropies[mask], grid_volumes[mask] = rock.evaluate(['S', 'V'], pp[mask], TT[mask])
grid_entropies = grid_entropies.reshape(mesh_shape)
grid_volumes = grid_volumes.reshape(mesh_shape)
# Having defined the grid and calculated unsmoothed properties,
# we now calculate the smoothed entropy and volume and derivatives with
# respect to pressure and temperature.
S_interps = burnman.tools.interp_smoothed_array_and_derivatives(array=grid_entropies,
x_values=grid_pressures,
y_values=grid_temperatures,
x_stdev=pressure_stdev,
y_stdev=temperature_stdev,
truncate=truncate)
interp_smoothed_S, interp_smoothed_dSdP, interp_smoothed_dSdT = S_interps
V_interps = burnman.tools.interp_smoothed_array_and_derivatives(array=grid_volumes,
x_values=grid_pressures,
y_values=grid_temperatures,
x_stdev=pressure_stdev,
y_stdev=temperature_stdev,
truncate=truncate)
interp_smoothed_V, interp_smoothed_dVdP, interp_smoothed_dVdT = V_interps
# Now we can calculate and plot the relaxed and smoothed properties along the isentrope
smoothed_temperatures = interp_isentrope(interp_smoothed_S, pressures, entropy, potential_temperature)
densities = rock.evaluate(['rho'], pressures, smoothed_temperatures)[0]
depths, gravity = compute_depth_gravity_profiles(pressures, densities, surface_gravity, outer_radius)
dT = 0.1
Vpsub, Vssub = rock.evaluate(['p_wave_velocity', 'shear_wave_velocity'],
pressures, smoothed_temperatures-dT/2.)
Vpadd, Vsadd = rock.evaluate(['p_wave_velocity', 'shear_wave_velocity'],
pressures, smoothed_temperatures+dT/2.)
Vps = (Vpadd + Vpsub)/2.
Vss = (Vsadd + Vssub)/2.
dVpdT = (Vpadd - Vpsub)/dT
dVsdT = (Vsadd - Vssub)/dT
volumes = np.array([interp_smoothed_V(p, T)[0] for (p, T) in zip(*[pressures, smoothed_temperatures])])
dSdT = np.array([interp_smoothed_dSdT(p, T)[0] for (p, T) in zip(*[pressures, smoothed_temperatures])])
dVdT = np.array([interp_smoothed_dVdT(p, T)[0] for (p, T) in zip(*[pressures, smoothed_temperatures])])
dVdP = np.array([interp_smoothed_dVdP(p, T)[0] for (p, T) in zip(*[pressures, smoothed_temperatures])])
specific_heats_relaxed = smoothed_temperatures * dSdT / rock.params['molar_mass']
alphas_relaxed = dVdT / volumes
compressibilities_relaxed = -dVdP / volumes
print('Min and max relaxed property when pressure smoothing standard deviation is {0:.2f} GPa'.format(pressure_stdev/1.e9))
print('Specific heat: {0:.2e}, {1:.2e}'.format(np.min(specific_heats_relaxed), np.max(specific_heats_relaxed)))
print('Thermal expansivity: {0:.2e}, {1:.2e}'.format(np.min(alphas_relaxed), np.max(alphas_relaxed)))
print('Compressibilities: {0:.2e}, {1:.2e}\n'.format(np.min(compressibilities_relaxed), np.max(compressibilities_relaxed)))
ax_T.plot(x, smoothed_temperatures, label='relaxed, smoothed (P_sd: {0:.1f} GPa)'.format(pressure_stdev/1.e9))
ax_z.plot(x, depths/1.e3)
ax_g.plot(x, gravity)
ax_rho.plot(x, densities)
ax_cp.plot(x, specific_heats_relaxed)
ax_alpha.plot(x, alphas_relaxed)
ax_beta.plot(x, compressibilities_relaxed)
ax_T.legend(loc='upper left')
fig.tight_layout()
plt.show()
# Convert to equal slices in depth
p_spline = UnivariateSpline(depths, pressures)
depths_eq = np.linspace(depths[0], depths[-1], n_points)
pressures_eq = p_spline(depths_eq)
smoothed_temperatures = np.interp(pressures_eq, pressures, smoothed_temperatures)
densities = np.interp(pressures_eq, pressures, densities)
gravity = np.interp(pressures_eq, pressures, gravity)
alphas_relaxed = np.interp(pressures_eq, pressures, alphas_relaxed)
specific_heats_relaxed = np.interp(pressures_eq, pressures, specific_heats_relaxed)
compressibilities_relaxed = np.interp(pressures_eq, pressures, compressibilities_relaxed)
Vss = np.interp(pressures_eq, pressures, Vss)
Vps = np.interp(pressures_eq, pressures, Vps)
dVsdT = np.interp(pressures_eq, pressures, dVsdT)
dVpdT = np.interp(pressures_eq, pressures, dVpdT)
# Finally, here's the ability to output smoothed, relaxed properties for use in ASPECT
# depth, pressure, temperature, density, gravity, Cp (per kilo), thermal expansivity
#if save_output == True:
# np.savetxt('isentrope_properties.txt', X=np.array([depths_eq, pressures_eq, smoothed_temperatures,
# densities, gravity, alphas_relaxed,
# specific_heats_relaxed,
# compressibilities_relaxed,
# Vss, Vps, dVsdT, dVpdT]).T,
# header='# This ASPECT-compatible file contains material properties calculated along an isentrope by the BurnMan software.\n# POINTS: {0}\n# depth (m), pressure (Pa), temperature (K), density (kg/m^3), gravity (m/s^2), thermal expansivity (1/K), specific heat (J/K/kg), compressibility (1/Pa), seismic Vs (m/s), seismic Vp (m/s), seismic dVs/dT (m/s/K), seismic dVp/dT (m/s/K)\ndepth\tpressure\ttemperature\tdensity\tgravity\tthermal_expansivity\tspecific_heat\tcompressibility\tseismic_Vs\tseismic_Vp\tseismic_dVs_dT\tseismic_dVp_dT'.format(n_points),
# fmt='%.10e', delimiter='\t', comments='')
|
tjhei/burnman
|
examples/example_geodynamic_adiabat.py
|
Python
|
gpl-2.0
| 14,925
|
[
"Gaussian"
] |
cc7ac0586b4cb21fb9583989625f52eeaa63a747ba305b34e118d51ab42457c0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2014 David Emms
#
# This program (OrthoFinder) is distributed under the terms of the GNU General Public License v3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# When publishing work that uses OrthoFinder please cite:
# Emms, D.M. and Kelly, S. (2015) OrthoFinder: solving fundamental biases in whole genome comparisons dramatically
# improves orthogroup inference accuracy, Genome Biology 16:157
#
# For any enquiries send an email to David Emms
# david_emms@hotmail.com
import os
import sys
import csv
import time
import shutil
import numpy as np
import subprocess
from collections import Counter, defaultdict
import itertools
import multiprocessing as mp
import warnings
try:
import queue
except ImportError:
import Queue as queue
PY2 = sys.version_info <= (3,)
csv_write_mode = 'wb' if PY2 else 'wt'
from . import util
from . import tree
from . import mcl as MCL
from . import stride
from . import trees2ologs_dlcpar
from . import trees2ologs_of
from . import blast_file_processor as BlastFileProcessor
from . import trees_msa
from . import wrapper_phyldog
from . import stag
from . import files
from . import parallel_task_manager
nThreads = util.nThreadsDefault
# Fix LD_LIBRARY_PATH when using pyinstaller
my_env = os.environ.copy()
if getattr(sys, 'frozen', False):
if 'LD_LIBRARY_PATH_ORIG' in my_env:
my_env['LD_LIBRARY_PATH'] = my_env['LD_LIBRARY_PATH_ORIG']
else:
my_env['LD_LIBRARY_PATH'] = ''
if 'DYLD_LIBRARY_PATH_ORIG' in my_env:
my_env['DYLD_LIBRARY_PATH'] = my_env['DYLD_LIBRARY_PATH_ORIG']
else:
my_env['DYLD_LIBRARY_PATH'] = ''
class Seq(object):
def __init__(self, seqInput):
""" Constructor takes sequence in any format and returns generators the
Seq object accordingly. If performance is really important then can write
individual an @classmethod to do that without the checks"""
if type(seqInput) is str:
a,b = seqInput.split("_")
self.iSp = int(a)
self.iSeq = int(b)
elif len(seqInput) == 2:
            if isinstance(seqInput[0], str):
self.iSp, self.iSeq = list(map(int, seqInput))
else:
self.iSp= seqInput[0]
self.iSeq = seqInput[1]
else:
            raise NotImplementedError
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.ToString()
def ToString(self):
return "%d_%d" % (self.iSp, self.iSeq)
# ==============================================================================================================================
class OrthoGroupsSet(object):
def __init__(self, orthofinderWorkingDir_list, speciesToUse, nSpAll, qAddSpeciesToIDs, idExtractor = util.FirstWordExtractor):
self.speciesIDsEx = util.FullAccession(files.FileHandler.GetSpeciesIDsFN())
self._Spec_SeqIDs = None
self._extractor = idExtractor
self.seqIDsEx = None
self.ogs_all = None
self.iOgs4 = 0
self.speciesToUse = speciesToUse # list of ints
self.seqsInfo = util.GetSeqsInfo(orthofinderWorkingDir_list, self.speciesToUse, nSpAll)
self.id_to_og = None
self.qAddSpeciesToIDs = qAddSpeciesToIDs
def SequenceDict(self):
if self.seqIDsEx == None:
try:
self.seqIDsEx = self._extractor(files.FileHandler.GetSequenceIDsFN())
except RuntimeError as error:
print(str(error))
if str(error).startswith("ERROR"):
files.FileHandler.LogFailAndExit()
else:
print("Tried to use only the first part of the accession in order to list the sequences in each orthogroup\nmore concisely but these were not unique. The full accession line will be used instead.\n")
self.seqIDsEx = util.FullAccession(files.FileHandler.GetSequenceIDsFN())
return self.seqIDsEx.GetIDToNameDict()
def SpeciesDict(self):
d = self.speciesIDsEx.GetIDToNameDict()
return {k:v.rsplit(".",1)[0] for k,v in d.items()}
def Spec_SeqDict(self):
if self._Spec_SeqIDs != None:
return self._Spec_SeqIDs
seqs = self.SequenceDict()
seqs = {k:v for k,v in seqs.items() if int(k.split("_")[0]) in self.speciesToUse}
if not self.qAddSpeciesToIDs:
self._Spec_SeqIDs = seqs
return seqs
specs = self.SpeciesDict()
specs_ed = {k:v.replace(".", "_").replace(" ", "_") for k,v in specs.items()}
self._Spec_SeqIDs = {seqID:specs_ed[seqID.split("_")[0]] + "_" + name for seqID, name in seqs.items()}
return self._Spec_SeqIDs
def OGs(self, qInclAll=False):
if self.ogs_all != None:
if qInclAll:
return self.ogs_all
else:
return self.ogs_all[:self.iOgs4]
ogs = MCL.GetPredictedOGs(files.FileHandler.GetClustersFN())
self.ogs_all = [[Seq(g) for g in og] for og in ogs]
self.iOgs4 = len(self.ogs_all) if len(self.ogs_all[-1]) >= 4 else next(i for i, og in enumerate(self.ogs_all) if len(og) < 4)
if qInclAll:
return self.ogs_all
else:
return self.ogs_all[:self.iOgs4]
def OrthogroupMatrix(self):
""" qReduce give a matrix with only as many columns as species for cases when
clustering has been performed on a subset of species"""
ogs = self.OGs()
iSpecies = sorted(set([gene.iSp for og in ogs for gene in og]))
speciesIndexDict = {iSp:iCol for iCol, iSp in enumerate(iSpecies)}
nSpecies = len(iSpecies)
nGroups = len(ogs)
        # (i, j)-th entry of ogMatrix gives the number of genes from species j in orthogroup i
ogMatrix = np.zeros((nGroups, nSpecies))
for i_og, og in enumerate(ogs):
for gene in og:
ogMatrix[i_og, speciesIndexDict[gene.iSp]] += 1
return ogMatrix
def ID_to_OG_Dict(self):
if self.id_to_og != None:
return self.id_to_og
self.id_to_og = {g.ToString():iog for iog, og in enumerate(self.OGs()) for g in og}
return self.id_to_og
# ==============================================================================================================================
def lil_min(M):
n = M.shape[0]
mins = np.ones((n, 1), dtype = np.float64) * 9e99
for kRow in range(n):
values=M.getrowview(kRow)
if values.nnz == 0:
continue
mins[kRow] = min(values.data[0])
return mins
def lil_max(M):
n = M.shape[0]
maxes = np.zeros((n, 1), dtype = np.float64)
for kRow in range(n):
values=M.getrowview(kRow)
if values.nnz == 0:
continue
maxes[kRow] = max(values.data[0])
return maxes
def lil_minmax(M):
n = M.shape[0]
mins = np.ones((n, 1), dtype = np.float64) * 9e99
maxes = np.zeros((n, 1), dtype = np.float64)
for kRow in range(n):
values=M.getrowview(kRow)
if values.nnz == 0:
continue
mins[kRow] = min(values.data[0])
maxes[kRow] = max(values.data[0])
return mins, maxes
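# --- Illustrative usage (not part of the original OrthoFinder source) ---
# A minimal sketch assuming scipy is installed; `_demo_lil_minmax` is a hypothetical
# helper showing that rows with no stored entries keep their sentinel values
# (9e99 for the minimum, 0 for the maximum).
def _demo_lil_minmax():
    from scipy import sparse
    M = sparse.lil_matrix((3, 4))
    M[0, 1] = 2.0
    M[0, 3] = 5.0
    M[2, 0] = -1.0
    mins, maxes = lil_minmax(M)
    assert mins[0, 0] == 2.0 and maxes[0, 0] == 5.0
    # Row 1 stores nothing, so it is skipped and keeps the initial sentinels.
    assert mins[1, 0] == 9e99 and maxes[1, 0] == 0.0
    assert mins[2, 0] == -1.0 and maxes[2, 0] == -1.0
    return mins, maxes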
# ==============================================================================================================================
# Species trees for two- & three-species analyses
def WriteSpeciesTreeIDs_TwoThree(taxa, outFN):
"""
Get the unrooted species tree for two or three species
Args:
taxa - list of species names
Returns:
"""
t = tree.Tree()
for s in taxa:
t.add_child(tree.TreeNode(name=s))
t.write(outfile=outFN)
def GetSpeciesTreeRoot_TwoTaxa(taxa):
speciesTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
t = tree.Tree("(%s,%s);" % (taxa[0], taxa[1]))
t.write(outfile=speciesTreeFN_ids)
return speciesTreeFN_ids
# ==============================================================================================================================
# DendroBlast
def Worker_OGMatrices_ReadBLASTAndUpdateDistances(cmd_queue, worker_status_queue, iWorker, ogMatrices, nGenes, seqsInfo, blastDir_list, ogsPerSpecies, qDoubleBlast):
speciesToUse = seqsInfo.speciesToUse
with np.errstate(divide='ignore'):
while True:
try:
iiSp, sp1, nSeqs_sp1 = cmd_queue.get(True, 1)
worker_status_queue.put(("start", iWorker, iiSp))
Bs = [BlastFileProcessor.GetBLAST6Scores(seqsInfo, blastDir_list, sp1, sp2, qExcludeSelfHits = False, qDoubleBlast=qDoubleBlast) for sp2 in speciesToUse]
mins = np.ones((nSeqs_sp1, 1), dtype=np.float64)*9e99
maxes = np.zeros((nSeqs_sp1, 1), dtype=np.float64)
for B in Bs:
m0, m1 = lil_minmax(B)
mins = np.minimum(mins, m0)
maxes = np.maximum(maxes, m1)
maxes_inv = 1./maxes
for jjSp, B in enumerate(Bs):
for og, m in zip(ogsPerSpecies, ogMatrices):
for gi, i in og[iiSp]:
for gj, j in og[jjSp]:
m[i][j] = 0.5*max(B[gi.iSeq, gj.iSeq], mins[gi.iSeq]) * maxes_inv[gi.iSeq]
del Bs, B, mins, maxes, m0, m1, maxes_inv, m # significantly reduces RAM usage
worker_status_queue.put(("finish", iWorker, iiSp))
except queue.Empty:
worker_status_queue.put(("empty", iWorker, None))
return
def GetRAMErrorText():
text = "ERROR: The computer ran out of RAM and killed OrthoFinder processes\n"
text += "Try using a computer with more RAM. If you used the '-a' option\n"
text += "it may be possible to complete the run by removing this option."
return text
class DendroBLASTTrees(object):
def __init__(self, ogSet, nProcesses_alg, nProcess_std, qDoubleBlast):
self.ogSet = ogSet
self.nProcesses = nProcesses_alg
self.nProcess_std = nProcess_std
self.qDoubleBlast = qDoubleBlast
# Check files exist
def TreeFilename_IDs(self, iog):
return files.FileHandler.GetOGsTreeFN(iog)
def GetOGMatrices_FullParallel(self):
"""
read the blast files as well, remove need for intermediate pickle and unpickle
ogMatrices contains matrix M for each OG where:
Mij = 0.5*max(Bij, Bmin_i)/Bmax_i
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ogs = self.ogSet.OGs()
ogsPerSpecies = [[[(g, i) for i, g in enumerate(og) if g.iSp == iSp] for iSp in self.ogSet.seqsInfo.speciesToUse] for og in ogs]
nGenes = [len(og) for og in ogs]
nSeqs = self.ogSet.seqsInfo.nSeqsPerSpecies
ogMatrices = [[mp.Array('d', n, lock=False) for _ in range(n)] for n in nGenes]
blastDir_list = files.FileHandler.GetBlastResultsDir()
cmd_queue = mp.Queue()
for iiSp, sp1 in enumerate(self.ogSet.seqsInfo.speciesToUse):
cmd_queue.put((iiSp, sp1, nSeqs[sp1]))
worker_status_queue = mp.Queue()
runningProcesses = [mp.Process(target=Worker_OGMatrices_ReadBLASTAndUpdateDistances, args=(cmd_queue, worker_status_queue, iWorker, ogMatrices, nGenes, self.ogSet.seqsInfo, blastDir_list, ogsPerSpecies, self.qDoubleBlast)) for iWorker in range(self.nProcesses)]
for proc in runningProcesses:
proc.start()
rota = [None for iWorker in range(self.nProcesses)]
unfinished = []
while True:
# get process alive/dead
time.sleep(1)
alive = [proc.is_alive() for proc in runningProcesses]
# read latest updates from queue, update rota
try:
while True:
status, iWorker, iTask = worker_status_queue.get(True, 0.1)
if status == "start":
rota[iWorker] = iTask
elif status == "finish":
rota[iWorker] = None
elif status == "empty":
rota[iWorker] = "empty"
except queue.Empty:
pass
# if worker is dead but didn't finish task, issue warning
for al, r in zip(alive, rota):
if (not al) and (r != "empty"):
text = GetRAMErrorText()
files.FileHandler.LogFailAndExit(text)
unfinished.append(r)
if not any(alive):
break
if len(unfinished) != 0:
files.FileHandler.LogFailAndExit()
# print("WARNING: Computer ran out of RAM and killed OrthoFinder processes")
# print("OrthoFinder will attempt to run these processes once more. If it is")
# print("unsuccessful again then it will have to exit. Consider using")
# print("the option '-a 1' or running on a machine with more RAM")
#ogMatrices = [np.matrix(m) for m in ogMatrices]
return ogs, ogMatrices
def CompleteOGMatrices(self, ogs, ogMatrices):
newMatrices = []
for iog, (og, m) in enumerate(zip(ogs, ogMatrices)):
# dendroblast scores
n = m.shape[0]
m2 = np.zeros(m.shape)
max_og = -9e99
for i in range(n):
for j in range(i):
m2[i, j] = -np.log(m[i,j] + m[j,i])
m2[j, i] = m2[i, j]
max_og = max(max_og, m2[i,j])
newMatrices.append(m2)
return newMatrices
def CompleteAndWriteOGMatrices(self, ogs, ogMatrices):
"""
        ogMatrices - each matrix is a list of mp.Array (so that each represents an nSeq x nSeq matrix)
"""
for iog, (og, m) in enumerate(zip(ogs, ogMatrices)):
# dendroblast scores
n = len(m)
max_og = -9e99
# Careful not to over-write a value and then attempt to try to use the old value
for i in range(n):
for j in range(i):
m[i][j] = -np.log(m[i][j] + m[j][i])
m[j][i] = m[i][j]
max_og = max(max_og, m[i][j])
self.WritePhylipMatrix(m, [g.ToString() for g in og], files.FileHandler.GetOGsDistMatFN(iog), max_og)
return ogMatrices
@staticmethod
def WritePhylipMatrix(m, names, outFN, max_og):
"""
        m - list of mp.Array (so that each represents an nSeq x nSeq matrix)
"""
max_og = 1.1*max_og
sliver = 1e-6
with open(outFN, 'w') as outfile:
n = len(m)
outfile.write("%d\n" % n)
for i in range(n):
outfile.write(names[i] + " ")
# values could be -inf, these are the most distantly related so replace with max_og
V = [0. + (0. if i==j else m[i][j] if m[i][j] > -9e99 else max_og) for j in range(n)] # "0. +": hack to avoid printing out "-0"
V = [sliver if 0 < v < sliver else v for v in V] # make sure scientific notation is not used (not accepted by fastme)
values = " ".join(["%.6f" % v for v in V])
outfile.write(values + "\n")
def SpeciesTreeDistances(self, ogs, ogMatrices, method = 0):
"""
        ogMatrices - each matrix is a list of mp.Array (so that each represents an nSeq x nSeq matrix)
"""
spPairs = list(itertools.combinations(self.ogSet.seqsInfo.speciesToUse, 2))
D = [[] for _ in spPairs]
if method == 0:
""" closest distance for each species pair in each orthogroup"""
for og, m in zip(ogs, ogMatrices):
spDict = defaultdict(list)
for i, g in enumerate(og):
spDict[g.iSp].append(i)
for (sp1, sp2), d_list in zip(spPairs, D):
distances = [m[i][j] for i in spDict[sp1] for j in spDict[sp2]]
if len(distances) > 0: d_list.append(min(distances))
# d_list.append(min(distances) if len(distances) > 0 else None)
return D, spPairs
def PrepareSpeciesTreeCommand(self, D, spPairs, qPutInWorkingDir=False):
n = len(self.ogSet.seqsInfo.speciesToUse)
M = np.zeros((n, n))
for (sp1, sp2), d in zip(spPairs, D):
sp1 = self.ogSet.seqsInfo.speciesToUse.index(sp1)
sp2 = self.ogSet.seqsInfo.speciesToUse.index(sp2)
x = np.median(d)
M[sp1, sp2] = x
M[sp2, sp1] = x
speciesMatrixFN = files.FileHandler.GetSpeciesTreeMatrixFN(qPutInWorkingDir)
sliver = 1e-6
with open(speciesMatrixFN, 'w') as outfile:
outfile.write("%d\n" % n)
for i in range(n):
outfile.write(str(self.ogSet.seqsInfo.speciesToUse[i]) + " ")
V = [(0. + M[i,j]) for j in range(n)] # hack to avoid printing out "-0"
V = [sliver if 0 < v < sliver else v for v in V] # make sure scientific notation is not used (not accepted by fastme)
values = " ".join(["%.6f" % v for v in V])
outfile.write(values + "\n")
treeFN = files.FileHandler.GetSpeciesTreeUnrootedFN()
cmd = " ".join(["fastme", "-i", speciesMatrixFN, "-o", treeFN, "-N", "-w", "O"] + (["-s"] if n < 1000 else []))
return cmd, treeFN
def PrepareGeneTreeCommand(self):
cmds = []
ogs = self.ogSet.OGs()
for iog in range(len(ogs)):
nTaxa = len(ogs[iog])
cmds.append([" ".join(["fastme", "-i", files.FileHandler.GetOGsDistMatFN(iog), "-o", files.FileHandler.GetOGsTreeFN(iog), "-N", "-w", "O"] + (["-s"] if nTaxa < 1000 else []))])
return cmds
@staticmethod
def EnoughOGsForSTAG(ogs, speciesToUse):
nSp = len(speciesToUse)
nSp_perOG = [len(set([g.iSp for g in og])) for og in ogs]
return (nSp_perOG.count(nSp) >= 100)
def RunAnalysis(self, qSpeciesTree=True):
"""
Args:
qSpeciesTree - Bool: infer the species tree
"""
util.PrintUnderline("Calculating gene distances")
ogs, ogMatrices_partial = self.GetOGMatrices_FullParallel()
ogMatrices = self.CompleteAndWriteOGMatrices(ogs, ogMatrices_partial)
del ogMatrices_partial
util.PrintTime("Done")
cmds_trees = self.PrepareGeneTreeCommand()
qLessThanFourSpecies = len(self.ogSet.seqsInfo.speciesToUse) < 4
if not qSpeciesTree:
qSTAG = False
elif qLessThanFourSpecies:
qSTAG = False
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
WriteSpeciesTreeIDs_TwoThree(self.ogSet.seqsInfo.speciesToUse, spTreeFN_ids)
else:
qSTAG = self.EnoughOGsForSTAG(ogs, self.ogSet.seqsInfo.speciesToUse)
if not qSTAG:
print("Using fallback species tree inference method")
D, spPairs = self.SpeciesTreeDistances(ogs, ogMatrices)
cmd_spTree, spTreeFN_ids = self.PrepareSpeciesTreeCommand(D, spPairs)
cmds_trees = [[cmd_spTree]] + cmds_trees
del ogMatrices
util.PrintUnderline("Inferring gene and species trees" if qSpeciesTree else "Inferring gene trees")
parallel_task_manager.RunParallelOrderedCommandLists(self.nProcess_std, cmds_trees)
if qSTAG:
# Trees must have been completed
print("")
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
stag.Run_ForOrthoFinder(files.FileHandler.GetOGsTreeDir(), files.FileHandler.GetWorkingDirectory_Write(), self.ogSet.seqsInfo.speciesToUse, spTreeFN_ids)
if qSpeciesTree:
util.RenameTreeTaxa(spTreeFN_ids, files.FileHandler.GetSpeciesTreeUnrootedFN(True), self.ogSet.SpeciesDict(), qSupport=False, qFixNegatives=True)
return spTreeFN_ids, qSTAG
else:
return None, qSTAG
def SpeciesTreeOnly(self):
qLessThanFourSpecies = len(self.ogSet.seqsInfo.speciesToUse) < 4
if qLessThanFourSpecies:
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
WriteSpeciesTreeIDs_TwoThree(self.ogSet.seqsInfo.speciesToUse, spTreeFN_ids)
else:
ogs, ogMatrices_partial = self.GetOGMatrices_FullParallel()
ogMatrices = self.CompleteOGMatrices(ogs, ogMatrices_partial)
del ogMatrices_partial
D, spPairs = self.SpeciesTreeDistances(ogs, ogMatrices)
del ogMatrices
cmd_spTree, spTreeFN_ids = self.PrepareSpeciesTreeCommand(D, spPairs, True)
parallel_task_manager.RunOrderedCommandList([cmd_spTree])
spTreeUnrootedFN = files.FileHandler.GetSpeciesTreeUnrootedFN(True)
util.RenameTreeTaxa(spTreeFN_ids, spTreeUnrootedFN, self.ogSet.SpeciesDict(), qSupport=False, qFixNegatives=True)
return spTreeFN_ids
# ==============================================================================================================================
# Main
def CheckUserSpeciesTree(speciesTreeFN, expSpecies):
# File exists
if not os.path.exists(speciesTreeFN):
print(("Species tree file does not exist: %s" % speciesTreeFN))
util.Fail()
# Species in tree are unique
try:
t = tree.Tree(speciesTreeFN, format=1)
except Exception as e:
print("\nERROR: Incorrectly formated user-supplied species tree")
print(str(e))
util.Fail()
actSpecies = (t.get_leaf_names())
c = Counter(actSpecies)
if 1 != c.most_common()[0][1]:
print("ERROR: Species names in species tree are not unique")
for sp, n in c.most_common():
if 1 != n:
print(("Species '%s' appears %d times" % (sp, n)))
util.Fail()
# All required species are present
actSpecies = set(actSpecies)
ok = True
for sp in expSpecies:
if sp not in actSpecies:
print(("ERROR: '%s' is missing from species tree" % sp))
ok = False
# expected species are unique
c = Counter(expSpecies)
if 1 != c.most_common()[0][1]:
print("ERROR: Species names are not unique")
for sp, n in c.most_common():
if 1 != n:
print(("Species '%s' appears %d times" % (sp, n)))
util.Fail()
expSpecies = set(expSpecies)
for sp in actSpecies:
if sp not in expSpecies:
print(("ERROR: Additional species '%s' in species tree" % sp))
ok = False
if not ok: util.Fail()
# Tree is rooted
if len(t.get_children()) != 2:
print("ERROR: Species tree is not rooted")
util.Fail()
def ConvertUserSpeciesTree(speciesTreeFN_in, speciesDict, speciesTreeFN_out):
t = tree.Tree(speciesTreeFN_in, format=1)
t.prune(t.get_leaf_names())
revDict = {v:k for k,v in speciesDict.items()}
for sp in t:
sp.name = revDict[sp.name]
t.write(outfile=speciesTreeFN_out)
def WriteTestDistancesFile(testFN):
with open(testFN, 'w') as outfile:
outfile.write("4\n1_1 0 0 0.2 0.25\n0_2 0 0 0.21 0.28\n3_1 0.2 0.21 0 0\n4_1 0.25 0.28 0 0")
return testFN
def CanRunOrthologueDependencies(workingDir, qMSAGeneTrees, qPhyldog, qStopAfterTrees, msa_method, tree_method, recon_method, program_caller, qStopAfterAlignments):
# FastME
if (not qMSAGeneTrees):
testFN = workingDir + "SimpleTest.phy"
WriteTestDistancesFile(testFN)
outFN = workingDir + "SimpleTest.tre"
if os.path.exists(outFN): os.remove(outFN)
if not parallel_task_manager.CanRunCommand("fastme -i %s -o %s" % (testFN, outFN), qAllowStderr=False):
print("ERROR: Cannot run fastme")
print("Please check FastME is installed and that the executables are in the system path.\n")
return False
os.remove(testFN)
os.remove(outFN)
fastme_stat_fn = workingDir + "SimpleTest.phy_fastme_stat.txt"
if os.path.exists(fastme_stat_fn): os.remove(fastme_stat_fn)
# DLCPar
if ("dlcpar" in recon_method) and not (qStopAfterTrees or qStopAfterAlignments):
if not parallel_task_manager.CanRunCommand("dlcpar_search --version", qAllowStderr=False):
print("ERROR: Cannot run dlcpar_search")
print("Please check DLCpar is installed and that the executables are in the system path.\n")
return False
if recon_method == "dlcpar_convergedsearch":
capture = subprocess.Popen("dlcpar_search --version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env)
stdout = [x for x in capture.stdout]
try:
stdout = "".join([x.decode() for x in stdout])
except (UnicodeDecodeError, AttributeError):
stdout = "".join([x.encode() for x in stdout])
version = stdout.split()[-1]
tokens = list(map(int, version.split(".")))
major, minor = tokens[:2]
release = tokens[2] if len(tokens) > 2 else 0
# require 1.0.1 or above
actual = (major, minor, release)
required = [1,0,1]
versionOK = True
for r, a in zip(required, actual):
if a > r:
versionOK = True
break
elif a < r:
versionOK = False
break
else:
pass
# need to check next level down
if not versionOK:
print("ERROR: dlcpar_convergedsearch requires dlcpar_search version 1.0.1 or above")
return False
# FastTree & MAFFT
if qMSAGeneTrees or qPhyldog:
testFN, temp_dir = trees_msa.WriteTestFile(workingDir)
if msa_method == "mafft":
if not parallel_task_manager.CanRunCommand("mafft %s" % testFN, qAllowStderr=True):
print("ERROR: Cannot run mafft")
print("Please check MAFFT is installed and that the executables are in the system path\n")
return False
elif msa_method != None:
if not program_caller.TestMSAMethod(msa_method):
print(("ERROR: Cannot run user-configured MSA method '%s'" % msa_method))
print("Please check program is installed and that it is correctly configured in the orthofinder/config.json file\n")
return False
if tree_method == "fasttree":
if qMSAGeneTrees and (not qStopAfterAlignments) and not parallel_task_manager.CanRunCommand("FastTree %s" % testFN, qAllowStderr=True):
print("ERROR: Cannot run FastTree")
print("Please check FastTree is installed and that the executables are in the system path\n")
return False
elif tree_method != None:
if not program_caller.TestTreeMethod(tree_method):
print(("ERROR: Cannot run user-configured tree method '%s'" % tree_method))
print("Please check program is installed and that it is correctly configured in the orthofinder/config.json file\n")
return False
try:
shutil.rmtree(temp_dir)
except OSError:
time.sleep(1)
shutil.rmtree(temp_dir, True) # shutil / NFS bug - ignore errors, it's less crucial that the files are deleted
if qPhyldog:
if not parallel_task_manager.CanRunCommand("mpirun -np 1 phyldog", qAllowStderr=False):
print("ERROR: Cannot run mpirun -np 1 phyldog")
print("Please check phyldog is installed and that the executable is in the system path\n")
return False
return True
def PrintHelp():
print("Usage")
print("-----")
print("orthologues.py orthofinder_results_directory [-t max_number_of_threads]")
print("orthologues.py -h")
print("\n")
print("Arguments")
print("---------")
print("""orthofinder_results_directory
    Generate gene trees for the orthogroups, generate a rooted species tree and infer orthologues.\n""")
print(("""-t max_number_of_threads, --threads max_number_of_threads
    The maximum number of processes to be run simultaneously. The default is %d but this
should be increased by the user to the maximum number of cores available.\n""" % util.nThreadsDefault))
print("""-h, --help
Print this help text""")
util.PrintCitation()
def WriteOrthologuesMatrix(fn, matrix, speciesToUse, speciesDict):
with open(fn, csv_write_mode) as outfile:
writer = csv.writer(outfile, delimiter="\t")
writer.writerow([""] + [speciesDict[str(index)] for index in speciesToUse])
for ii, iSp in enumerate(speciesToUse):
overlap = [matrix[ii, jj] for jj, jSp in enumerate(speciesToUse)]
writer.writerow([speciesDict[str(iSp)]] + overlap)
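# Illustrative sketch (hypothetical names/values, not produced by this code):
# the file written above is a square, tab-separated matrix whose first row and
# first column hold the species names, so each cell is the count for that
# ordered species pair, e.g.
#              SpeciesA   SpeciesB
#   SpeciesA     n_AA       n_AB
#   SpeciesB     n_BA       n_BB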
def WriteOrthologuesStats(ogSet, nOrtho_sp):
"""
nOrtho_sp is a util.nOrtho_sp object
"""
speciesToUse = ogSet.speciesToUse
speciesDict = ogSet.SpeciesDict()
d = files.FileHandler.GetOGsStatsResultsDirectory()
WriteOrthologuesMatrix(d + "OrthologuesStats_Totals.tsv", nOrtho_sp.n, speciesToUse, speciesDict)
WriteOrthologuesMatrix(d + "OrthologuesStats_one-to-one.tsv", nOrtho_sp.n_121, speciesToUse, speciesDict)
WriteOrthologuesMatrix(d + "OrthologuesStats_one-to-many.tsv", nOrtho_sp.n_12m, speciesToUse, speciesDict)
WriteOrthologuesMatrix(d + "OrthologuesStats_many-to-one.tsv", nOrtho_sp.n_m21, speciesToUse, speciesDict)
WriteOrthologuesMatrix(d + "OrthologuesStats_many-to-many.tsv", nOrtho_sp.n_m2m, speciesToUse, speciesDict)
# Duplications
nodeCount = defaultdict(int)
nodeCount_50 = defaultdict(int)
ogCount = defaultdict(int)
ogCount_50 = defaultdict(int)
if not os.path.exists(files.FileHandler.GetDuplicationsFN()): return
with open(files.FileHandler.GetDuplicationsFN(), 'rb' if PY2 else 'rt') as infile:
reader = csv.reader(infile, delimiter="\t")
next(reader)
# for line in reader:
# try:
# og, node, _, support, _, _, _ = line
# except:
# print(line)
# raise
for og, node, _, support, _, _, _ in reader:
support = float(support)
nodeCount[node] += 1
ogCount[og] += 1
if support >= 0.5:
nodeCount_50[node] += 1
ogCount_50[og] += 1
with open(d + "Duplications_per_Species_Tree_Node.tsv", csv_write_mode) as outfile:
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(["Species Tree Node", "Duplications (all)", "Duplications (50% support)"])
# max_node = max([int(s[1:]) for s in nodeCount.keys()]) # Get largest node number
for node in nodeCount:
writer.writerow([node, nodeCount[node], nodeCount_50[node]])
# Write on species tree
in_tree_fn = files.FileHandler.GetSpeciesTreeResultsNodeLabelsFN()
out_tree_fn = os.path.split(files.FileHandler.GetDuplicationsFN())[0] + "/SpeciesTree_Gene_Duplications_0.5_Support.txt"
t = tree.Tree(in_tree_fn, format=1)
for n in t.traverse():
n.name = n.name + "_" + str(nodeCount_50[n.name])
with open(out_tree_fn, 'w') as outfile:
outfile.write(t.write(format=1)[:-1] + t.name + ";")
with open(d + "Duplications_per_Orthogroup.tsv", csv_write_mode) as outfile:
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(["Orthogroup", "Duplications (all)", "Duplications (50% support)"])
if len(ogCount) > 0:
max_og = max([int(s[2:]) for s in ogCount.keys()])
pat = files.FileHandler.baseOgFormat
for i in range(max_og + 1):
og = pat % i
writer.writerow([og, ogCount[og], ogCount_50[og]])
def TwoAndThreeGeneHOGs(ogSet, st_rooted_labelled, hog_writer):
ogs = ogSet.OGs(qInclAll=True)
for iog, og in enumerate(ogs):
n = len(og)
if n < 2 or n > 3: continue
og_name = "OG%07d" % iog
sp_present = set([str(g.iSp) for g in og])
stNode = trees2ologs_of.MRCA_node(st_rooted_labelled, sp_present)
hogs_to_write = hog_writer.get_skipped_nodes(stNode, None)
if len(sp_present) > 1:
# We don't create files for 'species specific HOGs'
st_node = trees2ologs_of.MRCA_node(st_rooted_labelled, sp_present)
hogs_to_write = hogs_to_write + [st_node.name]
genes = [g.ToString() for g in og] # Inefficient as will convert back again, but trivial cost I think
hog_writer.write_hog_genes(genes, hogs_to_write, og_name)
def TwoAndThreeGeneOrthogroups(ogSet, resultsDir):
speciesDict = ogSet.SpeciesDict()
sequenceDict = ogSet.SequenceDict()
ogs = ogSet.OGs(qInclAll=True)
nOrthologues_SpPair = util.nOrtho_sp(len(ogSet.speciesToUse))
all_orthologues = []
d_empty = defaultdict(list)
for iog, og in enumerate(ogs):
n = len(og)
if n == 1: break
elif n == 2:
if og[0].iSp == og[1].iSp: continue
# orthologues is a list of tuples of dictionaries
# each dictionary is sp->list of genes in species
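            # e.g. (hypothetical genes 0_12 and 3_47) the entry appended below is
            # ({'0': ['12']}, {'3': ['47']}, d_empty, d_empty)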
d0 = defaultdict(list)
d0[str(og[0].iSp)].append(str(og[0].iSeq))
d1 = defaultdict(list)
d1[str(og[1].iSp)].append(str(og[1].iSeq))
orthologues = [(d0, d1, d_empty, d_empty)]
elif n == 3:
sp = [g.iSp for g in og]
c = Counter(sp)
nSp = len(c)
if nSp == 3:
g = [(str(g.iSp), str(g.iSeq)) for g in og]
d0 = defaultdict(list)
d0[g[0][0]].append(g[0][1])
d1 = defaultdict(list)
d1[g[1][0]].append(g[1][1])
d1[g[2][0]].append(g[2][1])
orthologues = [(d0, d1, d_empty, d_empty)]
d0 = defaultdict(list)
d0[g[1][0]].append(g[1][1])
d1 = defaultdict(list)
d1[g[2][0]].append(g[2][1])
orthologues.append((d0,d1, d_empty, d_empty))
elif nSp == 2:
sp0, sp1 = list(c.keys())
d0 = defaultdict(list)
d0[str(sp0)] = [str(g.iSeq) for g in og if g.iSp == sp0]
d1 = defaultdict(list)
d1[str(sp1)] = [str(g.iSeq) for g in og if g.iSp == sp1]
orthologues = [(d0, d1, d_empty, d_empty)]
else:
continue # no orthologues
elif n >= 4:
continue
all_orthologues.append((iog, orthologues))
nspecies = len(ogSet.speciesToUse)
sp_to_index = {str(sp):i for i, sp in enumerate(ogSet.speciesToUse)}
with trees2ologs_of.OrthologsFiles(resultsDir, speciesDict, ogSet.speciesToUse, nspecies, sp_to_index) as (olog_files_handles, suspect_genes_file_handles):
olog_lines_tot = [["" for j in range(nspecies)] for i in range(nspecies)]
olog_sus_lines_tot = ["" for i in range(nspecies)]
nOrthologues_SpPair += trees2ologs_of.GetLinesForOlogFiles(all_orthologues, speciesDict, ogSet.speciesToUse, sequenceDict,
False, olog_lines_tot, olog_sus_lines_tot)
# olog_sus_lines_tot will be empty
lock_dummy = mp.Lock()
for i in range(nspecies):
for j in range(nspecies):
trees2ologs_of.WriteOlogLinesToFile(olog_files_handles[i][j], olog_lines_tot[i][j], lock_dummy)
return nOrthologues_SpPair
def ReconciliationAndOrthologues(recon_method, ogSet, nHighParallel, nLowParallel, iSpeciesTree=None, stride_dups=None, q_split_para_clades=False):
"""
ogSet - info about the orthogroups, species etc
resultsDir - where the Orthologues top level results directory will go (should exist already)
    reconTreesRenamedDir - where to put the reconciled trees that use the gene accessions
iSpeciesTree - which of the potential roots of the species tree is this
method - can be dlcpar, dlcpar_deep, of_recon
"""
speciesTree_ids_fn = files.FileHandler.GetSpeciesTreeIDsRootedFN()
labeled_tree_fn = files.FileHandler.GetSpeciesTreeResultsNodeLabelsFN()
util.RenameTreeTaxa(speciesTree_ids_fn, labeled_tree_fn, ogSet.SpeciesDict(), qSupport=False, qFixNegatives=True, label='N')
workingDir = files.FileHandler.GetWorkingDirectory_Write() # workingDir - Orthologues working dir
resultsDir_ologs = files.FileHandler.GetOrthologuesDirectory()
reconTreesRenamedDir = files.FileHandler.GetOGsReconTreeDir(True)
if "dlcpar" in recon_method:
qDeepSearch = (recon_method == "dlcpar_convergedsearch")
util.PrintTime("Starting DLCpar")
dlcparResultsDir, dlcparLocusTreePat = trees2ologs_dlcpar.RunDlcpar(ogSet, speciesTree_ids_fn, workingDir, nHighParallel, qDeepSearch)
util.PrintTime("Done DLCpar")
spec_seq_dict = ogSet.Spec_SeqDict()
for iog in range(len(ogSet.OGs())):
util.RenameTreeTaxa(dlcparResultsDir + dlcparLocusTreePat % iog, files.FileHandler.GetOGsReconTreeFN(iog), spec_seq_dict, qSupport=False, qFixNegatives=False, inFormat=8, label='n')
# Orthologue lists
util.PrintUnderline("Inferring orthologues from gene trees" + (" (root %d)"%iSpeciesTree if iSpeciesTree != None else ""))
pickleDir = files.FileHandler.GetPickleDir()
nOrthologues_SpPair = trees2ologs_dlcpar.create_orthologue_lists(ogSet, resultsDir_ologs, dlcparResultsDir, pickleDir)
elif "phyldog" == recon_method:
util.PrintTime("Starting Orthologues from Phyldog")
nOrthologues_SpPair = trees2ologs_of.DoOrthologuesForOrthoFinder_Phyldog(ogSet, workingDir, trees2ologs_of.GeneToSpecies_dash, resultsDir_ologs, reconTreesRenamedDir)
util.PrintTime("Done Orthologues from Phyldog")
else:
start = time.time()
util.PrintTime("Starting OF Orthologues")
qNoRecon = ("only_overlap" == recon_method)
# The next function should not create the HOG writer and label the species tree. This should be done here and passed as arguments
species_tree_rooted_labelled = tree.Tree(speciesTree_ids_fn)
# Label nodes of species tree
species_tree_rooted_labelled.name = "N0"
iNode = 1
node_names = [species_tree_rooted_labelled.name]
for n in species_tree_rooted_labelled.traverse():
if (not n.is_leaf()) and (not n.is_root()):
n.name = "N%d" % iNode
node_names.append(n.name)
iNode += 1
# HOG Writer
speciesDict = ogSet.SpeciesDict()
SequenceDict = ogSet.SequenceDict()
hog_writer = trees2ologs_of.HogWriter(species_tree_rooted_labelled, node_names, SequenceDict, speciesDict, ogSet.speciesToUse)
nOrthologues_SpPair = trees2ologs_of.DoOrthologuesForOrthoFinder(ogSet, species_tree_rooted_labelled, trees2ologs_of.GeneToSpecies_dash,
stride_dups, qNoRecon, hog_writer, q_split_para_clades, nLowParallel)
util.PrintTime("Done OF Orthologues")
TwoAndThreeGeneHOGs(ogSet, species_tree_rooted_labelled, hog_writer)
hog_writer.close_files()
nOrthologues_SpPair += TwoAndThreeGeneOrthogroups(ogSet, resultsDir_ologs)
if nLowParallel > 1 and "phyldog" != recon_method and "dlcpar" not in recon_method:
trees2ologs_of.SortParallelFiles(nLowParallel, ogSet.speciesToUse, speciesDict)
stop = time.time()
# print("%fs for orthologs etc" % (stop-start))
WriteOrthologuesStats(ogSet, nOrthologues_SpPair)
# print("Identified %d orthologues" % nOrthologues)
def OrthologuesFromTrees(recon_method, nHighParallel, nLowParallel, userSpeciesTree_fn, qAddSpeciesToIDs, q_split_para_clades):
"""
userSpeciesTree_fn - None if not supplied otherwise rooted tree using user species names (not orthofinder IDs)
qUserSpTree - is the speciesTree_fn user-supplied
    Just infer orthologues from trees, don't do any of the preceding steps.
"""
speciesToUse, nSpAll, _ = util.GetSpeciesToUse(files.FileHandler.GetSpeciesIDsFN())
ogSet = OrthoGroupsSet(files.FileHandler.GetWorkingDirectory1_Read(), speciesToUse, nSpAll, qAddSpeciesToIDs, idExtractor = util.FirstWordExtractor)
if userSpeciesTree_fn != None:
speciesDict = files.FileHandler.GetSpeciesDict()
speciesToUseNames = [speciesDict[str(iSp)] for iSp in ogSet.speciesToUse]
CheckUserSpeciesTree(userSpeciesTree_fn, speciesToUseNames)
speciesTreeFN_ids = files.FileHandler.GetSpeciesTreeIDsRootedFN()
ConvertUserSpeciesTree(userSpeciesTree_fn, speciesDict, speciesTreeFN_ids)
util.PrintUnderline("Running Orthologue Prediction", True)
util.PrintUnderline("Reconciling gene and species trees")
ReconciliationAndOrthologues(recon_method, ogSet, nHighParallel, nLowParallel, q_split_para_clades=q_split_para_clades)
util.PrintUnderline("Writing results files")
util.PrintTime("Writing results files")
files.FileHandler.CleanWorkingDir2()
def OrthologuesWorkflow(speciesToUse, nSpAll,
tree_options,
msa_method,
tree_method,
recon_method,
nHighParallel,
nLowParallel,
qDoubleBlast,
qAddSpeciesToIDs,
qTrim,
userSpeciesTree = None,
qStopAfterSeqs = False,
qStopAfterAlign = False,
qStopAfterTrees = False,
qMSA = False,
qPhyldog = False,
results_name = "",
q_split_para_clades=False):
"""
1. Setup:
- ogSet, directories
        - DendroBLASTTrees - object
    2. DendroBLAST:
- read scores
- RunAnalysis: Get distance matrices, do trees
3. Root species tree
4. Reconciliation/Orthologues
5. Clean up
Variables:
- ogSet - all the relevant information about the orthogroups, species etc.
"""
ogSet = OrthoGroupsSet(files.FileHandler.GetWorkingDirectory1_Read(), speciesToUse, nSpAll, qAddSpeciesToIDs, idExtractor = util.FirstWordExtractor)
tree_generation_method = "msa" if qMSA or qPhyldog else "dendroblast"
stop_after = "seqs" if qStopAfterSeqs else "align" if qStopAfterAlign else ""
files.FileHandler.MakeResultsDirectory2(tree_generation_method, stop_after, results_name)
""" === 1 === ust = UserSpeciesTree
MSA: Sequences Alignments GeneTrees db SpeciesTree
Phyldog: Sequences Alignments GeneTrees db SpeciesTree
Dendroblast: DistanceMatrices GeneTrees db SpeciesTree
MSA (ust): Sequences Alignments GeneTrees db
Phyldog (ust): Sequences Alignments GeneTrees db
Dendroblast (ust): DistanceMatrices GeneTrees db
"""
qDB_SpeciesTree = False
if userSpeciesTree:
util.PrintUnderline("Using user-supplied species tree")
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
ConvertUserSpeciesTree(userSpeciesTree, ogSet.SpeciesDict(), spTreeFN_ids)
if qMSA or qPhyldog:
qLessThanFourSpecies = len(ogSet.seqsInfo.speciesToUse) < 4
treeGen = trees_msa.TreesForOrthogroups(tree_options, msa_method, tree_method)
if (not userSpeciesTree) and qLessThanFourSpecies:
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
WriteSpeciesTreeIDs_TwoThree(ogSet.seqsInfo.speciesToUse, spTreeFN_ids)
util.RenameTreeTaxa(spTreeFN_ids, files.FileHandler.GetSpeciesTreeUnrootedFN(True), ogSet.SpeciesDict(), qSupport=False, qFixNegatives=True)
qDoMSASpeciesTree = (not qLessThanFourSpecies) and (not userSpeciesTree)
util.PrintTime("Starting MSA/Trees")
seqs_alignments_dirs = treeGen.DoTrees(ogSet.OGs(qInclAll=True),
ogSet.OrthogroupMatrix(),
ogSet.Spec_SeqDict(),
ogSet.SpeciesDict(),
ogSet.speciesToUse,
nHighParallel,
qStopAfterSeqs,
qStopAfterAlign or qPhyldog,
qDoSpeciesTree=qDoMSASpeciesTree,
qTrim = qTrim)
util.PrintTime("Done MSA/Trees")
if qDoMSASpeciesTree:
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
if qStopAfterSeqs:
print("")
return
elif qStopAfterAlign:
print("")
return
db = DendroBLASTTrees(ogSet, nLowParallel, nHighParallel, qDoubleBlast)
if qDB_SpeciesTree and not userSpeciesTree and not qLessThanFourSpecies:
util.PrintUnderline("Inferring species tree (calculating gene distances)")
print("Loading BLAST scores")
spTreeFN_ids = db.SpeciesTreeOnly()
if qPhyldog:
# util.PrintTime("Do species tree for phyldog")
# spTreeFN_ids, spTreeUnrootedFN = db.SpeciesTreeOnly()
if userSpeciesTree:
userSpeciesTree = ConvertUserSpeciesTree(userSpeciesTree, ogSet.SpeciesDict(), files.FileHandler.GetSpeciesTreeUnrootedFN())
util.PrintTime("Starting phyldog")
species_tree_ids_labelled_phyldog = wrapper_phyldog.RunPhyldogAnalysis(files.FileHandler.GetPhyldogWorkingDirectory(), ogSet.OGs(), speciesToUse, nHighParallel)
else:
db = DendroBLASTTrees(ogSet, nLowParallel, nHighParallel, qDoubleBlast)
spTreeFN_ids, qSTAG = db.RunAnalysis(userSpeciesTree == None)
if userSpeciesTree != None:
spTreeFN_ids = files.FileHandler.GetSpeciesTreeUnrootedFN()
files.FileHandler.LogWorkingDirectoryTrees()
qSpeciesTreeSupports = False if (userSpeciesTree or qMSA or qPhyldog) else qSTAG
"""
SpeciesTree
spTreeFN_ids, or equivalently FileHandler.GetSpeciesTreeUnrootedFN() in all cases (user, inferred etc)
Thus, we always have the species tree ids format
With phyldog, we also have species_tree_ids_labelled_phyldog - with the node labels given by phyldog
"""
""" === 3 ===
MSA: RootSpeciesTree
Phyldog: RootSpeciesTree
Dendroblast: RootSpeciesTree
MSA (ust): ConvertSpeciesTreeIDs
Phyldog (ust): ConvertSpeciesTreeIDs
Dendroblast (ust): ConvertSpeciesTreeIDs
"""
if qPhyldog:
rootedSpeciesTreeFN = [species_tree_ids_labelled_phyldog]
roots = [None]
qMultiple = False
stride_dups = None
elif userSpeciesTree:
rootedSpeciesTreeFN = [spTreeFN_ids]
roots = [None]
qMultiple = False
stride_dups = None
elif len(ogSet.seqsInfo.speciesToUse) == 2:
hardcodeSpeciesTree = GetSpeciesTreeRoot_TwoTaxa(ogSet.seqsInfo.speciesToUse)
rootedSpeciesTreeFN = [hardcodeSpeciesTree]
roots = [None]
qMultiple = False
stride_dups = None
else:
util.PrintUnderline("Best outgroup(s) for species tree")
util.PrintTime("Starting STRIDE")
roots, clusters_counter, rootedSpeciesTreeFN, nSupport, _, _, stride_dups = stride.GetRoot(spTreeFN_ids, files.FileHandler.GetOGsTreeDir(), stride.GeneToSpecies_dash, nHighParallel, qWriteRootedTree=True)
util.PrintTime("Done STRIDE")
nAll = sum(clusters_counter.values())
nFP_mp = nAll - nSupport
n_non_trivial = sum([v for k, v in clusters_counter.items() if len(k) > 1])
if len(roots) > 1:
print(("Observed %d well-supported, non-terminal duplications. %d support the best roots and %d contradict them." % (n_non_trivial, n_non_trivial-nFP_mp, nFP_mp)))
print("Best outgroups for species tree:")
else:
print(("Observed %d well-supported, non-terminal duplications. %d support the best root and %d contradict it." % (n_non_trivial, n_non_trivial-nFP_mp, nFP_mp)))
print("Best outgroup for species tree:")
spDict = ogSet.SpeciesDict()
for r in roots: print((" " + (", ".join([spDict[s] for s in r])) ))
qMultiple = len(roots) > 1
shutil.copy(rootedSpeciesTreeFN[0], files.FileHandler.GetSpeciesTreeIDsRootedFN())
"""
SpeciesTree:
We now have a list of rooted species trees: rootedSpeciesTreeFN (this should be recorded by the file handler)
"""
if qStopAfterTrees:
# root the gene trees using the species tree and write out their accessions - really I could remove the whole '-ot, -os, -oa' options, they are probably rarely used if ever.
if userSpeciesTree:
return
# otherwise, root species tree
resultsSpeciesTrees = []
for i, (r, speciesTree_fn) in enumerate(zip(roots, rootedSpeciesTreeFN)):
resultsSpeciesTrees.append(files.FileHandler.GetSpeciesTreeResultsFN(i, not qMultiple))
util.RenameTreeTaxa(speciesTree_fn, resultsSpeciesTrees[-1], db.ogSet.SpeciesDict(), qSupport=qSpeciesTreeSupports, qFixNegatives=True)
labeled_tree_fn = files.FileHandler.GetSpeciesTreeResultsNodeLabelsFN()
util.RenameTreeTaxa(speciesTree_fn, labeled_tree_fn, db.ogSet.SpeciesDict(), qSupport=False, qFixNegatives=True, label='N')
idDict = ogSet.Spec_SeqDict()
qHaveSupport = None
for iog in range(len(ogSet.OGs())):
infn = files.FileHandler.GetOGsTreeFN(iog)
if os.path.exists(infn):
if qHaveSupport is None: qHaveSupport = util.HaveSupportValues(infn)
util.RenameTreeTaxa(infn, files.FileHandler.GetOGsTreeFN(iog, True), idDict, qSupport=qHaveSupport, qFixNegatives=True)
files.FileHandler.CleanWorkingDir2()
    if qMultiple: print("\nWARNING: Multiple potential species tree roots were identified, only one will be analysed.")
resultsSpeciesTrees = []
i = 0
r = roots[0]
speciesTree_fn = rootedSpeciesTreeFN[0]
util.PrintUnderline("Reconciling gene trees and species tree")
resultsSpeciesTrees.append(files.FileHandler.GetSpeciesTreeResultsFN(0, True))
if (not userSpeciesTree) and (not qPhyldog) and len(ogSet.seqsInfo.speciesToUse) != 2:
print(("Outgroup: " + (", ".join([spDict[s] for s in r]))))
util.RenameTreeTaxa(speciesTree_fn, resultsSpeciesTrees[-1], db.ogSet.SpeciesDict(), qSupport=qSpeciesTreeSupports, qFixNegatives=True)
util.PrintTime("Starting Recon and orthologues")
ReconciliationAndOrthologues(recon_method, db.ogSet, nHighParallel, nLowParallel, i if qMultiple else None, stride_dups=stride_dups, q_split_para_clades=q_split_para_clades)
# util.PrintTime("Done Recon")
if qMultiple:
for i, (r, speciesTree_fn) in enumerate(zip(roots, rootedSpeciesTreeFN)):
unanalysedSpeciesTree = files.FileHandler.GetSpeciesTreeResultsFN(i, False)
util.RenameTreeTaxa(speciesTree_fn, unanalysedSpeciesTree, db.ogSet.SpeciesDict(), qSupport=qSpeciesTreeSupports, qFixNegatives=True, label='N')
"""
SpeciesTree: If it's been inferred, there is now at least one rooted results species trees: GetSpeciesTreeResultsFN()
"""
files.FileHandler.CleanWorkingDir2()
util.PrintUnderline("Writing results files", True)
|
davidemms/OrthoFinder
|
scripts_of/orthologues.py
|
Python
|
gpl-3.0
| 53,879
|
[
"BLAST"
] |
8e93e4e34ec3e7ec429bdf4ba61e2f53468aff36cee73d5fc5abc1824eedd094
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import math
import os
import json
import collections
import itertools
from abc import ABCMeta, abstractmethod
import random
import warnings
from fnmatch import fnmatch
import re
import functools
from math import gcd
import numpy as np
from monty.dev import deprecated
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.json import MSONable
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.util.coord import get_angle, all_distances, \
lattice_points_in_supercell
from pymatgen.core.units import Mass, Length
from monty.io import zopen
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class SiteCollection(collections.abc.Sequence, metaclass=ABCMeta):
"""
Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
This serves as a base class for Molecule (a collection of Site, i.e., no
periodicity) and Structure (a collection of PeriodicSites, i.e.,
periodicity). Not meant to be instantiated directly.
"""
# Tolerance in Angstrom for determining if sites are too close.
DISTANCE_TOLERANCE = 0.5
@property
@abstractmethod
def sites(self):
"""
Returns a tuple of sites.
"""
return
@abstractmethod
def get_distance(self, i: int, j: int) -> float:
"""
Returns distance between sites at index i and j.
Args:
i: Index of first site
j: Index of second site
Returns:
Distance between sites at index i and index j.
"""
return
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this is overwritten to return the nearest image
distance.
"""
return all_distances(self.cart_coords, self.cart_coords)
@property
def species(self):
"""
Only works for ordered structures.
Disordered structures will raise an AttributeError.
Returns:
([Specie]) List of species at each site of the structure.
"""
return [site.specie for site in self]
@property
def species_and_occu(self):
"""
List of species and occupancies at each site of the structure.
"""
return [site.species for site in self]
@property
def ntypesp(self):
"""Number of types of atoms."""
return len(self.types_of_specie)
@property
def types_of_specie(self):
"""
List of types of specie. Only works for ordered structures.
Disordered structures will raise TypeError.
"""
if not self.is_ordered:
raise TypeError("""\
types_of_species cannot be used with disordered structures and partial occupancies.
Use OrderDisorderedStructureTransformation or EnumerateStructureTransformation
to build an appropriate supercell from partial occupancies.""")
# Cannot use set since we want a deterministic algorithm.
types = []
for site in self:
if site.specie not in types:
types.append(site.specie)
return types
def group_by_types(self):
"""Iterate over species grouped by type"""
for t in self.types_of_specie:
for site in self:
if site.specie == t:
yield site
def indices_from_symbol(self, symbol: str) -> tuple:
"""
Returns a tuple with the sequential indices of the sites
that contain an element with the given chemical symbol.
"""
return tuple((i for i, specie in enumerate(self.species)
if specie.symbol == symbol))
@property
def symbol_set(self):
"""
Tuple with the set of chemical symbols.
Note that len(symbol_set) == len(types_of_specie)
"""
return tuple((specie.symbol for specie in self.types_of_specie))
@property
def atomic_numbers(self):
"""List of atomic numbers."""
return [site.specie.number for site in self]
@property
def site_properties(self):
"""
Returns the site properties as a dict of sequences. E.g.,
{"magmom": (5,-5), "charge": (-4,4)}.
"""
props = {}
prop_keys = set()
for site in self:
prop_keys.update(site.properties.keys())
for k in prop_keys:
props[k] = [site.properties.get(k, None) for site in self]
return props
def __contains__(self, site):
return site in self.sites
def __iter__(self):
return self.sites.__iter__()
def __getitem__(self, ind):
return self.sites[ind]
def __len__(self):
return len(self.sites)
def __hash__(self):
# for now, just use the composition hash code.
return self.composition.__hash__()
@property
def num_sites(self):
"""
Number of sites.
"""
return len(self)
@property
def cart_coords(self):
"""
Returns a np.array of the cartesian coordinates of sites in the
structure.
"""
return np.array([site.coords for site in self])
@property
def formula(self):
"""
(str) Returns the formula.
"""
return self.composition.formula
@property
def composition(self):
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float)
for site in self:
for species, occu in site.species.items():
elmap[species] += occu
return Composition(elmap)
@property
def charge(self):
"""
Returns the net charge of the structure based on oxidation states. If
Elements are found, a charge of 0 is assumed.
"""
charge = 0
for site in self:
for specie, amt in site.species.items():
charge += getattr(specie, "oxi_state", 0) * amt
return charge
@property
def is_ordered(self):
"""
Checks if structure is ordered, meaning no partial occupancies in any
of the sites.
"""
return all((site.is_ordered for site in self))
def get_angle(self, i: int, j: int, k: int) -> float:
"""
Returns angle specified by three sites.
Args:
i: Index of first site.
j: Index of second site.
k: Index of third site.
Returns:
Angle in degrees.
"""
v1 = self[i].coords - self[j].coords
v2 = self[k].coords - self[j].coords
return get_angle(v1, v2, units="degrees")
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
"""
Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees.
"""
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
np.dot(v12, v23)))
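    # Hypothetical usage sketch (not part of the original source): for a
    # four-site molecule, mol.get_dihedral(0, 1, 2, 3) gives the torsion angle
    # about the bond between sites 1 and 2, in degrees.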
def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
"""
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Default is 0.5A.
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
"""
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
@abstractmethod
def to(self, fmt: str = None, filename: str = None):
"""
Generates well-known string representations of SiteCollections (e.g.,
molecules / structures). Should return a string type or write to a file.
"""
pass
@classmethod
@abstractmethod
def from_str(cls, input_string: str, fmt: str):
"""
Reads in SiteCollection from a string.
"""
pass
@classmethod
@abstractmethod
def from_file(cls, filename: str):
"""
Reads in SiteCollection from a filename.
"""
pass
def add_site_property(self, property_name, values):
"""
Adds a property to a site.
Args:
property_name (str): The name of the property to add.
values (list): A sequence of values. Must be same length as
number of sites.
"""
if len(values) != len(self.sites):
raise ValueError("Values must be same length as sites.")
for site, val in zip(self.sites, values):
site.properties[property_name] = val
def remove_site_property(self, property_name):
"""
        Removes a property from all sites.
        Args:
            property_name (str): The name of the property to remove.
"""
for site in self.sites:
del site.properties[property_name]
def replace_species(self, species_mapping):
"""
Swap species.
Args:
species_mapping (dict): dict of species to swap. Species can be
elements too. E.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si'): {Element('Ge'):0.75,
Element('C'):0.25} } will have .375 Ge and .125 C.
"""
species_mapping = {get_el_sp(k): v
for k, v in species_mapping.items()}
sp_to_replace = set(species_mapping.keys())
sp_in_structure = set(self.composition.keys())
if not sp_in_structure.issuperset(sp_to_replace):
warnings.warn(
"Some species to be substituted are not present in "
"structure. Pls check your input. Species to be "
"substituted = %s; Species in structure = %s"
% (sp_to_replace, sp_in_structure))
for site in self._sites:
if sp_to_replace.intersection(site.species):
c = Composition()
for sp, amt in site.species.items():
new_sp = species_mapping.get(sp, sp)
try:
c += Composition(new_sp) * amt
except Exception:
c += {new_sp: amt}
site.species = c
def add_oxidation_state_by_element(self, oxidation_states):
"""
Add oxidation states.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for site in self.sites:
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[sym])] = occu
site.species = new_sp
except KeyError:
raise ValueError("Oxidation state of all elements must be "
"specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
if len(oxidation_states) != len(self.sites):
raise ValueError("Oxidation states of all sites must be "
"specified.")
for site, ox in zip(self.sites, oxidation_states):
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Specie(sym, ox)] = occu
site.species = new_sp
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for el, occu in site.species.items():
sym = el.symbol
new_sp[Element(sym)] += occu
site.species = new_sp
def add_oxidation_state_by_guess(self, **kwargs):
"""
Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses()
"""
oxid_guess = self.composition.oxi_state_guesses(**kwargs)
oxid_guess = oxid_guess or \
[dict([(e.symbol, 0) for e in self.composition])]
self.add_oxidation_state_by_element(oxid_guess[0])
def add_spin_by_element(self, spins):
"""
Add spin states to a structure.
Args:
            spins (dict): Dict of spins associated with
elements or species, e.g. {"Ni":+5} or {"Ni2+":5}
"""
for site in self.sites:
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Specie(sym, oxidation_state=oxi_state,
properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu
site.species = new_sp
def add_spin_by_site(self, spins):
"""
Add spin states to a structure by site.
Args:
spins (list): List of spins
E.g., [+5, -5, 0, 0]
"""
if len(spins) != len(self.sites):
raise ValueError("Spin of all sites must be "
"specified in the dictionary.")
for site, spin in zip(self.sites, spins):
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Specie(sym, oxidation_state=oxi_state,
properties={'spin': spin})] = occu
site.species = new_sp
def remove_spin(self):
"""
Removes spin states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for sp, occu in site.species.items():
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Specie(sp.symbol, oxidation_state=oxi_state)] += occu
site.species = new_sp
def extract_cluster(self, target_sites, **kwargs):
"""
Extracts a cluster of atoms based on bond lengths
Args:
target_sites ([Site]): List of initial sites to nucleate cluster.
\\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded.
Returns:
[Site/PeriodicSite] Cluster of atoms.
"""
cluster = list(target_sites)
others = [site for site in self if site not in cluster]
size = 0
while len(cluster) > size:
size = len(cluster)
new_others = []
for site in others:
for site2 in cluster:
if CovalentBond.is_bonded(site, site2, **kwargs):
cluster.append(site)
break
else:
new_others.append(site)
others = new_others
return cluster
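    # Hypothetical usage sketch (not part of the original source; assumes
    # Molecule.from_sites exists, as elsewhere in pymatgen): grow a cluster
    # outwards from one seed site via covalent bonds, then wrap it up again:
    #   cluster_sites = mol.extract_cluster([mol[0]])
    #   cluster = Molecule.from_sites(cluster_sites)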
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(self, lattice: Lattice, species: list, coords: list,
charge: float = None, validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None):
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError("The list of atomic species must be of the"
" same length as the list of fractional"
" coordinates.")
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i]
for k, v in site_properties.items()}
sites.append(
PeriodicSite(species[i], coords[i], self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Structure contains sites that are ",
"less than 0.01 Angstrom apart!"))
self._charge = charge
@classmethod
def from_sites(cls, sites, charge=None, validate_proximity=False,
to_unit_cell=False):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" %
cls)
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell)
@classmethod
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
frac_coords = np.array(coords, dtype=np.float) \
if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
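    # Hypothetical usage sketch (not part of the original source): rock-salt
    # NaCl built from its two symmetrically distinct sites (lattice parameter
    # approximate):
    #   nacl = Structure.from_spacegroup(
    #       "Fm-3m", Lattice.cubic(5.64), ["Na", "Cl"],
    #       [[0, 0, 0], [0.5, 0.5, 0.5]])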
@classmethod
def from_magnetic_spacegroup(
cls, msg, lattice, species, coords, site_properties,
transform_setting=None, coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a magnetic spacegroup. Note that only
        symmetrically distinct species, coords and magmoms should be provided.
All equivalent sites are generated from the spacegroup operations.
Args:
msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):
The magnetic spacegroup.
If a string, it will be interpreted as one of the notations
supported by MagneticSymmetryGroup, e.g., "R-3'c" or "Fm'-3'm".
If a list of two ints, it will be interpreted as the number of
the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Unlike Structure.from_spacegroup(),
this argument is mandatory, since magnetic moment information
has to be included. Note that the *direction* of the supplied
magnetic moment relative to the crystal is important, even if
the resulting structure is used for collinear calculations.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
if 'magmom' not in site_properties:
raise ValueError('Magnetic moments have to be defined.')
else:
magmoms = [Magmom(m) for m in site_properties['magmom']]
if not isinstance(msg, MagneticSpaceGroup):
msg = MagneticSpaceGroup(msg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not msg.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
msg.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
if len(species) != len(magmoms):
raise ValueError(
"Supplied species and magmom lengths (%d vs %d) are "
"different!" % (len(species), len(magmoms))
)
frac_coords = coords if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
all_sp = []
all_coords = []
all_magmoms = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)):
cc, mm = msg.get_orbit(c, m, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
all_magmoms.extend(mm)
for k, v in site_properties.items():
if k != 'magmom':
all_site_properties[k].extend([v[i]] * len(cc))
all_site_properties['magmom'] = all_magmoms
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
@property
def charge(self):
"""
Overall charge of the structure
"""
if self._charge is None:
return super(IStructure, self).charge
else:
return self._charge
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords)
@property
def sites(self):
"""
Returns an iterator for the sites in the Structure.
"""
return self._sites
@property
def lattice(self):
"""
Lattice of the structure.
"""
return self._lattice
@property
def density(self):
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
"""
Convenience method to quickly get the spacegroup of a structure.
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number
"""
# Import within method needed to avoid cyclic dependency.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
a = SpacegroupAnalyzer(self, symprec=symprec,
angle_tolerance=angle_tolerance)
return a.get_space_group_symbol(), a.get_space_group_number()
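    # Hypothetical usage sketch (not part of the original source):
    #   symbol, number = struct.get_space_group_info()  # e.g. ('Fm-3m', 225)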
def matches(self, other, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
            (bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
return m.fit(Structure.from_sites(self), Structure.from_sites(other))
def __eq__(self, other):
if other is self:
return True
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __mul__(self, scaling_matrix):
"""
        Makes a supercell. Allows sites outside the unit cell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
                a. A full 3x3 scaling matrix defining the linear combination of
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
                b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
Returns:
Supercell structure. Note that a Structure is always returned,
even if the input structure is a subclass of Structure. This is
            to avoid different argument signatures from causing problems. If
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
for site in self:
for v in c_lat:
s = PeriodicSite(site.species, site.coords + v,
new_lattice, properties=site.properties,
coords_are_cartesian=True, to_unit_cell=False)
new_sites.append(s)
new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None
return Structure.from_sites(new_sites, charge=new_charge)
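    # Hypothetical usage sketch (not part of the original source): each of the
    # following builds a supercell from an existing Structure
    #   struct * 2                                  # 2 x 2 x 2 supercell
    #   struct * [2, 1, 1]                          # doubled along a only
    #   struct * [[1, 1, 0], [0, 1, 0], [0, 0, 1]]  # general 3x3 scaling matrix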
def __rmul__(self, scaling_matrix):
"""
        Similar to __mul__ to preserve commutativity.
"""
return self.__mul__(scaling_matrix)
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self):
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i, j, jimage=None):
"""
Get distance between site i and j assuming periodic boundary
        conditions. If the image index jimage of atom j is not specified, the
        periodic image of j nearest to atom i is selected and the distance to
        that image is returned. If jimage is specified, the distance between
        atom i and that particular image of atom j is returned.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):
"""
Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
        (parallelepiped) which would contain a sphere of radius r. For this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
        determine how many a_1's it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = []
for fcoord, dist, i, img in self._lattice.get_points_in_sphere(
site_fcoords, pt, r):
nnsite = PeriodicSite(self[i].species,
fcoord, self._lattice,
properties=self[i].properties)
# Get the neighbor data
nn_data = (nnsite, dist) if not include_index else (nnsite, dist, i)
if include_image:
nn_data += (img,)
neighbors.append(nn_data)
return neighbors
def get_neighbors(self, site, r, include_index=False, include_image=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
If include_index == True, the tuple for each neighbor also includes
the index of the neighbor.
            If include_image == True, the tuple for each neighbor also includes
            the supercell image.
"""
nn = self.get_sites_in_sphere(site.coords, r,
include_index=include_index,
include_image=include_image)
return [d for d in nn if site != d[0]]
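    # Hypothetical usage sketch (not part of the original source):
    #   for nbr, dist in struct.get_neighbors(struct[0], 3.0):
    #       print(nbr.specie, round(dist, 2))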
def get_all_neighbors(self, r, include_index=False, include_image=False):
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell.
        However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
A note about periodic images: Before computing the neighbors, this operation
translates all atoms to within the unit cell (having fractional coordinates within [0,1)).
        This means that the "image" of a site does not correspond to how far it has been
        translated from its current position, but rather to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Whether to include the non-supercell site
in the returned data
include_image (bool): Whether to include the supercell image
in the returned data
Returns:
A list of a list of nearest neighbors for each site, i.e.,
[[(site, dist, index) ...], ..]
Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
Image only supplied if include_image = True
"""
# Use same algorithm as get_sites_in_sphere to determine supercell but
# loop over all atoms in crystal
recp_len = np.array(self.lattice.reciprocal_lattice.abc)
maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
latt = self._lattice
neighbors = [list() for _ in range(len(self._sites))]
all_fcoords = np.mod(self.frac_coords, 1)
coords_in_cell = latt.get_cartesian_coords(all_fcoords)
site_coords = self.cart_coords
indices = np.arange(len(self))
for image in itertools.product(*all_ranges):
coords = latt.get_cartesian_coords(image) + coords_in_cell
all_dists = all_distances(coords, site_coords)
all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
for (j, d, within_r) in zip(indices, all_dists, all_within_r):
nnsite = PeriodicSite(self[j].species, coords[j],
latt, properties=self[j].properties,
coords_are_cartesian=True)
for i in indices[within_r]:
item = (nnsite, d[i], j) if include_index else (
nnsite, d[i])
# Add the image, if requested
if include_image:
item += (image,)
neighbors[i].append(item)
return neighbors
def get_neighbors_in_shell(self, origin, r, dr, include_index=False, include_image=False):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Whether to include the non-supercell site
in the returned data
include_image (bool): Whether to include the supercell image
in the returned data
Returns:
            [(site, dist, index) ...] since most of the time, subsequent
            processing requires the distance. Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
Image only supplied if include_image = True
"""
outer = self.get_sites_in_sphere(origin, r + dr,
include_index=include_index,
include_image=include_image)
inner = r - dr
return [t for t in outer if t[1] > inner]
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
return self.__class__.from_sites(sites, charge=self._charge)
def get_reduced_structure(self, reduction_algo="niggli"):
"""
Get a reduced structure.
Args:
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError("Invalid reduction algo : {}"
.format(reduction_algo))
if reduced_latt != self.lattice:
return self.__class__(reduced_latt, self.species_and_occu,
self.cart_coords,
coords_are_cartesian=True, to_unit_cell=True,
site_properties=self.site_properties, charge=self._charge)
else:
return self.copy()
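# Usage sketch (illustrative only): both reduction algorithms return a
# new structure and leave the original untouched. `struct` is a
# hypothetical Structure instance.
#
#     niggli = struct.get_reduced_structure(reduction_algo="niggli")
#     lll = struct.get_reduced_structure(reduction_algo="LLL")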
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(self._lattice,
self.species_and_occu,
self.frac_coords,
charge=self._charge,
site_properties=props)
else:
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p in props:
site_props[p] = props[p][i]
new_sites.append(PeriodicSite(site.species,
frac_coords, reduced_latt,
to_unit_cell=True,
properties=site_props))
new_sites = sorted(new_sites)
return self.__class__.from_sites(new_sites, charge=self._charge)
def interpolate(self, end_structure, nimages=10,
interpolate_lattices=False, pbc=True, autosort_tol=0):
"""
Interpolate between this structure and end_structure. Useful for
construction of NEB inputs.
Args:
end_structure (Structure): structure to interpolate between this
structure and end.
nimages (int): No. of interpolation images. Defaults to 10 images.
interpolate_lattices (bool): Whether to interpolate the lattices.
Interpolates the lengths and angles (rather than the matrix)
so orientation may be affected.
pbc (bool): Whether to use periodic boundary conditions to find
the shortest path between endpoints.
autosort_tol (float): A distance tolerance in angstrom in
which to automatically sort end_structure to match to the
closest points in this particular structure. This is usually
what you want in a NEB calculation. 0 implies no sorting.
Otherwise, a 0.5 value usually works pretty well.
Returns:
List of interpolated structures. The starting and ending
structures included as the first and last structures respectively.
A total of (nimages + 1) structures are returned.
"""
# Check length of structures
if len(self) != len(end_structure):
raise ValueError("Structures have different lengths!")
if not (interpolate_lattices or self.lattice == end_structure.lattice):
raise ValueError("Structures with different lattices!")
# Check that both structures have the same species
for i in range(len(self)):
if self[i].species != end_structure[i].species:
raise ValueError("Different species!\nStructure 1:\n" +
str(self) + "\nStructure 2\n" +
str(end_structure))
start_coords = np.array(self.frac_coords)
end_coords = np.array(end_structure.frac_coords)
if autosort_tol:
dist_matrix = self.lattice.get_all_distances(start_coords,
end_coords)
site_mappings = collections.defaultdict(list)
unmapped_start_ind = []
for i, row in enumerate(dist_matrix):
ind = np.where(row < autosort_tol)[0]
if len(ind) == 1:
site_mappings[i].append(ind[0])
else:
unmapped_start_ind.append(i)
if len(unmapped_start_ind) > 1:
raise ValueError("Unable to reliably match structures "
"with auto_sort_tol = %f. unmapped indices "
"= %s" % (autosort_tol, unmapped_start_ind))
sorted_end_coords = np.zeros_like(end_coords)
matched = []
for i, j in site_mappings.items():
if len(j) > 1:
raise ValueError("Unable to reliably match structures "
"with auto_sort_tol = %f. More than one "
"site match!" % autosort_tol)
sorted_end_coords[i] = end_coords[j[0]]
matched.append(j[0])
if len(unmapped_start_ind) == 1:
i = unmapped_start_ind[0]
j = list(set(range(len(start_coords))).difference(matched))[0]
sorted_end_coords[i] = end_coords[j]
end_coords = sorted_end_coords
vec = end_coords - start_coords
if pbc:
vec -= np.round(vec)
sp = self.species_and_occu
structs = []
if interpolate_lattices:
# interpolate lattice matrices using polar decomposition
from scipy.linalg import polar
# u is unitary (rotation), p is stretch
u, p = polar(np.dot(end_structure.lattice.matrix.T,
np.linalg.inv(self.lattice.matrix.T)))
lvec = p - np.identity(3)
lstart = self.lattice.matrix.T
for x in range(nimages + 1):
if interpolate_lattices:
l_a = np.dot(np.identity(3) + x / nimages * lvec, lstart).T
l = Lattice(l_a)
else:
l = self.lattice
fcoords = start_coords + x / nimages * vec
structs.append(self.__class__(l, sp, fcoords,
site_properties=self.site_properties))
return structs
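# Usage sketch (illustrative only) for building NEB images. `start` and
# `end` are hypothetical Structure objects with the same lattice,
# composition and site ordering (or pass autosort_tol to match sites
# automatically).
#
#     images = start.interpolate(end, nimages=5, autosort_tol=0.5)
#     assert len(images) == 6  # both endpoints are included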
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4,
verbose=True):
"""
Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
return self.lattice.get_miller_index_from_coords(
self.frac_coords[site_ids], coords_are_cartesian=False,
round_dp=round_dp, verbose=verbose)
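# Usage sketch (illustrative only): Miller index of the plane passing
# through three sites of a hypothetical slab Structure `slab`. The site
# indices are arbitrary.
#
#     hkl = slab.get_miller_index_from_site_indexes([0, 1, 2])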
def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
constrain_latt=None):
"""
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
"""
if constrain_latt is None:
constrain_latt = []
def site_label(site):
if not use_site_props:
return site.species_string
else:
d = [site.species_string]
for k in sorted(site.properties.keys()):
d.append(k + "=" + str(site.properties[k]))
return ", ".join(d)
# group sites by species string
sites = sorted(self._sites, key=site_label)
grouped_sites = [
list(a[1])
for a in itertools.groupby(sites, key=site_label)]
grouped_fcoords = [np.array([s.frac_coords for s in g])
for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in
itertools.product(range(a), range(a),
range(e))])
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = functools.reduce(gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
# find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites,
grouped_fcoords,
grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(new_l, new_sp, new_coords,
site_properties=new_props,
coords_are_cartesian=False)
# Default behavior
p = s.get_primitive_structure(
tolerance=tolerance, use_site_props=use_site_props,
constrain_latt=constrain_latt
).get_reduced_structure()
if not constrain_latt:
return p
# Only return primitive structures that
# satisfy the restriction condition
p_latt, s_latt = p.lattice, self.lattice
if type(constrain_latt).__name__ == "list":
if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]):
return p
elif type(constrain_latt).__name__ == "dict":
if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]):
return p
return self.copy()
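# Usage sketch (illustrative only): reducing a conventional cell to its
# primitive cell. `conventional` is a hypothetical Structure, e.g. a
# 4-atom fcc conventional cell whose primitive cell holds one atom.
#
#     prim = conventional.get_primitive_structure()
#     assert len(prim) <= len(conventional)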
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula ({s})".format(s=self.composition.formula),
"Reduced Formula: {}"
.format(self.composition.reduced_formula)]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
outs.append("Sites ({i})".format(i=len(self)))
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, site in enumerate(self):
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
for k in keys:
row.append(props[k][i])
data.append(row)
from tabulate import tabulate
outs.append(tabulate(data, headers=["#", "SP", "a", "b", "c"] + keys,
))
return "\n".join(outs)
def as_dict(self, verbosity=1, fmt=None, **kwargs):
"""
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation.
"""
if fmt == "abivars":
"""Returns a dictionary with the ABINIT variables."""
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"lattice": latt_dict, "sites": []}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d, fmt=None):
"""
Reconstitute a Structure object from a dict representation of Structure
created using as_dict().
Args:
d (dict): Dict representation of structure.
Returns:
Structure object
"""
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_from_abivars
return structure_from_abivars(cls=cls, **d)
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
charge = d.get("charge", None)
return cls.from_sites(sites, charge=charge)
def to(self, fmt=None, filename=None, **kwargs):
"""
Outputs the structure to a file or string.
Args:
fmt (str): Format to output to. If fmt is specified, it overrides
whatever the filename is. Options include "cif", "mcif",
"poscar", "cssr", "json", "xsf", "mcsqs" and "yaml". If neither
fmt nor a recognizable filename is given, YAML output is
produced. Not case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e., string output.
\\*\\*kwargs: Kwargs passthru to relevant methods. E.g., This allows
the passing of parameters like symprec to the
CifWriter.__init__ method for generation of symmetric cifs.
Returns:
(str) if filename is None. None otherwise.
"""
filename = filename or ""
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename)
if fmt == "cif" or fnmatch(fname.lower(), "*.cif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, **kwargs)
elif fmt == "mcif" or fnmatch(fname.lower(), "*.mcif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, write_magmoms=True, **kwargs)
elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
from pymatgen.io.vasp import Poscar
writer = Poscar(self, **kwargs)
elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
from pymatgen.io.cssr import Cssr
writer = Cssr(self, **kwargs)
elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
s = json.dumps(self.as_dict())
if filename:
with zopen(filename, "wt") as f:
f.write("%s" % s)
return s
elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
from pymatgen.io.xcrysden import XSF
s = XSF(self).to_string()
if filename:
with zopen(fname, "wt", encoding='utf8') as f:
f.write(s)
return s
elif fmt == 'mcsqs' or fnmatch(fname, "*rndstr.in*") \
or fnmatch(fname, "*lat.in*") \
or fnmatch(fname, "*bestsqs*"):
from pymatgen.io.atat import Mcsqs
s = Mcsqs(self).to_string()
if filename:
with zopen(fname, "wt", encoding='ascii') as f:
f.write(s)
return s
else:
import ruamel.yaml as yaml
if filename:
with zopen(filename, "wt") as f:
yaml.safe_dump(self.as_dict(), f)
return
else:
return yaml.safe_dump(self.as_dict())
if filename:
writer.write_file(filename)
else:
return writer.__str__()
@classmethod
def from_str(cls, input_string, fmt, primitive=False, sort=False,
merge_tol=0.0):
"""
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance with the
default ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
"""
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp import Poscar
from pymatgen.io.cssr import Cssr
from pymatgen.io.xcrysden import XSF
from pymatgen.io.atat import Mcsqs
fmt = fmt.lower()
if fmt == "cif":
parser = CifParser.from_string(input_string)
s = parser.get_structures(primitive=primitive)[0]
elif fmt == "poscar":
s = Poscar.from_string(input_string, False,
read_velocities=False).structure
elif fmt == "cssr":
cssr = Cssr.from_string(input_string)
s = cssr.structure
elif fmt == "json":
d = json.loads(input_string)
s = Structure.from_dict(d)
elif fmt == "yaml":
import ruamel.yaml as yaml
d = yaml.safe_load(input_string)
s = Structure.from_dict(d)
elif fmt == "xsf":
s = XSF.from_string(input_string).structure
elif fmt == "mcsqs":
s = Mcsqs.structure_from_string(input_string)
else:
raise ValueError("Unrecognized format `%s`!" % fmt)
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a file. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.
Args:
filename (str): The filename to read from.
primitive (bool): Whether to convert to a primitive cell
Only available for cifs. Defaults to False.
sort (bool): Whether to sort sites. Default to False.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
Structure.
"""
filename = str(filename)
if filename.endswith(".nc"):
# Read Structure from a netcdf file.
from pymatgen.io.abinit.netcdf import structure_from_ncdata
s = structure_from_ncdata(filename, cls=cls)
if sort:
s = s.get_sorted_structure()
return s
from pymatgen.io.lmto import LMTOCtrl
from pymatgen.io.vasp import Vasprun, Chgcar
from pymatgen.io.exciting import ExcitingInput
from monty.io import zopen
fname = os.path.basename(filename)
with zopen(filename, "rt") as f:
contents = f.read()
if fnmatch(fname.lower(), "*.cif*") or fnmatch(fname.lower(), "*.mcif*"):
return cls.from_str(contents, fmt="cif",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or fnmatch(fname, "*.vasp"):
s = cls.from_str(contents, fmt="poscar",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
return cls.from_str(contents, fmt="cssr",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.xsf"):
return cls.from_str(contents, fmt="xsf",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "input*.xml"):
return ExcitingInput.from_file(filename).structure
elif fnmatch(fname, "*rndstr.in*") \
or fnmatch(fname, "*lat.in*") \
or fnmatch(fname, "*bestsqs*"):
return cls.from_str(contents, fmt="mcsqs",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "CTRL*"):
return LMTOCtrl.from_file(filename=filename).structure
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
s.__class__ = cls
return s
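# Usage sketch (illustrative only) of the file I/O round trip. The
# filenames are hypothetical; the format is inferred from the filename
# unless fmt is given explicitly.
#
#     s = Structure.from_file("POSCAR")       # format from filename
#     s.to(filename="out.cif")                # write as CIF
#     s2 = Structure.from_str(s.to(fmt="json"), fmt="json")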
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
sequence of sites. IMolecule is made to be immutable so that it can
function as a key in a dict. For a mutable molecule,
use the :class:Molecule.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(self, species: list, coords: list, charge: float = 0,
spin_multiplicity: float = None,
validate_proximity: bool = False,
site_properties: dict = None):
"""
Creates a Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Specie, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(("The list of atomic species must be of the",
" same length as the list of fractional ",
"coordinates."))
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(Site(species[i], coords[i], properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Molecule contains sites that are ",
"less than 0.01 Angstrom apart!"))
self._charge = charge
nelectrons = 0
for site in sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecie):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of %d and spin multiplicity of %d is"
" not possible for this molecule" %
(self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
@property
def charge(self):
"""
Charge of molecule
"""
return self._charge
@property
def spin_multiplicity(self):
"""
Spin multiplicity of molecule.
"""
return self._spin_multiplicity
@property
def nelectrons(self):
"""
Number of electrons in the molecule.
"""
return self._nelectrons
@property
def center_of_mass(self):
"""
Center of mass of molecule.
"""
center = np.zeros(3)
total_weight = 0
for site in self:
wt = site.species.weight
center += site.coords * wt
total_weight += wt
return center / total_weight
@property
def sites(self):
"""
Returns a tuple of sites in the Molecule.
"""
return self._sites
@classmethod
def from_sites(cls, sites, charge=0, spin_multiplicity=None,
validate_proximity=False):
"""
Convenience constructor to make a Molecule from a list of sites.
Args:
sites ([Site]): Sequence of Sites.
charge (int): Charge of molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity. Defaults to None,
in which case it is determined automatically.
validate_proximity (bool): Whether to check that no sites are
unphysically close to each other.
"""
props = collections.defaultdict(list)
for site in sites:
for k, v in site.properties.items():
props[k].append(v)
return cls([site.species for site in sites],
[site.coords for site in sites],
charge=charge, spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=props)
def break_bond(self, ind1, ind2, tol=0.2):
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters)
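# Usage sketch (illustrative only): splitting a molecule across one
# bond. `mol` is a hypothetical Molecule (e.g. ethane), assuming
# indices 0 and 1 are the two bonded carbons; the result is two
# fragments.
#
#     frag1, frag2 = mol.break_bond(0, 1)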
def get_covalent_bonds(self, tol=0.2):
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __repr__(self):
outs = ["Molecule Summary"]
for s in self:
outs.append(s.__repr__())
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula (%s)" % self.composition.formula,
"Reduced Formula: " + self.composition.reduced_formula,
"Charge = %s, Spin Mult = %s" % (
self._charge, self._spin_multiplicity),
"Sites (%d)" % len(self)]
for i, site in enumerate(self):
outs.append(" ".join([str(i), site.species_string,
" ".join([("%0.6f" % j).rjust(12)
for j in site.coords])]))
return "\n".join(outs)
def as_dict(self):
"""
Json-serializable dict representation of Molecule
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"spin_multiplicity": self._spin_multiplicity,
"sites": []}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)
def get_distance(self, i, j):
"""
Get distance between site i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
Distance between the two sites.
"""
return self[i].distance(self[j])
def get_sites_in_sphere(self, pt, r):
"""
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
neighbors = []
for site in self._sites:
dist = site.distance_from_point(pt)
if dist <= r:
neighbors.append((site, dist))
return neighbors
def get_neighbors(self, site, r):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r)
return [(s, dist) for (s, dist) in nn if site != s]
def get_neighbors_in_shell(self, origin, r, dr):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [(site, dist) for (site, dist) in outer if dist > inner]
def get_boxed_structure(self, a, b, c, images=(1, 1, 1),
random_rotation=False, min_dist=1, cls=None,
offset=None, no_cross=False):
"""
Creates a Structure from a Molecule by putting the Molecule in the
center of an orthorhombic box. Useful for creating a Structure for
calculating molecules using periodic codes.
Args:
a (float): a-lattice parameter.
b (float): b-lattice parameter.
c (float): c-lattice parameter.
images: No. of boxed images in each direction. Defaults to
(1, 1, 1), meaning single molecule with 1 lattice parameter
in each direction.
random_rotation (bool): Whether to apply a random rotation to
each molecule. This jumbles all the molecules so that they
are not exact images of each other.
min_dist (float): The minimum distance that atoms should be from
each other. This is only used if random_rotation is True.
The randomized rotations are searched such that no two atoms
are less than min_dist from each other.
cls: The Structure class to instantiate (defaults to pymatgen
structure)
offset: Translation to offset molecule from center of mass coords
no_cross: Whether to forbid molecule coords from extending beyond
boundary of box.
Returns:
Structure containing molecule in a box.
"""
if offset is None:
offset = np.array([0, 0, 0])
coords = np.array(self.cart_coords)
x_range = max(coords[:, 0]) - min(coords[:, 0])
y_range = max(coords[:, 1]) - min(coords[:, 1])
z_range = max(coords[:, 2]) - min(coords[:, 2])
if a <= x_range or b <= y_range or c <= z_range:
raise ValueError("Box is not big enough to contain Molecule.")
lattice = Lattice.from_parameters(a * images[0], b * images[1],
c * images[2],
90, 90, 90)
nimages = images[0] * images[1] * images[2]
coords = []
centered_coords = self.cart_coords - self.center_of_mass + offset
for i, j, k in itertools.product(list(range(images[0])),
list(range(images[1])),
list(range(images[2]))):
box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
if random_rotation:
while True:
op = SymmOp.from_origin_axis_angle(
(0, 0, 0), axis=np.random.rand(3),
angle=random.uniform(-180, 180))
m = op.rotation_matrix
new_coords = np.dot(m, centered_coords.T).T + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
if len(coords) == 0:
break
distances = lattice.get_all_distances(
lattice.get_fractional_coords(new_coords),
lattice.get_fractional_coords(coords))
if np.amin(distances) > min_dist:
break
else:
new_coords = centered_coords + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
coords.extend(new_coords)
sprops = {k: v * nimages for k, v in self.site_properties.items()}
if cls is None:
cls = Structure
return cls(lattice, self.species * nimages, coords,
coords_are_cartesian=True,
site_properties=sprops).get_sorted_structure()
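# Usage sketch (illustrative only): embedding a molecule in a periodic
# box for a plane-wave code. `mol` is a hypothetical Molecule; the box
# must exceed the molecule's extent in every direction.
#
#     boxed = mol.get_boxed_structure(15.0, 15.0, 15.0)
#     # boxed is a sorted Structure with the molecule at the box center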
def get_centered_molecule(self):
"""
Returns a Molecule centered at the center of mass.
Returns:
Molecule centered with center of mass at origin.
"""
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return self.__class__(self.species_and_occu, new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties)
def to(self, fmt=None, filename=None):
"""
Outputs the molecule to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specified, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json" and
"yaml". If you have OpenBabel installed, any of the formats
supported by OpenBabel. Not case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e., string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.babel import BabelMolAdaptor
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename or "")
if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
writer = XYZ(self)
elif any([fmt == r or fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
writer = GaussianInput(self)
elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname,
"*.mson*"):
if filename:
with zopen(filename, "wt", encoding='utf8') as f:
return json.dump(self.as_dict(), f)
else:
return json.dumps(self.as_dict())
elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
import ruamel.yaml as yaml
if filename:
with zopen(fname, "wt", encoding='utf8') as f:
return yaml.safe_dump(self.as_dict(), f)
else:
return yaml.safe_dump(self.as_dict())
else:
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
fname.lower())
if (not fmt) and m:
fmt = m.group(1)
writer = BabelMolAdaptor(self)
return writer.write_file(filename, file_format=fmt)
if filename:
writer.write_file(filename)
else:
return str(writer)
@classmethod
def from_str(cls, input_string, fmt):
"""
Reads the molecule from a string.
Args:
input_string (str): String to parse.
fmt (str): Format of the input string. Options include "xyz",
"gjf", "g03", "json" and "yaml". If you have OpenBabel
installed, any of the formats supported by OpenBabel. Not case
sensitive.
Returns:
IMolecule or Molecule.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
if fmt.lower() == "xyz":
m = XYZ.from_string(input_string).molecule
elif fmt in ["gjf", "g03", "g09", "com", "inp"]:
m = GaussianInput.from_string(input_string).molecule
elif fmt == "json":
d = json.loads(input_string)
return cls.from_dict(d)
elif fmt == "yaml":
import ruamel.yaml as yaml
d = yaml.safe_load(input_string)
return cls.from_dict(d)
else:
from pymatgen.io.babel import BabelMolAdaptor
m = BabelMolAdaptor.from_string(input_string,
file_format=fmt).pymatgen_mol
return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
"""
Reads a molecule from a file. Supported formats include xyz,
Gaussian input (gjf|g03|g09|com|inp), Gaussian output
(.out|.lis|.log) and pymatgen's JSON serialized molecules. With
OpenBabel installed, many more extensions are supported.
Args:
filename (str): The filename to read from.
Returns:
Molecule
"""
filename = str(filename)
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return cls.from_str(contents, fmt="g09")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["out", "lis", "log"]]):
return GaussianOutput(filename).final_structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
else:
from pymatgen.io.babel import BabelMolAdaptor
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename,
m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Unrecognized file extension!")
class Structure(IStructure, collections.abc.MutableSequence):
"""
Mutable version of structure.
"""
__hash__ = None
def __init__(self, lattice: Lattice, species: list, coords: np.ndarray,
charge: float = None, validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None):
"""
Create a periodic structure.
Args:
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
super(Structure, self).__init__(
lattice, species, coords, charge=charge,
validate_proximity=validate_proximity, to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
self._sites = list(self._sites)
def __setitem__(self, i, site):
"""
Modify a site in the structure.
Args:
i (int, [int], slice, Specie-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Specie/Sequence): Three options exist. You
can provide a PeriodicSite directly (lattice will be
checked). Or more conveniently, you can provide a
specie-like object or a tuple of up to length 3.
Examples:
s[0] = "Fe"
s[0] = Element("Fe")
Both replace the species only.
s[0] = "Fe", [0.5, 0.5, 0.5]
Replaces site and *fractional* coordinates. Any properties
are inherited from current site.
s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
Replaces site and *fractional* coordinates and properties.
s[(0, 2, 3)] = "Fe"
Replaces sites 0, 2 and 3 with Fe.
s[0::2] = "Fe"
Replaces all even index sites with Fe.
s["Mn"] = "Fe"
Replaces all Mn in the structure with Fe. This is
a short form for the more complex replace_species.
s["Mn"] = "Fe0.5Co0.5"
Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
creates a disordered structure!
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Specie)):
self.replace_species({i: site})
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites)
if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, PeriodicSite):
if site.lattice != self._lattice:
raise ValueError("PeriodicSite added must have same lattice "
"as Structure!")
elif len(indices) != 1:
raise ValueError("Site assignments makes sense only for "
"single int indices!")
self._sites[ii] = site
else:
if isinstance(site, str) or (
not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site
else:
self._sites[ii].species = site[0]
if len(site) > 1:
self._sites[ii].frac_coords = site[1]
if len(site) > 2:
self._sites[ii].properties = site[2]
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
@property
def lattice(self):
return self._lattice
@lattice.setter
def lattice(self, lattice):
self._lattice = lattice
for site in self._sites:
site.lattice = lattice
def append(self, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties of the site.
Returns:
New structure with inserted site.
"""
return self.insert(len(self), species, coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties)
def insert(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties associated with the site.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice,
properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def replace(self, i, species, coords=None, coords_are_cartesian=False,
properties=None):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
properties (dict): Properties associated with the site.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
self._sites[i] = new_site
def substitute(self, index, func_grp, bond_order=1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2 in self.get_neighbors(nn, 3):
if inn != self[index] and \
dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach"
" functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if isinstance(func_grp, Molecule):
func_grp = func_grp
else:
# Check to see whether the functional group is in database.
if func_grp not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
else:
func_grp = FunctionalGroups[func_grp]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
try:
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
bond_order=bond_order)
# Catches for case of incompatibility between Element(s) and Specie(s)
except TypeError:
bl = None
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
vec /= np.linalg.norm(vec)
func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i in range(len(func_grp)):
func_grp[i] = (func_grp[i].species,
origin - (func_grp[i].coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
s_new = PeriodicSite(site.species, site.coords,
self.lattice, coords_are_cartesian=True)
self._sites.append(s_new)
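# Usage sketch (illustrative only): swapping a terminal atom for a
# functional group. `struct` and the site index are hypothetical;
# "methyl" is assumed to be one of the templates in func_groups.json.
#
#     struct.substitute(4, "methyl")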
def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(
new_sp_occu, site.frac_coords, self._lattice,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [s for i, s in enumerate(self._sites)
if i not in indices]
def apply_operation(self, symmop, fractional=False):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row)
for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species, new_frac,
self._lattice,
properties=site.properties)
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(site.species,
symmop.operate(site.frac_coords),
self._lattice,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
@deprecated(message="Simply set using Structure.lattice = lattice. This will be removed in pymatgen v2020.")
def modify_lattice(self, new_lattice):
"""
Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice (Lattice): New lattice
"""
self._lattice = new_lattice
for site in self._sites:
site.lattice = new_lattice
def apply_strain(self, strain):
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
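# Usage sketch (illustrative only): isotropic and anisotropic strains
# applied in place to a hypothetical Structure `struct`.
#
#     struct.apply_strain(0.01)          # +1% on all lattice vectors
#     struct.apply_strain([0.01, 0, 0])  # +1% along a only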
def sort(self, key=None, reverse=False):
"""
Sort a structure in place. The parameters have the same meaning as in
list.sort. By default, sites are sorted by the electronegativity of
the species. The difference between this method and
get_sorted_structure (which also works in IStructure) is that the
latter returns a new Structure, while this just sorts the Structure
in place.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
self._sites.sort(key=key, reverse=reverse)
def translate_sites(self, indices, vector, frac_coords=True,
to_unit_cell=True):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.abc.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(
site.coords + vector)
if to_unit_cell:
fcoords = np.mod(fcoords, 1)
self._sites[i].frac_coords = fcoords
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,
to_unit_cell=True):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
from numpy.linalg import norm
from numpy import cross, eye
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()
new_site = PeriodicSite(
site.species, coords, self._lattice,
to_unit_cell=to_unit_cell, coords_are_cartesian=True,
properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
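# Usage sketch (illustrative only): breaking symmetry before a
# relaxation by jittering every site of a hypothetical Structure
# `struct` by 0.05 Angstrom in a random direction.
#
#     struct.perturb(0.05)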
def make_supercell(self, scaling_matrix, to_unit_cell=True):
"""
Create a supercell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
to_unit_cell: Whether or not to map sites back into the unit cell
"""
s = self * scaling_matrix
if to_unit_cell:
for site in s:
site.to_unit_cell(in_place=True)
self._sites = s.sites
self._lattice = s.lattice
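# Usage sketch (illustrative only): two equivalent ways of building a
# 2x2x2 supercell of a hypothetical Structure `struct`. The operation
# modifies the structure in place.
#
#     struct.make_supercell([2, 2, 2])
#     struct.make_supercell([[2, 0, 0], [0, 2, 0], [0, 0, 2]])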
def scale_lattice(self, volume):
"""
Performs a scaling of the lattice vectors so that length proportions
and angles are preserved.
Args:
volume (float): New volume of the unit cell in A^3.
"""
self.lattice = self._lattice.scale(volume)
def merge_sites(self, tol=0.01, mode="sum"):
"""
Merges sites (adding occupancies) within tol of each other.
Removes site properties.
Args:
tol (float): Tolerance for distance to merge sites.
mode (str): Three modes supported. "delete" means duplicate sites
are deleted. "sum" means the occupancies are summed for the
sites. "average" means that the site is deleted but the
properties are averaged. Only the first letter is considered.
"""
mode = mode.lower()[0]
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster, linkage
d = self.distance_matrix
np.fill_diagonal(d, 0)
clusters = fcluster(linkage(squareform((d + d.T) / 2)),
tol, 'distance')
sites = []
for c in np.unique(clusters):
inds = np.where(clusters == c)[0]
species = self[inds[0]].species
coords = self[inds[0]].frac_coords
props = self[inds[0]].properties
for n, i in enumerate(inds[1:]):
sp = self[i].species
if mode == "s":
species += sp
offset = self[i].frac_coords - coords
coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(
coords.dtype)
for key in props.keys():
if props[key] is not None and self[i].properties[key] != props[key]:
if mode == 'a' and isinstance(props[key], float):
# update a running total
props[key] = props[key]*(n+1)/(n+2) + self[i].properties[key]/(n+2)
else:
props[key] = None
warnings.warn("Sites with different site property %s are merged. "
"So property is set to none" % key)
sites.append(PeriodicSite(species, coords, self.lattice, properties=props))
self._sites = sites
def set_charge(self, new_charge: float = 0.):
"""
Sets the overall structure charge
Args:
new_charge (float): new charge to set
"""
self._charge = new_charge
class Molecule(IMolecule, collections.abc.MutableSequence):
"""
Mutable Molecule. It has all the methods in IMolecule, but in addition,
it allows a user to perform edits on the molecule.
"""
__hash__ = None
def __init__(self, species: list, coords: list, charge: float = 0,
spin_multiplicity: float = None,
validate_proximity: bool = False,
site_properties: dict = None):
"""
        Creates a mutable Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Specie, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
super(Molecule, self).__init__(species, coords, charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=site_properties)
self._sites = list(self._sites)
def __setitem__(self, i, site):
"""
Modify a site in the molecule.
Args:
i (int, [int], slice, Specie-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
            site (Site/Specie/Sequence): Three options exist. You can
provide a Site directly, or for convenience, you can provide
simply a Specie-like string/object, or finally a (Specie,
coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Specie)):
self.replace_species({i: site})
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites)
if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, Site):
self._sites[ii] = site
else:
if isinstance(site, str) or (
not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site
else:
self._sites[ii].species = site[0]
if len(site) > 1:
self._sites[ii].coords = site[1]
if len(site) > 2:
self._sites[ii].properties = site[2]
def __delitem__(self, i):
"""
        Deletes a site from the molecule.
"""
self._sites.__delitem__(i)
def append(self, species, coords, validate_proximity=True, properties=None):
"""
Appends a site to the molecule.
Args:
species: Species of inserted site
coords: Coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dict of properties for the Site.
        Returns:
            None. The site is appended to this molecule in place.
"""
return self.insert(len(self), species, coords,
validate_proximity=validate_proximity,
properties=properties)
def set_charge_and_spin(self, charge, spin_multiplicity=None):
"""
Set the charge and spin multiplicity.
Args:
charge (int): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
"""
self._charge = charge
nelectrons = 0
for site in self._sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecie):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
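            # A multiplicity of 2S+1 is only consistent with the electron
            # count if their parities differ, i.e. nelectrons + multiplicity
            # must be odd.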
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
def insert(self, i, species, coords, validate_proximity=False,
properties=None):
"""
Insert a site to the molecule.
Args:
i (int): Index to insert site
species: species of inserted site
coords (3x1 array): coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to False.
properties (dict): Dict of properties for the Site.
        Returns:
            None. The site is inserted into this molecule in place.
"""
new_site = Site(species, coords, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def remove_species(self, species):
"""
Remove all occurrences of a species from a molecule.
Args:
species: Species to remove.
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
        Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def translate_sites(self, indices=None, vector=None):
"""
        Translate specific sites by some vector.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
            vector = [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species, site.coords + vector,
properties=site.properties)
self._sites[i] = new_site
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
                rotation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
"""
from numpy.linalg import norm
from numpy import cross, eye
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
s = ((np.dot(rm, (site.coords - anchor).T)).T + anchor).ravel()
new_site = Site(site.species, s,
properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance):
"""
        Performs a random perturbation of the sites in the molecule to break
        symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species, new_cart,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return self.__class__.from_sites(self)
def substitute(self, index, func_grp, bond_order=1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_grp: Substituent molecule. There are two options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2 in self.get_neighbors(nn, 3):
if inn != self[index] and \
dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach"
" functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if isinstance(func_grp, Molecule):
func_grp = func_grp
else:
# Check to see whether the functional group is in database.
if func_grp not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
else:
func_grp = FunctionalGroups[func_grp]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
bond_order=bond_order)
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
vec /= np.linalg.norm(vec)
func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i in range(len(func_grp)):
func_grp[i] = (func_grp[i].species,
origin - (func_grp[i].coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
self._sites.append(site)
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
pass
with open(os.path.join(os.path.dirname(__file__),
"func_groups.json"), "rt") as f:
FunctionalGroups = {k: Molecule(v["species"], v["coords"])
for k, v in json.load(f).items()}
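

# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of pymatgen). It
# only exercises the mutable Structure/Molecule APIs defined above; the
# lattice parameter and coordinates below are arbitrary example values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice

    # Diamond-like Si cell: build a 2x2x2 supercell, then randomly displace
    # every site by 0.05 Angstrom.
    s = Structure(Lattice.cubic(5.43), ["Si", "Si"],
                  [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
    s.make_supercell([2, 2, 2])
    s.perturb(0.05)

    # Water-like molecule: nudge one hydrogen and set the charge/spin state.
    m = Molecule(["O", "H", "H"],
                 [[0.0, 0.0, 0.0], [0.76, 0.59, 0.0], [-0.76, 0.59, 0.0]])
    m.translate_sites([1], [0.0, 0.0, 0.1])
    m.set_charge_and_spin(charge=0)
    print(len(s), len(m))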
|
montoyjh/pymatgen
|
pymatgen/core/structure.py
|
Python
|
mit
| 141,301
|
[
"ABINIT",
"CRYSTAL",
"Gaussian",
"NetCDF",
"VASP",
"exciting",
"pymatgen"
] |
533e21c4e20a400f58c6754cc4123d770884e1377f11c0c9446ecc4be1438412
|
"""
Use Scanorama to integrate cells from different experiments.
"""
from anndata import AnnData
import numpy as np
def scanorama_integrate(
adata: AnnData,
key: str,
basis: str = 'X_pca',
adjusted_basis: str = 'X_scanorama',
knn: int = 20,
sigma: float = 15,
approx: bool = True,
alpha: float = 0.10,
batch_size: int = 5000,
**kwargs,
):
"""\
Use Scanorama [Hie19]_ to integrate different experiments.
Scanorama [Hie19]_ is an algorithm for integrating single-cell
data from multiple experiments stored in an AnnData object. This
function should be run after performing PCA but before computing
the neighbor graph, as illustrated in the example below.
This uses the implementation of `scanorama
<https://github.com/brianhie/scanorama>`__ [Hie19]_.
Parameters
----------
adata
The annotated data matrix.
key
The name of the column in ``adata.obs`` that differentiates
among experiments/batches. Cells from the same batch must be
contiguously stored in ``adata``.
basis
The name of the field in ``adata.obsm`` where the PCA table is
stored. Defaults to ``'X_pca'``, which is the default for
``sc.tl.pca()``.
adjusted_basis
The name of the field in ``adata.obsm`` where the integrated
embeddings will be stored after running this function. Defaults
to ``X_scanorama``.
knn
Number of nearest neighbors to use for matching.
sigma
Correction smoothing parameter on Gaussian kernel.
approx
Use approximate nearest neighbors with Python ``annoy``;
greatly speeds up matching runtime.
alpha
Alignment score minimum cutoff.
batch_size
The batch size used in the alignment vector computation. Useful
        when integrating very large (>100k samples) datasets. Set to a
        large value that still fits within available memory.
kwargs
Any additional arguments will be passed to
``scanorama.integrate()``.
Returns
-------
Updates adata with the field ``adata.obsm[adjusted_basis]``,
containing Scanorama embeddings such that different experiments
are integrated.
Example
-------
First, load libraries and example dataset, and preprocess.
>>> import scanpy as sc
>>> import scanpy.external as sce
>>> adata = sc.datasets.pbmc3k()
>>> sc.pp.recipe_zheng17(adata)
>>> sc.tl.pca(adata)
We now arbitrarily assign a batch metadata variable to each cell
for the sake of example, but during real usage there would already
be a column in ``adata.obs`` giving the experiment each cell came
from.
>>> adata.obs['batch'] = 1350*['a'] + 1350*['b']
Finally, run Scanorama. Afterwards, there will be a new table in
``adata.obsm`` containing the Scanorama embeddings.
>>> sce.pp.scanorama_integrate(adata, 'batch')
>>> 'X_scanorama' in adata.obsm
True
"""
try:
import scanorama
except ImportError:
raise ImportError("\nplease install Scanorama:\n\n\tpip install scanorama")
# Get batch indices in linear time.
curr_batch = None
batch_names = []
name2idx = {}
for idx in range(adata.X.shape[0]):
batch_name = adata.obs[key][idx]
if batch_name != curr_batch:
curr_batch = batch_name
if batch_name in batch_names:
# Contiguous batches important for preserving cell order.
raise ValueError('Detected non-contiguous batches.')
batch_names.append(batch_name) # Preserve name order.
name2idx[batch_name] = []
name2idx[batch_name].append(idx)
# Separate batches.
datasets_dimred = [
adata.obsm[basis][name2idx[batch_name]] for batch_name in batch_names
]
# Integrate.
integrated = scanorama.assemble(
datasets_dimred, # Assemble in low dimensional space.
knn=knn,
sigma=sigma,
approx=approx,
alpha=alpha,
ds_names=batch_names,
**kwargs,
)
adata.obsm[adjusted_basis] = np.concatenate(integrated)
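

# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of scanpy): batches must
# be stored contiguously in `adata`, so reorder the observations by the batch
# key before calling `scanorama_integrate`. Assumes scanpy and scanorama are
# installed; the 'batch' labels are arbitrary, as in the docstring example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import scanpy as sc

    adata = sc.datasets.pbmc3k()
    sc.pp.recipe_zheng17(adata)
    sc.tl.pca(adata)
    adata.obs['batch'] = 1350 * ['a'] + 1350 * ['b']

    # Reorder cells so that each batch occupies one contiguous block
    # (a no-op here, but required in general).
    adata = adata[adata.obs.sort_values('batch').index].copy()
    scanorama_integrate(adata, 'batch')
    print(adata.obsm['X_scanorama'].shape)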
|
theislab/scanpy
|
scanpy/external/pp/_scanorama_integrate.py
|
Python
|
bsd-3-clause
| 4,175
|
[
"Gaussian"
] |
13d93058aab82753d90542ee40c717d2f4a564c8b6d1612d51a224aa8c1eb5bc
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import with_statement
import functools
import logging
import threading
import time
try:
from collections import namedtuple
except ImportError:
from third_party.py.concurrent.futures._compat import namedtuple
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
STDERR_HANDLER = logging.StreamHandler()
LOGGER.addHandler(STDERR_HANDLER)
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
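        # Acquiring the per-future condition locks in a consistent global
        # order (sorted by id) avoids lock-ordering deadlocks when several
        # threads wait on overlapping sets of futures.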
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
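

# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of this module): the
# protocol an Executor follows when driving a Future by hand, and how wait()
# observes the final states. No concrete executor implementation is needed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _run(future, fn, *args):
        # Executors call set_running_or_notify_cancel() before doing the
        # work; it returns False if the future was already cancelled.
        if future.set_running_or_notify_cancel():
            future.set_result(fn(*args))

    f1, f2 = Future(), Future()
    f1.add_done_callback(lambda fut: LOGGER.info('done: %r', fut))
    f2.cancel()

    for fut, job in ((f1, lambda: 6 * 7), (f2, lambda: 0)):
        _run(fut, job)

    done, not_done = wait([f1, f2], return_when=ALL_COMPLETED)
    print(sorted(f.result() for f in done if not f.cancelled()))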
|
Digas29/bazel
|
third_party/py/concurrent/futures/_base.py
|
Python
|
apache-2.0
| 19,681
|
[
"Brian"
] |
4ff740dabc310b2d2d062f56b03d1fee6fee2da2729bdeeb20246ccf5669543f
|
"""
Migration script to add the metadata, update_available and includes_datatypes columns to the tool_shed_repository table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def get_default_false(migrate_engine):
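    # MySQL and SQLite store booleans as 0/1, whereas PostgreSQL expects the
    # literal false, so the raw UPDATE statements below need different text.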
if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
return "0"
elif migrate_engine.name in ['postgresql', 'postgres']:
return "false"
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
c = Column( "metadata", JSONType(), nullable=True )
try:
c.create( ToolShedRepository_table )
assert c is ToolShedRepository_table.c.metadata
except Exception, e:
print "Adding metadata column to the tool_shed_repository table failed: %s" % str( e )
log.debug( "Adding metadata column to the tool_shed_repository table failed: %s" % str( e ) )
c = Column( "includes_datatypes", Boolean, index=True, default=False )
try:
c.create( ToolShedRepository_table, index_name="ix_tool_shed_repository_includes_datatypes")
assert c is ToolShedRepository_table.c.includes_datatypes
migrate_engine.execute( "UPDATE tool_shed_repository SET includes_datatypes=%s" % get_default_false(migrate_engine))
except Exception, e:
print "Adding includes_datatypes column to the tool_shed_repository table failed: %s" % str( e )
log.debug( "Adding includes_datatypes column to the tool_shed_repository table failed: %s" % str( e ) )
c = Column( "update_available", Boolean, default=False )
try:
c.create( ToolShedRepository_table )
assert c is ToolShedRepository_table.c.update_available
migrate_engine.execute( "UPDATE tool_shed_repository SET update_available=%s" % get_default_false(migrate_engine))
except Exception, e:
print "Adding update_available column to the tool_shed_repository table failed: %s" % str( e )
log.debug( "Adding update_available column to the tool_shed_repository table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
try:
ToolShedRepository_table.c.metadata.drop()
except Exception, e:
print "Dropping column metadata from the tool_shed_repository table failed: %s" % str( e )
log.debug( "Dropping column metadata from the tool_shed_repository table failed: %s" % str( e ) )
try:
ToolShedRepository_table.c.includes_datatypes.drop()
except Exception, e:
print "Dropping column includes_datatypes from the tool_shed_repository table failed: %s" % str( e )
log.debug( "Dropping column includes_datatypes from the tool_shed_repository table failed: %s" % str( e ) )
try:
ToolShedRepository_table.c.update_available.drop()
except Exception, e:
print "Dropping column update_available from the tool_shed_repository table failed: %s" % str( e )
log.debug( "Dropping column update_available from the tool_shed_repository table failed: %s" % str( e ) )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0086_add_tool_shed_repository_table_columns.py
|
Python
|
gpl-3.0
| 3,763
|
[
"Galaxy"
] |
9b44d2541c265997e8aa373448a5673de0ad1cb60b517d31551fd54a2b237c0a
|
../../../../../../../share/pyshared/orca/scripts/apps/gnome-system-monitor/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gnome-system-monitor/__init__.py
|
Python
|
gpl-3.0
| 86
|
[
"ORCA"
] |
8b9e383d5ee45b2b57ec2001258cd15d674123c55b052f2126e9a59caedbbcd7
|
# -*- coding: iso-8859-1 -*-
'''Module for creating the requested output files.
'''
'''
orbkit
Gunter Hermann, Vincent Pohl, Lukas Eugen Marsoner Steinkasserer, Axel Schild, and Jean Christophe Tremblay
Institut fuer Chemie und Biochemie, Freie Universitaet Berlin, 14195 Berlin, Germany
This file is part of orbkit.
orbkit is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or any later version.
orbkit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with orbkit. If not, see <http://www.gnu.org/licenses/>.
'''
# Import general modules
import numpy
import h5py
from os import path
# Import orbkit modules
from orbkit import grid, options
from orbkit.display import display
from orbkit.units import a0_to_aa
from .amira import amira_creator, hx_network_creator
from .cube import cube_creator
from .obj import obj_creator
from .hdf5 import hdf5_creator, hdf5_append, hdf5_write
from .mayavi_interface import view_with_mayavi
from .pdb import pdb_creator
from .vmd import vmd_network_creator
from .xyz import xyz_creator
from .native import write_native
synonyms = {'auto':'auto',
'h5':'h5', 'hdf5':'h5',
'npz':'npz','numpy':'npz',
'cube':'cube', 'cb':'cube',
'cube.gz':'cube', 'cb.gz':'cube',
'obj':'obj', 'obj.gz':'obj',
'am':'am',
'hx':'hx',
'vmd':'vmd',
'mayavi':'mayavi',
'':None
}
def main_output(data,qc=None,outputname='data',otype='auto',gname='',
drv=None,omit=[],datalabels='',dataindices=None,mode='w',**kwargs):
'''Creates the requested output.
**Parameters:**
data : numpy.ndarray, shape=N, shape=((NDRV,) + N), shape=(n, (NDRV,) + N) or list of numpy.ndarrays
Contains the output data. The shape (N) depends on the grid and the data, i.e.,
3d for regular grid, 1d for vector grid.
qc : class or dict
QCinfo class or dictionary containing the following attributes/keys.
See :ref:`Central Variables` for details.
outputname : str or list of str
    Contains the base name of the output file. If outputname contains an "@", the string will be split; the first
    part is interpreted as outputname and the second as gname (cf. Parameters:gname).
otype : str or list of str, optional
Contains the output file type. Possible options:
'auto', 'h5', 'cb', 'am', 'hx', 'vmd', 'mayavi', 'obj'
    If otype='native', a native input file will be written; its type may be specified by
    ftype='numpy'.
gname : str, optional
For native, HDF5, or npz output, specifies the group, where the data will be stored.
drv : None, list of str or list of list of str, optional
If not None, a 4d(regular)/2d(vector) input data array will be expected
with NDRV = len(drv). Specifies the file labels, i.e. e.g., data_d{drv}.cube for 4d array.
For 5d arrays i.e., data_0_d{drv}.cube
  datalabels : str or list of str, optional
    Contains the labels of the data sets, used for comments and dataset names in the output.
  dataindices : list of int, optional
    If not None, contains the indices used to label the individual data sets in the output filenames.
omit : list of str, optional
If not empty, the output file types specified here are omitted.
mode : str={'r', 'w', 'a'}, optional
Specifies the mode used to open the file (native, HDF5, or npz).
**Note:**
All additional keyword arguments are forwarded to the output functions.
'''
if otype is None or otype == []:
return []
if isinstance(outputname, str):
if '@' in outputname:
outputname,gname = outputname.split('@')
if isinstance(otype, str):
if otype == 'auto':
outputname, otype = path.splitext(outputname)
otype = otype[1:]
otype = [otype]
elif isinstance(otype, list) and len(otype) == 1:
if otype[0] == 'auto':
outputname, otype = path.splitext(outputname)
otype = [otype[1:]]
else:
for iot in range(len(otype)):
if otype[iot] == 'auto':
outputname, tmp = path.splitext(outputname)
if tmp != '':
otype[iot] = tmp[1:]
# Catch our native format before all else
# We can't figure this out by the file ending alone
# as we support hdf5 for output of both grid-based data
# as well as our internal format
output_written = []
internals = [i for i in range(len(otype)) if otype[i] == 'native']
if len(internals) > 0:
if not isinstance(data,list):
data = [data]
if isinstance(outputname,str):
outputname = [outputname for _ in data]
if 'ftype' in kwargs.keys():
if isinstance(kwargs['ftype'],str):
ftype = [kwargs['ftype'] for _ in data]
else:
ftype = ['numpy' for _ in data]
if 'group' in kwargs.keys():
if isinstance(kwargs['group'],str):
group = [kwargs['group'] for _ in data]
else:
group = [i.__class__.__name__.lower() for i in data]
display('Writing native input file...' )
for i, oname in enumerate(outputname):
output_written.append(write_native(data[i], oname, ftype[i], mode=mode,
gname=path.join(gname,group[i])))
display('\n'.join(['\t' + i for i in output_written]))
else:
print_warning = False
output_not_possible = (grid.is_vector and not grid.is_regular)
# Shape shall be (Ndrv,Ndata,Nx,Ny,Nz) or (Ndrv,Ndata,Nxyz)
data = numpy.array(data)
dims = 1 if grid.is_vector else 3
shape = data.shape
if drv is not None and isinstance(drv,str):
drv = [drv]
if data.ndim < dims:
output_not_possible = True
display('data.ndim < ndim of grid')
elif data.ndim == dims: # 3d data set
data = data[numpy.newaxis,numpy.newaxis]
elif data.ndim == dims + 1: # 4d data set
if drv is not None:
data = data[:,numpy.newaxis]
else:
data = data[numpy.newaxis]
elif data.ndim == dims + 2: # 5d data set check if drv matches Ndrv
if drv is None or len(drv) != data.shape[0]:
drv = list(range(data.shape[0]))
elif data.ndim > dims + 2:
output_not_possible = True
display('data.ndim > (ndim of grid) +2')
if 'vmd' in otype and not ('cb' in otype or 'cube' in otype):
otype.append('cube')
if 'hx' in otype and not 'am' in otype:
otype.append('am')
otype = [i for i in otype if i not in omit]
otype_synonyms = [synonyms[i] for i in otype]
otype_ext = dict(zip(otype_synonyms,otype))
# Convert the data to a regular grid, if possible
is_regular_vector = (grid.is_vector and grid.is_regular)
if is_regular_vector:
display('\nConverting the regular 1d vector grid to a 3d regular grid.')
grid.vector2grid(*grid.N_)
data = numpy.array(grid.mv2g(data))
isstr = isinstance(outputname, str)
if isinstance(datalabels, str):
if data.shape[1] > 1:
datalabels = numpy.array([str(idata) + ',' + datalabels
for idata in range(data.shape[1])])
else:
datalabels = numpy.array([datalabels])
elif isinstance(datalabels, list):
datalabels = numpy.array(datalabels)
if drv is not None:
fid = '%(f)s_d%(d)s.'
datalabel_id = 'd/d%(d)s %(f)s'
contents = {
'axis:0': numpy.array(['d/d%s' % i if i is not None else str(i)
for i in drv]),
'axis:1': datalabels}
it = enumerate(drv)
elif data.shape[0] > 1:
fid = '%(f)s_%(d)s.'
datalabel_id = '%(d)s %(f)s'
      it = enumerate(range(data.shape[0]))
contents = {
'axis:0': numpy.arange(data.shape[0]).astype(str),
'axis:1': datalabels}
else:
fid = '%(f)s.'
datalabel_id = '%(f)s'
it = [(0,None)]
if data.shape[1] > 1:
contents = {'axis:0': datalabels}
else:
contents = datalabels
cube_files = []
all_datalabels = []
for idrv,jdrv in it:
datasetlabels = []
for idata in range(data.shape[1]):
if isstr:
index = str(idata) if dataindices is None else str(dataindices[idata])
f = {'f': outputname + '_' + index if data.shape[1] > 1 else outputname,
'd':jdrv}
else:
f = {'f': outputname[idata], 'd':jdrv}
c = {'f': datalabels[idata],'d':jdrv}
datalabel = datalabel_id%c
datasetlabels.append(datalabel)
if 'am' in otype_synonyms and not print_warning:
if output_not_possible: print_warning = True
else:
filename = fid % f + otype_ext['am']
display('\nSaving to ZIBAmiraMesh file...\n\t' + filename)
amira_creator(data[idrv,idata],filename)
output_written.append(filename)
if 'hx' in otype_synonyms and not print_warning:
if output_not_possible: print_warning = True
else:
filename = fid % f + otype_ext['hx']
display('\nCreating ZIBAmira network file...\n\t' + filename)
hx_network_creator(data[idrv,idata],filename)
output_written.append(filename)
if 'cube' in otype_synonyms and not print_warning:
if output_not_possible: print_warning = True
elif qc is None:
display('\nFor cube file output `qc` is a required keyword parameter in `main_output`.')
else:
filename = fid % f + otype_ext['cube']
display('\nSaving to cube file...\n\t' + filename)
cube_creator(data[idrv,idata],filename,qc.geo_info,qc.geo_spec,
comments=datalabel,
**kwargs)
output_written.append(filename)
cube_files.append(filename)
if 'obj' in otype_synonyms and not print_warning:
if output_not_possible: print_warning = True
elif qc is None:
display('\nFor obj file output `qc` is a required keyword parameter in `main_output`.')
else:
filename = fid % f + otype_ext['obj']
display('\nSaving to obj file...\n\t' + filename)
obj_creator(data[idrv,idata],filename,qc.geo_info,qc.geo_spec,
**kwargs)
output_written.append(filename)
all_datalabels.extend(datasetlabels)
if 'vmd' in otype_synonyms and not print_warning:
if output_not_possible: print_warning = True
else:
filename = (outputname if isstr else outputname[-1]) +'.'+ otype_ext['vmd']
display('\nCreating VMD network file...\n\t' + filename)
vmd_network_creator(filename,cube_files=cube_files,**kwargs)
output_written.append(filename)
if 'h5' in otype_synonyms:
filename = (outputname if isstr else outputname[-1]) +'.'+ otype_ext['h5']
display('\nSaving to Hierarchical Data Format file (HDF5)...\n\t' + filename)
hdf5_creator(data.reshape(shape),filename,qcinfo=qc,gname=gname,
ftype='hdf5',contents=contents,mode=mode,**kwargs)
output_written.append(filename)
if 'npz' in otype_synonyms:
filename = (outputname if isstr else outputname[-1])
display('\nSaving to a compressed .npz archive...\n\t' + filename+'.npz')
hdf5_creator(data.reshape(shape),filename,qcinfo=qc,gname=gname,
ftype='numpy',contents=contents,mode=mode,**kwargs)
output_written.append(filename)
if 'mayavi' in otype_synonyms:
if output_not_possible: print_warning = True
else:
display('\nDepicting the results with MayaVi...\n\t')
if drv == ['x','y','z'] or drv == [0,1,2]:
is_vectorfield = True
data = numpy.swapaxes(data,0,1)
datalabels = datalabels
else:
is_vectorfield = False
data = data.reshape((-1,)+grid.get_shape())
datalabels = all_datalabels
view_with_mayavi(grid.x,grid.y,grid.z,
data,
is_vectorfield=is_vectorfield,
geo_spec=qc.geo_spec,
datalabels=datalabels,**kwargs)
if print_warning:
display('For a non-regular vector grid (`if grid.is_vector and not grid.is_regular`)')
display('only HDF5 is available as output format...')
display('Skipping all other formats...')
if is_regular_vector:
# Convert the data back to a regular vector grid
grid.grid2vector()
return output_written
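

# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of orbkit): write a
# synthetic scalar field on a small regular grid to a compressed .npz
# archive. It assumes the usual orbkit grid interface (grid.min_, grid.max_,
# grid.N_, grid.grid_init()) and that the npz writer accepts qc=None.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    grid.min_ = [-2.0, -2.0, -2.0]
    grid.max_ = [2.0, 2.0, 2.0]
    grid.N_ = [20, 20, 20]
    grid.grid_init()
    xx, yy, zz = numpy.meshgrid(grid.x, grid.y, grid.z, indexing='ij')
    rho = numpy.exp(-(xx ** 2 + yy ** 2 + zz ** 2))
    main_output(rho, outputname='demo_rho', otype='npz',
                datalabels='gaussian test density')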
|
orbkit/orbkit
|
orbkit/output/high_level.py
|
Python
|
lgpl-3.0
| 12,902
|
[
"Mayavi",
"VMD"
] |
0847d8c2a5742db20d6043932ec31529fb455b235812133aef066aab50af5b18
|
####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2018 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
"""Module to define colours from several sets.
"""
####################################################################################################
# from Matplotlib, Valentina, Qt
__all__ = [
'BASE_COLORS',
'TABLEAU_COLORS',
'XKCD_COLORS',
'CSS4_COLORS',
'QML_COLORS',
'VALENTINA_COLORS',
]
####################################################################################################
# aqua #00ffff vs #13eac9
# aquamarine #7fffd4 vs #04d8b2
# azure #f0ffff vs #069af3
# beige #f5f5dc vs #e6daa6
# blue #0343df vs #0000ff
# blue #1f77b4 vs #0000ff
# brown #653700 vs #8c564b
# brown #a52a2a vs #8c564b
# chartreuse #7fff00 vs #c1f80a
# chocolate #d2691e vs #3d1c02
# coral #ff7f50 vs #fc5a50
# crimson #dc143c vs #8c000f
# cyan #17becf vs #00ffff
# darkblue #00008b vs #030764
# darkgreen #006400 vs #054907
# fuchsia #ff00ff vs #ed0dd9
# goldenrod #daa520 vs #fac205
# gold #ffd700 vs #dbb40c
# gray #808080 vs #7f7f7f
# green #008000 vs #00ff00
# green #15b01a vs #00ff00
# green #2ca02c vs #00ff00
# grey #808080 vs #929591
# indigo #4b0082 vs #380282
# ivory #fffff0 vs #ffffcb
# khaki #f0e68c vs #aaa662
# lavender #e6e6fa vs #c79fef
# lightblue #add8e6 vs #7bc8f6
# lightgreen #90ee90 vs #76ff7b
# lime #00ff00 vs #aaff32
# magenta #c20078 vs #ff00ff
# maroon #800000 vs #650021
# navy #000080 vs #01153e
# olive #6e750e vs #bcbd22
# olive #808000 vs #bcbd22
# orange #f97306 vs #ff7f0e
# orange #ffa500 vs #ff7f0e
# orangered #ff4500 vs #fe420f
# orchid #da70d6 vs #c875c4
# pink #ff81c0 vs #e377c2
# pink #ffc0cb vs #e377c2
# plum #dda0dd vs #580f41
# purple #7e1e9c vs #9467bd
# purple #800080 vs #9467bd
# red #d62728 vs #ff0000
# red #e50000 vs #ff0000
# salmon #fa8072 vs #ff796c
# sienna #a0522d vs #a9561e
# silver #c0c0c0 vs #c5c9c7
# tan #d2b48c vs #d1b26f
# teal #008080 vs #029386
# tomato #ff6347 vs #ef4026
# turquoise #40e0d0 vs #06c2ac
# violet #ee82ee vs #9a0eea
# wheat #f5deb3 vs #fbdd7e
# yellow #ffff14 vs #ffff00
# yellowgreen #9acd32 vs #bbf90f
####################################################################################################
BASE_COLORS = {
'black': '#000000',
'blue': '#0000ff',
'green': '#00ff00',
'cyan': '#00ffff',
'red': '#ff0000',
'magenta': '#ff00ff',
'yellow': '#ffff00',
'white': '#ffffff',
}
####################################################################################################
# These colors are from Tableau
TABLEAU_COLORS = {
'blue': '#1f77b4',
'brown': '#8c564b',
'cyan': '#17becf',
'gray': '#7f7f7f',
'green': '#2ca02c',
'olive': '#bcbd22',
'orange': '#ff7f0e',
'pink': '#e377c2',
'purple': '#9467bd',
'red': '#d62728',
}
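####################################################################################################
# Small illustrative helper (editorial addition, not part of Patro): convert one of the
# '#rrggbb' strings defined in these tables to an (r, g, b) tuple of integers,
# e.g. hex_to_rgb(TABLEAU_COLORS['blue']) == (31, 119, 180).
def hex_to_rgb(hex_string):
    value = hex_string.lstrip('#')
    return tuple(int(value[i:i + 2], 16) for i in (0, 2, 4))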
####################################################################################################
# This mapping of color names -> hex values is taken from
# a survey run by Randall Munroe. See:
# http://blog.xkcd.com/2010/05/03/color-survey-results/
# for more details. The results are hosted at
# https://xkcd.com/color/rgb.txt
#
# License: http://creativecommons.org/publicdomain/zero/1.0/
XKCD_COLORS = {
"robin's egg blue": '#98eff9',
"robin's egg": '#6dedfd',
'acid green': '#8ffe09',
'adobe': '#bd6c48',
'algae green': '#21c36f',
'algae': '#54ac68',
'almost black': '#070d0d',
'amber': '#feb308',
'amethyst': '#9b5fc0',
'apple green': '#76cd26',
'apple': '#6ecb3c',
'apricot': '#ffb16d',
'aqua blue': '#02d8e9',
'aqua green': '#12e193',
'aqua marine': '#2ee8bb',
'aqua': '#13eac9',
'aquamarine': '#04d8b2',
'army green': '#4b5d16',
'asparagus': '#77ab56',
'aubergine': '#3d0734',
'auburn': '#9a3001',
'avocado green': '#87a922',
'avocado': '#90b134',
'azul': '#1d5dec',
'azure': '#069af3',
'baby blue': '#a2cffe',
'baby green': '#8cff9e',
'baby pink': '#ffb7ce',
'baby poo': '#ab9004',
'baby poop green': '#8f9805',
'baby poop': '#937c00',
'baby puke green': '#b6c406',
'baby purple': '#ca9bf7',
'baby shit brown': '#ad900d',
'baby shit green': '#889717',
'banana yellow': '#fafe4b',
'banana': '#ffff7e',
'barbie pink': '#fe46a5',
'barf green': '#94ac02',
'barney purple': '#a00498',
'barney': '#ac1db8',
'battleship grey': '#6b7c85',
'beige': '#e6daa6',
'berry': '#990f4b',
'bile': '#b5c306',
'black': '#000000',
'bland': '#afa88b',
'blood orange': '#fe4b03',
'blood red': '#980002',
'blood': '#770001',
'blue blue': '#2242c7',
'blue green': '#137e6d',
'blue grey': '#607c8e',
'blue purple': '#5729ce',
'blue violet': '#5d06e9',
'blue with a hint of purple': '#533cc6',
'blue': '#0343df',
'blue/green': '#0f9b8e',
'blue/grey': '#758da3',
'blue/purple': '#5a06ef',
'blueberry': '#464196',
'bluegreen': '#017a79',
'bluegrey': '#85a3b2',
'bluey green': '#2bb179',
'bluey grey': '#89a0b0',
'bluey purple': '#6241c7',
'bluish green': '#10a674',
'bluish grey': '#748b97',
'bluish purple': '#703be7',
'bluish': '#2976bb',
'blurple': '#5539cc',
'blush pink': '#fe828c',
'blush': '#f29e8e',
'booger green': '#96b403',
'booger': '#9bb53c',
'bordeaux': '#7b002c',
'boring green': '#63b365',
'bottle green': '#044a05',
'brick orange': '#c14a09',
'brick red': '#8f1402',
'brick': '#a03623',
'bright aqua': '#0bf9ea',
'bright blue': '#0165fc',
'bright cyan': '#41fdfe',
'bright green': '#01ff07',
'bright lavender': '#c760ff',
'bright light blue': '#26f7fd',
'bright light green': '#2dfe54',
'bright lilac': '#c95efb',
'bright lime green': '#65fe08',
'bright lime': '#87fd05',
'bright magenta': '#ff08e8',
'bright olive': '#9cbb04',
'bright orange': '#ff5b00',
'bright pink': '#fe01b1',
'bright purple': '#be03fd',
'bright red': '#ff000d',
'bright sea green': '#05ffa6',
'bright sky blue': '#02ccfe',
'bright teal': '#01f9c6',
'bright turquoise': '#0ffef9',
'bright violet': '#ad0afd',
'bright yellow green': '#9dff00',
'bright yellow': '#fffd01',
'british racing green': '#05480d',
'bronze': '#a87900',
'brown green': '#706c11',
'brown grey': '#8d8468',
'brown orange': '#b96902',
'brown red': '#922b05',
'brown yellow': '#b29705',
'brown': '#653700',
'brownish green': '#6a6e09',
'brownish grey': '#86775f',
'brownish orange': '#cb7723',
'brownish pink': '#c27e79',
'brownish purple': '#76424e',
'brownish red': '#9e3623',
'brownish yellow': '#c9b003',
'brownish': '#9c6d57',
'browny green': '#6f6c0a',
'browny orange': '#ca6b02',
'bruise': '#7e4071',
'bubble gum pink': '#ff69af',
'bubblegum pink': '#fe83cc',
'bubblegum': '#ff6cb5',
'buff': '#fef69e',
'burgundy': '#610023',
'burnt orange': '#c04e01',
'burnt red': '#9f2305',
'burnt siena': '#b75203',
'burnt sienna': '#b04e0f',
'burnt umber': '#a0450e',
'burnt yellow': '#d5ab09',
'burple': '#6832e3',
'butter yellow': '#fffd74',
'butter': '#ffff81',
'butterscotch': '#fdb147',
'cadet blue': '#4e7496',
'camel': '#c69f59',
'camo green': '#526525',
'camo': '#7f8f4e',
'camouflage green': '#4b6113',
'canary yellow': '#fffe40',
'canary': '#fdff63',
'candy pink': '#ff63e9',
'caramel': '#af6f09',
'carmine': '#9d0216',
'carnation pink': '#ff7fa7',
'carnation': '#fd798f',
'carolina blue': '#8ab8fe',
'celadon': '#befdb7',
'celery': '#c1fd95',
'cement': '#a5a391',
'cerise': '#de0c62',
'cerulean blue': '#056eee',
'cerulean': '#0485d1',
'charcoal grey': '#3c4142',
'charcoal': '#343837',
'chartreuse': '#c1f80a',
'cherry red': '#f7022a',
'cherry': '#cf0234',
'chestnut': '#742802',
'chocolate brown': '#411900',
'chocolate': '#3d1c02',
'cinnamon': '#ac4f06',
'claret': '#680018',
'clay brown': '#b2713d',
'clay': '#b66a50',
'clear blue': '#247afd',
'cloudy blue': '#acc2d9',
'cobalt blue': '#030aa7',
'cobalt': '#1e488f',
'cocoa': '#875f42',
'coffee': '#a6814c',
'cool blue': '#4984b8',
'cool green': '#33b864',
'cool grey': '#95a3a6',
'copper': '#b66325',
'coral pink': '#ff6163',
'coral': '#fc5a50',
'cornflower blue': '#5170d7',
'cornflower': '#6a79f7',
'cranberry': '#9e003a',
'cream': '#ffffc2',
'creme': '#ffffb6',
'crimson': '#8c000f',
'custard': '#fffd78',
'cyan': '#00ffff',
'dandelion': '#fedf08',
'dark aqua': '#05696b',
'dark aquamarine': '#017371',
'dark beige': '#ac9362',
'dark blue green': '#005249',
'dark blue grey': '#1f3b4d',
'dark blue': '#00035b',
'dark brown': '#341c02',
'dark coral': '#cf524e',
'dark cream': '#fff39a',
'dark cyan': '#0a888a',
'dark forest green': '#002d04',
'dark fuchsia': '#9d0759',
'dark gold': '#b59410',
'dark grass green': '#388004',
'dark green blue': '#1f6357',
'dark green': '#033500',
'dark grey blue': '#29465b',
'dark grey': '#363737',
'dark hot pink': '#d90166',
'dark indigo': '#1f0954',
'dark khaki': '#9b8f55',
'dark lavender': '#856798',
'dark lilac': '#9c6da5',
'dark lime green': '#7ebd01',
'dark lime': '#84b701',
'dark magenta': '#960056',
'dark maroon': '#3c0008',
'dark mauve': '#874c62',
'dark mint green': '#20c073',
'dark mint': '#48c072',
'dark mustard': '#a88905',
'dark navy blue': '#00022e',
'dark navy': '#000435',
'dark olive green': '#3c4d03',
'dark olive': '#373e02',
'dark orange': '#c65102',
'dark pastel green': '#56ae57',
'dark peach': '#de7e5d',
'dark periwinkle': '#665fd1',
'dark pink': '#cb416b',
'dark plum': '#3f012c',
'dark purple': '#35063e',
'dark red': '#840000',
'dark rose': '#b5485d',
'dark royal blue': '#02066f',
'dark sage': '#598556',
'dark salmon': '#c85a53',
'dark sand': '#a88f59',
'dark sea green': '#11875d',
'dark seafoam green': '#3eaf76',
'dark seafoam': '#1fb57a',
'dark sky blue': '#448ee4',
'dark slate blue': '#214761',
'dark tan': '#af884a',
'dark taupe': '#7f684e',
'dark teal': '#014d4e',
'dark turquoise': '#045c5a',
'dark violet': '#34013f',
'dark yellow green': '#728f02',
'dark yellow': '#d5b60a',
'dark': '#1b2431',
'darkblue': '#030764',
'darkgreen': '#054907',
'darkish blue': '#014182',
'darkish green': '#287c37',
'darkish pink': '#da467d',
'darkish purple': '#751973',
'darkish red': '#a90308',
'deep aqua': '#08787f',
'deep blue': '#040273',
'deep brown': '#410200',
'deep green': '#02590f',
'deep lavender': '#8d5eb7',
'deep lilac': '#966ebd',
'deep magenta': '#a0025c',
'deep orange': '#dc4d01',
'deep pink': '#cb0162',
'deep purple': '#36013f',
'deep red': '#9a0200',
'deep rose': '#c74767',
'deep sea blue': '#015482',
'deep sky blue': '#0d75f8',
'deep teal': '#00555a',
'deep turquoise': '#017374',
'deep violet': '#490648',
'denim blue': '#3b5b92',
'denim': '#3b638c',
'desert': '#ccad60',
'diarrhea': '#9f8303',
'dirt brown': '#836539',
'dirt': '#8a6e45',
'dirty blue': '#3f829d',
'dirty green': '#667e2c',
'dirty orange': '#c87606',
'dirty pink': '#ca7b80',
'dirty purple': '#734a65',
'dirty yellow': '#cdc50a',
'dodger blue': '#3e82fc',
'drab green': '#749551',
'drab': '#828344',
'dried blood': '#4b0101',
'duck egg blue': '#c3fbf4',
'dull blue': '#49759c',
'dull brown': '#876e4b',
'dull green': '#74a662',
'dull orange': '#d8863b',
'dull pink': '#d5869d',
'dull purple': '#84597e',
'dull red': '#bb3f3f',
'dull teal': '#5f9e8f',
'dull yellow': '#eedc5b',
'dusk blue': '#26538d',
'dusk': '#4e5481',
'dusky blue': '#475f94',
'dusky pink': '#cc7a8b',
'dusky purple': '#895b7b',
'dusky rose': '#ba6873',
'dust': '#b2996e',
'dusty blue': '#5a86ad',
'dusty green': '#76a973',
'dusty lavender': '#ac86a8',
'dusty orange': '#f0833a',
'dusty pink': '#d58a94',
'dusty purple': '#825f87',
'dusty red': '#b9484e',
'dusty rose': '#c0737a',
'dusty teal': '#4c9085',
'earth': '#a2653e',
'easter green': '#8cfd7e',
'easter purple': '#c071fe',
'ecru': '#feffca',
'egg shell': '#fffcc4',
'eggplant purple': '#430541',
'eggplant': '#380835',
'eggshell blue': '#c4fff7',
'eggshell': '#ffffd4',
'electric blue': '#0652ff',
'electric green': '#21fc0d',
'electric lime': '#a8ff04',
'electric pink': '#ff0490',
'electric purple': '#aa23ff',
'emerald green': '#028f1e',
'emerald': '#01a049',
'evergreen': '#05472a',
'faded blue': '#658cbb',
'faded green': '#7bb274',
'faded orange': '#f0944d',
'faded pink': '#de9dac',
'faded purple': '#916e99',
'faded red': '#d3494e',
'faded yellow': '#feff7f',
'fawn': '#cfaf7b',
'fern green': '#548d44',
'fern': '#63a950',
'fire engine red': '#fe0002',
'flat blue': '#3c73a8',
'flat green': '#699d4c',
'fluorescent green': '#08ff08',
'fluro green': '#0aff02',
'foam green': '#90fda9',
'forest green': '#06470c',
'forest': '#0b5509',
'forrest green': '#154406',
'french blue': '#436bad',
'fresh green': '#69d84f',
'frog green': '#58bc08',
'fuchsia': '#ed0dd9',
'gold': '#dbb40c',
'golden brown': '#b27a01',
'golden rod': '#f9bc08',
'golden yellow': '#fec615',
'golden': '#f5bf03',
'goldenrod': '#fac205',
'grape purple': '#5d1451',
'grape': '#6c3461',
'grapefruit': '#fd5956',
'grass green': '#3f9b0b',
'grass': '#5cac2d',
'grassy green': '#419c03',
'green apple': '#5edc1f',
'green blue': '#06b48b',
'green brown': '#544e03',
'green grey': '#77926f',
'green teal': '#0cb577',
'green yellow': '#c9ff27',
'green': '#15b01a',
'green/blue': '#01c08d',
'green/yellow': '#b5ce08',
'greenblue': '#23c48b',
'greenish beige': '#c9d179',
'greenish blue': '#0b8b87',
'greenish brown': '#696112',
'greenish cyan': '#2afeb7',
'greenish grey': '#96ae8d',
'greenish tan': '#bccb7a',
'greenish teal': '#32bf84',
'greenish turquoise': '#00fbb0',
'greenish yellow': '#cdfd02',
'greenish': '#40a368',
'greeny blue': '#42b395',
'greeny brown': '#696006',
'greeny grey': '#7ea07a',
'greeny yellow': '#c6f808',
'grey blue': '#6b8ba4',
'grey brown': '#7f7053',
'grey green': '#789b73',
'grey pink': '#c3909b',
'grey purple': '#826d8c',
'grey teal': '#5e9b8a',
'grey': '#929591',
'grey/blue': '#647d8e',
'grey/green': '#86a17d',
'greyblue': '#77a1b5',
'greyish blue': '#5e819d',
'greyish brown': '#7a6a4f',
'greyish green': '#82a67d',
'greyish pink': '#c88d94',
'greyish purple': '#887191',
'greyish teal': '#719f91',
'greyish': '#a8a495',
'gross green': '#a0bf16',
'gunmetal': '#536267',
'hazel': '#8e7618',
'heather': '#a484ac',
'heliotrope': '#d94ff5',
'highlighter green': '#1bfc06',
'hospital green': '#9be5aa',
'hot green': '#25ff29',
'hot magenta': '#f504c9',
'hot pink': '#ff028d',
'hot purple': '#cb00f5',
'hunter green': '#0b4008',
'ice blue': '#d7fffe',
'ice': '#d6fffa',
'icky green': '#8fae22',
'indian red': '#850e04',
'indigo blue': '#3a18b1',
'indigo': '#380282',
'iris': '#6258c4',
'irish green': '#019529',
'ivory': '#ffffcb',
'jade green': '#2baf6a',
'jade': '#1fa774',
'jungle green': '#048243',
'kelley green': '#009337',
'kelly green': '#02ab2e',
'kermit green': '#5cb200',
'key lime': '#aeff6e',
'khaki green': '#728639',
'khaki': '#aaa662',
'kiwi green': '#8ee53f',
'kiwi': '#9cef43',
'lavender blue': '#8b88f8',
'lavender pink': '#dd85d7',
'lavender': '#c79fef',
'lawn green': '#4da409',
'leaf green': '#5ca904',
'leaf': '#71aa34',
'leafy green': '#51b73b',
'leather': '#ac7434',
'lemon green': '#adf802',
'lemon lime': '#bffe28',
'lemon yellow': '#fdff38',
'lemon': '#fdff52',
'lichen': '#8fb67b',
'light aqua': '#8cffdb',
'light aquamarine': '#7bfdc7',
'light beige': '#fffeb6',
'light blue green': '#7efbb3',
'light blue grey': '#b7c9e2',
'light blue': '#95d0fc',
'light bluish green': '#76fda8',
'light bright green': '#53fe5c',
'light brown': '#ad8150',
'light burgundy': '#a8415b',
'light cyan': '#acfffc',
'light eggplant': '#894585',
'light forest green': '#4f9153',
'light gold': '#fddc5c',
'light grass green': '#9af764',
'light green blue': '#56fca2',
'light green': '#96f97b',
'light greenish blue': '#63f7b4',
'light grey blue': '#9dbcd4',
'light grey green': '#b7e1a1',
'light grey': '#d8dcd6',
'light indigo': '#6d5acf',
'light khaki': '#e6f2a2',
'light lavendar': '#efc0fe',
'light lavender': '#dfc5fe',
'light light blue': '#cafffb',
'light light green': '#c8ffb0',
'light lilac': '#edc8ff',
'light lime green': '#b9ff66',
'light lime': '#aefd6c',
'light magenta': '#fa5ff7',
'light maroon': '#a24857',
'light mauve': '#c292a1',
'light mint green': '#a6fbb2',
'light mint': '#b6ffbb',
'light moss green': '#a6c875',
'light mustard': '#f7d560',
'light navy blue': '#2e5a88',
'light navy': '#155084',
'light neon green': '#4efd54',
'light olive green': '#a4be5c',
'light olive': '#acbf69',
'light orange': '#fdaa48',
'light pastel green': '#b2fba5',
'light pea green': '#c4fe82',
'light peach': '#ffd8b1',
'light periwinkle': '#c1c6fc',
'light pink': '#ffd1df',
'light plum': '#9d5783',
'light purple': '#bf77f6',
'light red': '#ff474c',
'light rose': '#ffc5cb',
'light royal blue': '#3a2efe',
'light sage': '#bcecac',
'light salmon': '#fea993',
'light sea green': '#98f6b0',
'light seafoam green': '#a7ffb5',
'light seafoam': '#a0febf',
'light sky blue': '#c6fcff',
'light tan': '#fbeeac',
'light teal': '#90e4c1',
'light turquoise': '#7ef4cc',
'light urple': '#b36ff6',
'light violet': '#d6b4fc',
'light yellow green': '#ccfd7f',
'light yellow': '#fffe7a',
'light yellowish green': '#c2ff89',
'lightblue': '#7bc8f6',
'lighter green': '#75fd63',
'lighter purple': '#a55af4',
'lightgreen': '#76ff7b',
'lightish blue': '#3d7afd',
'lightish green': '#61e160',
'lightish purple': '#a552e6',
'lightish red': '#fe2f4a',
'lilac': '#cea2fd',
'liliac': '#c48efd',
'lime green': '#89fe05',
'lime yellow': '#d0fe1d',
'lime': '#aaff32',
'lipstick red': '#c0022f',
'lipstick': '#d5174e',
'macaroni and cheese': '#efb435',
'magenta': '#c20078',
'mahogany': '#4a0100',
'maize': '#f4d054',
'mango': '#ffa62b',
'manilla': '#fffa86',
'marigold': '#fcc006',
'marine blue': '#01386a',
'marine': '#042e60',
'maroon': '#650021',
'mauve': '#ae7181',
'medium blue': '#2c6fbb',
'medium brown': '#7f5112',
'medium green': '#39ad48',
'medium grey': '#7d7f7c',
'medium pink': '#f36196',
'medium purple': '#9e43a2',
'melon': '#ff7855',
'merlot': '#730039',
'metallic blue': '#4f738e',
'mid blue': '#276ab3',
'mid green': '#50a747',
'midnight blue': '#020035',
'midnight purple': '#280137',
'midnight': '#03012d',
'military green': '#667c3e',
'milk chocolate': '#7f4e1e',
'mint green': '#8fff9f',
'mint': '#9ffeb0',
'minty green': '#0bf77d',
'mocha': '#9d7651',
'moss green': '#658b38',
'moss': '#769958',
'mossy green': '#638b27',
'mud brown': '#60460f',
'mud green': '#606602',
'mud': '#735c12',
'muddy brown': '#886806',
'muddy green': '#657432',
'muddy yellow': '#bfac05',
'mulberry': '#920a4e',
'murky green': '#6c7a0e',
'mushroom': '#ba9e88',
'mustard brown': '#ac7e04',
'mustard green': '#a8b504',
'mustard yellow': '#d2bd0a',
'mustard': '#ceb301',
'muted blue': '#3b719f',
'muted green': '#5fa052',
'muted pink': '#d1768f',
'muted purple': '#805b87',
'nasty green': '#70b23f',
'navy blue': '#001146',
'navy green': '#35530a',
'navy': '#01153e',
'neon blue': '#04d9ff',
'neon green': '#0cff0c',
'neon pink': '#fe019a',
'neon purple': '#bc13fe',
'neon red': '#ff073a',
'neon yellow': '#cfff04',
'nice blue': '#107ab0',
'night blue': '#040348',
'ocean blue': '#03719c',
'ocean green': '#3d9973',
'ocean': '#017b92',
'ocher': '#bf9b0c',
'ochre': '#bf9005',
'ocre': '#c69c04',
'off blue': '#5684ae',
'off green': '#6ba353',
'off white': '#ffffe4',
'off yellow': '#f1f33f',
'old pink': '#c77986',
'old rose': '#c87f89',
'olive brown': '#645403',
'olive drab': '#6f7632',
'olive green': '#677a04',
'olive yellow': '#c2b709',
'olive': '#6e750e',
'orange brown': '#be6400',
'orange pink': '#ff6f52',
'orange red': '#fd411e',
'orange yellow': '#ffad01',
'orange': '#f97306',
'orangeish': '#fd8d49',
'orangered': '#fe420f',
'orangey brown': '#b16002',
'orangey red': '#fa4224',
'orangey yellow': '#fdb915',
'orangish brown': '#b25f03',
'orangish red': '#f43605',
'orangish': '#fc824a',
'orchid': '#c875c4',
'pale aqua': '#b8ffeb',
'pale blue': '#d0fefe',
'pale brown': '#b1916e',
'pale cyan': '#b7fffa',
'pale gold': '#fdde6c',
'pale green': '#c7fdb5',
'pale grey': '#fdfdfe',
'pale lavender': '#eecffe',
'pale light green': '#b1fc99',
'pale lilac': '#e4cbff',
'pale lime green': '#b1ff65',
'pale lime': '#befd73',
'pale magenta': '#d767ad',
'pale mauve': '#fed0fc',
'pale olive green': '#b1d27b',
'pale olive': '#b9cc81',
'pale orange': '#ffa756',
'pale peach': '#ffe5ad',
'pale pink': '#ffcfdc',
'pale purple': '#b790d4',
'pale red': '#d9544d',
'pale rose': '#fdc1c5',
'pale salmon': '#ffb19a',
'pale sky blue': '#bdf6fe',
'pale teal': '#82cbb2',
'pale turquoise': '#a5fbd5',
'pale violet': '#ceaefa',
'pale yellow': '#ffff84',
'pale': '#fff9d0',
'parchment': '#fefcaf',
'pastel blue': '#a2bffe',
'pastel green': '#b0ff9d',
'pastel orange': '#ff964f',
'pastel pink': '#ffbacd',
'pastel purple': '#caa0ff',
'pastel red': '#db5856',
'pastel yellow': '#fffe71',
'pea green': '#8eab12',
'pea soup green': '#94a617',
'pea soup': '#929901',
'pea': '#a4bf20',
'peach': '#ffb07c',
'peachy pink': '#ff9a8a',
'peacock blue': '#016795',
'pear': '#cbf85f',
'periwinkle blue': '#8f99fb',
'periwinkle': '#8e82fe',
'perrywinkle': '#8f8ce7',
'petrol': '#005f6a',
'pig pink': '#e78ea5',
'pine green': '#0a481e',
'pine': '#2b5d34',
'pink purple': '#db4bda',
'pink red': '#f5054f',
'pink': '#ff81c0',
'pink/purple': '#ef1de7',
'pinkish brown': '#b17261',
'pinkish grey': '#c8aca9',
'pinkish orange': '#ff724c',
'pinkish purple': '#d648d7',
'pinkish red': '#f10c45',
'pinkish tan': '#d99b82',
'pinkish': '#d46a7e',
'pinky purple': '#c94cbe',
'pinky red': '#fc2647',
'pinky': '#fc86aa',
'piss yellow': '#ddd618',
'pistachio': '#c0fa8b',
'plum purple': '#4e0550',
'plum': '#580f41',
'poison green': '#40fd14',
'poo brown': '#885f01',
'poo': '#8f7303',
'poop brown': '#7a5901',
'poop green': '#6f7c00',
'poop': '#7f5e00',
'powder blue': '#b1d1fc',
'powder pink': '#ffb2d0',
'primary blue': '#0804f9',
'prussian blue': '#004577',
'puce': '#a57e52',
'puke brown': '#947706',
'puke green': '#9aae07',
'puke yellow': '#c2be0e',
'puke': '#a5a502',
'pumpkin orange': '#fb7d07',
'pumpkin': '#e17701',
'pure blue': '#0203e2',
'purple blue': '#632de9',
'purple brown': '#673a3f',
'purple grey': '#866f85',
'purple pink': '#e03fd8',
'purple red': '#990147',
'purple': '#7e1e9c',
'purple/blue': '#5d21d0',
'purple/pink': '#d725de',
'purpleish blue': '#6140ef',
'purpleish pink': '#df4ec8',
'purpleish': '#98568d',
'purpley blue': '#5f34e7',
'purpley grey': '#947e94',
'purpley pink': '#c83cb9',
'purpley': '#8756e4',
'purplish blue': '#601ef9',
'purplish brown': '#6b4247',
'purplish grey': '#7a687f',
'purplish pink': '#ce5dae',
'purplish red': '#b0054b',
'purplish': '#94568c',
'purply blue': '#661aee',
'purply pink': '#f075e6',
'purply': '#983fb2',
'putty': '#beae8a',
'racing green': '#014600',
'radioactive green': '#2cfa1f',
'raspberry': '#b00149',
'raw sienna': '#9a6200',
'raw umber': '#a75e09',
'really light blue': '#d4ffff',
'red brown': '#8b2e16',
'red orange': '#fd3c06',
'red pink': '#fa2a55',
'red purple': '#820747',
'red violet': '#9e0168',
'red wine': '#8c0034',
'red': '#e50000',
'reddish brown': '#7f2b0a',
'reddish grey': '#997570',
'reddish orange': '#f8481c',
'reddish pink': '#fe2c54',
'reddish purple': '#910951',
'reddish': '#c44240',
'reddy brown': '#6e1005',
'rich blue': '#021bf9',
'rich purple': '#720058',
'robin egg blue': '#8af1fe',
'rosa': '#fe86a4',
'rose pink': '#f7879a',
'rose red': '#be013c',
'rose': '#cf6275',
'rosy pink': '#f6688e',
'rouge': '#ab1239',
'royal blue': '#0504aa',
'royal purple': '#4b006e',
'royal': '#0c1793',
'ruby': '#ca0147',
'russet': '#a13905',
'rust brown': '#8b3103',
'rust orange': '#c45508',
'rust red': '#aa2704',
'rust': '#a83c09',
'rusty orange': '#cd5909',
'rusty red': '#af2f0d',
'saffron': '#feb209',
'sage green': '#88b378',
'sage': '#87ae73',
'salmon pink': '#fe7b7c',
'salmon': '#ff796c',
'sand brown': '#cba560',
'sand yellow': '#fce166',
'sand': '#e2ca76',
'sandstone': '#c9ae74',
'sandy brown': '#c4a661',
'sandy yellow': '#fdee73',
'sandy': '#f1da7a',
'sap green': '#5c8b15',
'sapphire': '#2138ab',
'scarlet': '#be0119',
'sea blue': '#047495',
'sea green': '#53fca1',
'sea': '#3c9992',
'seafoam blue': '#78d1b6',
'seafoam green': '#7af9ab',
'seafoam': '#80f9ad',
'seaweed green': '#35ad6b',
'seaweed': '#18d17b',
'sepia': '#985e2b',
'shamrock green': '#02c14d',
'shamrock': '#01b44c',
'shit brown': '#7b5804',
'shit green': '#758000',
'shit': '#7f5f00',
'shocking pink': '#fe02a2',
'sick green': '#9db92c',
'sickly green': '#94b21c',
'sickly yellow': '#d0e429',
'sienna': '#a9561e',
'silver': '#c5c9c7',
'sky blue': '#75bbfd',
'sky': '#82cafc',
'slate blue': '#5b7c99',
'slate green': '#658d6d',
'slate grey': '#59656d',
'slate': '#516572',
'slime green': '#99cc04',
'snot green': '#9dc100',
'snot': '#acbb0d',
'soft blue': '#6488ea',
'soft green': '#6fc276',
'soft pink': '#fdb0c0',
'soft purple': '#a66fb5',
'spearmint': '#1ef876',
'spring green': '#a9f971',
'spruce': '#0a5f38',
'squash': '#f2ab15',
'steel blue': '#5a7d9a',
'steel grey': '#6f828a',
'steel': '#738595',
'stone': '#ada587',
'stormy blue': '#507b9c',
'straw': '#fcf679',
'strawberry': '#fb2943',
'strong blue': '#0c06f7',
'strong pink': '#ff0789',
'sun yellow': '#ffdf22',
'sunflower yellow': '#ffda03',
'sunflower': '#ffc512',
'sunny yellow': '#fff917',
'sunshine yellow': '#fffd37',
'swamp green': '#748500',
'swamp': '#698339',
'tan brown': '#ab7e4c',
'tan green': '#a9be70',
'tan': '#d1b26f',
'tangerine': '#ff9408',
'taupe': '#b9a281',
'tea green': '#bdf8a3',
'tea': '#65ab7c',
'teal blue': '#01889f',
'teal green': '#25a36f',
'teal': '#029386',
'tealish green': '#0cdc73',
'tealish': '#24bca8',
'terra cotta': '#c9643b',
'terracota': '#cb6843',
'terracotta': '#ca6641',
'tiffany blue': '#7bf2da',
'tomato red': '#ec2d01',
'tomato': '#ef4026',
'topaz': '#13bbaf',
'toupe': '#c7ac7d',
'toxic green': '#61de2a',
'tree green': '#2a7e19',
'true blue': '#010fcc',
'true green': '#089404',
'turquoise blue': '#06b1c4',
'turquoise green': '#04f489',
'turquoise': '#06c2ac',
'turtle green': '#75b84f',
'twilight blue': '#0a437a',
'twilight': '#4e518b',
'ugly blue': '#31668a',
'ugly brown': '#7d7103',
'ugly green': '#7a9703',
'ugly pink': '#cd7584',
'ugly purple': '#a442a0',
'ugly yellow': '#d0c101',
'ultramarine blue': '#1805db',
'ultramarine': '#2000b1',
'umber': '#b26400',
'velvet': '#750851',
'vermillion': '#f4320c',
'very dark blue': '#000133',
'very dark brown': '#1d0200',
'very dark green': '#062e03',
'very dark purple': '#2a0134',
'very light blue': '#d5ffff',
'very light brown': '#d3b683',
'very light green': '#d1ffbd',
'very light pink': '#fff4f2',
'very light purple': '#f6cefc',
'very pale blue': '#d6fffe',
'very pale green': '#cffdbc',
'vibrant blue': '#0339f8',
'vibrant green': '#0add08',
'vibrant purple': '#ad03de',
'violet blue': '#510ac9',
'violet pink': '#fb5ffc',
'violet red': '#a50055',
'violet': '#9a0eea',
'viridian': '#1e9167',
'vivid blue': '#152eff',
'vivid green': '#2fef10',
'vivid purple': '#9900fa',
'vomit green': '#89a203',
'vomit yellow': '#c7c10c',
'vomit': '#a2a415',
'warm blue': '#4b57db',
'warm brown': '#964e02',
'warm grey': '#978a84',
'warm pink': '#fb5581',
'warm purple': '#952e8f',
'washed out green': '#bcf5a6',
'water blue': '#0e87cc',
'watermelon': '#fd4659',
'weird green': '#3ae57f',
'wheat': '#fbdd7e',
'white': '#ffffff',
'windows blue': '#3778bf',
'wine red': '#7b0323',
'wine': '#80013f',
'wintergreen': '#20f986',
'wisteria': '#a87dc2',
'yellow brown': '#b79400',
'yellow green': '#c0fb2d',
'yellow ochre': '#cb9d06',
'yellow orange': '#fcb001',
'yellow tan': '#ffe36e',
'yellow': '#ffff14',
'yellow/green': '#c8fd3d',
'yellowgreen': '#bbf90f',
'yellowish brown': '#9b7a01',
'yellowish green': '#b0dd16',
'yellowish orange': '#ffab0f',
'yellowish tan': '#fcfc81',
'yellowish': '#faee66',
'yellowy brown': '#ae8b0c',
'yellowy green': '#bff128',
}
####################################################################################################
# https://drafts.csswg.org/css-color-4/#named-colors
CSS4_COLORS = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'rebeccapurple': '#663399',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#F4A460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32',
}
####################################################################################################
# These colors are from Qt5 QML: http://doc.qt.io/qt-5/qml-color.html
QML_COLORS = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkgrey': '#a9a9a9',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'grey': '#808080',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgreen': '#90ee90',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
####################################################################################################
VALENTINA_COLORS = (
'black',
'blue',
'cornflowerblue',
'darkBlue',
'darkGreen',
'darkRed',
'darkviolet',
'deeppink',
'deepskyblue',
'goldenrod',
'green',
'lightsalmon',
'lime',
'mediumseagreen',
'orange',
'violet',
'yellow',
)
VALENTINA_COLORS = {name: QML_COLORS[name.lower()] for name in VALENTINA_COLORS}
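# Illustrative lookup: the comprehension above lower-cases each name before
# indexing QML_COLORS, so VALENTINA_COLORS['darkBlue'] == '#00008b'.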
|
FabriceSalvaire/PyValentina
|
Patro/GraphicStyle/Color/color_data.py
|
Python
|
gpl-3.0
| 43,156
|
[
"Amber"
] |
416e591efa8130cca2db46c8d3eb0c94e7f44884959539de959a6a5b535484d5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Wrapper classes for Cif input and output from Structures.
"""
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
from io import StringIO
import numpy as np
from functools import partial
from pathlib import Path
from inspect import getfullargspec as getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.io import zopen
from pymatgen.util.coord import in_coord_list_pbc, find_in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.core import Magmom
from pymatgen.core.operations import MagSymmOp
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
sub_spgrp = partial(re.sub, r"[\s_]", "")
space_groups = {sub_spgrp(k): k for k in SYMM_DATA['space_group_encoding'].keys()} # type: ignore
_COD_DATA = None
def _get_cod_data():
global _COD_DATA
if _COD_DATA is None:
import pymatgen
with open(os.path.join(pymatgen.symmetry.__path__[0],
"symm_ops.json")) \
as f:
import json
_COD_DATA = json.load(f)
return _COD_DATA
class CifBlock:
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
information on which keys are grouped together are stored in the loops
attribute.
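
    A minimal illustrative example (the values are hypothetical)::

        >>> block = CifBlock({"_cell_length_a": "4.0"}, loops=[],
        ...                  header="my_block")
        >>> print(block)
        data_my_block
        _cell_length_a 4.0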
"""
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __eq__(self, other):
return self.loops == other.loops \
and self.data == other.data \
and self.header == other.header
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = ["data_{}".format(self.header)]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
# search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
# k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append("{} {}".format(k, v))
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += '\n ' + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = '\n ' + val
s += line
return s
def _format_field(self, v):
        v = str(v).strip()
if len(v) > self.maxlen:
return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
# add quotes if necessary
if v == '':
return '""'
if (" " in v or v[0] == "_") \
and not (v[0] == "'" and v[-1] == "'") \
and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
@classmethod
def _process_string(cls, string):
# remove comments
string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
# remove empty lines
string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
# remove non_ascii
string = remove_non_ascii(string)
# since line breaks in .cif files are mostly meaningless,
# break up into a stream of tokens to parse, rejoining multiline
# strings (between semicolons)
q = deque()
multiline = False
ml = []
# this regex splits on spaces, except when in quotes.
# starting quotes must not be preceded by non-whitespace
# (these get eaten by the first expression)
# ending quotes must not be followed by non-whitespace
p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(('', '', '', ' '.join(ml)))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
# s is tuple. location of the data in the tuple
# depends on whether it was quoted in the input
q.append(s)
return q
@classmethod
def from_string(cls, string):
"""
Reads CifBlock from string.
:param string: String representation.
:return: CifBlock
"""
q = cls._process_string(string)
header = q.popleft()[0][5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
# cif keys aren't in quotes, so show up in s[0]
if s[0] == "_eof":
break
if s[0].startswith("_"):
try:
data[s[0]] = "".join(q.popleft())
except IndexError:
data[s[0]] = ""
elif s[0].startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s[0].startswith("loop_") or not s[0].startswith("_"):
break
columns.append("".join(q.popleft()))
data[columns[-1]] = []
while q:
s = q[0]
if s[0].startswith("loop_") or s[0].startswith("_"):
break
items.append("".join(q.popleft()))
                n = len(items) // len(columns)
                # each column must receive the same number of entries
                assert len(items) % len(columns) == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif "".join(s).strip() != "":
warnings.warn("Possible issue in cif file"
" at line: {}".format("".join(s).strip()))
return cls(data, loops, header)
class CifFile:
"""
    Reads and parses CifBlocks from a .cif file or string.
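
    A minimal usage sketch (the filename is hypothetical)::

        >>> cif = CifFile.from_file("my_structure.cif")
        >>> for header in cif.data:
        ...     print(header)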
"""
def __init__(self, data, orig_string=None, comment=None):
"""
Args:
            data (OrderedDict): Of CifBlock objects.
orig_string (str): The original cif string.
comment (str): Comment string.
"""
self.data = data
self.orig_string = orig_string
self.comment = comment or "# generated using pymatgen"
def __str__(self):
s = ["%s" % v for v in self.data.values()]
return self.comment + "\n" + "\n".join(s) + "\n"
@classmethod
def from_string(cls, string):
"""
Reads CifFile from a string.
:param string: String representation.
:return: CifFile
"""
d = OrderedDict()
for x in re.split(r"^\s*data_", "x\n" + string,
flags=re.MULTILINE | re.DOTALL)[1:]:
# Skip over Cif block that contains powder diffraction data.
# Some elements in this block were missing from CIF files in
# Springer materials/Pauling file DBs.
# This block anyway does not contain any structure information, and
# CifParser was also not parsing it.
if 'powder_pattern' in re.split(r"\n", x, 1)[0]:
continue
c = CifBlock.from_string("data_" + x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
"""
Reads CifFile from a filename.
:param filename: Filename
:return: CifFile
"""
with zopen(str(filename), "rt", errors="replace") as f:
return cls.from_string(f.read())
class CifParser:
"""
    Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
    issue warnings if corrections are applied. These warnings are also stored
    in the parser's ``warnings`` attribute.
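
    A typical usage sketch (the filename here is hypothetical)::

        >>> parser = CifParser("my_structure.cif")
        >>> structure = parser.get_structures()[0]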
"""
def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
"""
Args:
filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
self._occupancy_tolerance = occupancy_tolerance
self._site_tolerance = site_tolerance
if isinstance(filename, (str, Path)):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
# store if CIF contains features from non-core CIF dictionaries
# e.g. magCIF
self.feature_flags = {}
self.warnings = []
def is_magcif():
"""
Checks to see if file appears to be a magCIF file (heuristic).
"""
# Doesn't seem to be a canonical way to test if file is magCIF or
# not, so instead check for magnetic symmetry datanames
prefixes = ['_space_group_magn', '_atom_site_moment',
'_space_group_symop_magn']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif'] = is_magcif()
def is_magcif_incommensurate():
"""
Checks to see if file contains an incommensurate magnetic
structure (heuristic).
"""
# Doesn't seem to be a canonical way to test if magCIF file
            # describes an incommensurate structure or not, so instead check
# for common datanames
if not self.feature_flags["magcif"]:
return False
prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()
for k in self._cif.data.keys():
# pass individual CifBlocks to _sanitize_data
self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
def _sanitize_data(self, data):
"""
        Some CIF files do not conform to spec. This function corrects
        known issues, particularly in regard to Springer Materials /
        Pauling File CIFs.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
            # Keep track of which data rows need to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
len(data["_atom_site_label"][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(
r'\([0-9]*\)', '',
symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
                        # key-value pair in "els_occu".
els_occu[str(re.findall(r'\D+', symbol_str_lst[
elocc_idx].strip())[1]).replace('<sup>', '')] = \
float('0' + re.findall(r'\.?\d+', symbol_str_lst[
elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(
et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
                    for idx in sorted(idxs_to_remove, reverse=True):
                        del data.data[original_key][idx]
if len(idxs_to_remove) > 0:
self.warnings.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ["_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label"]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc":
"_space_group_magn.transform_BNS_Pp_abc"}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.warnings.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3., 2 / 3.)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except Exception:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.warnings.append("Some fractional co-ordinates rounded to ideal values to "
"avoid issues with finite precision.")
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
if len(magmoms_in) != len(coords_in):
raise ValueError
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom),
lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
else:
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) # return dummy magmoms
def get_lattice(self, data, length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
        and necessary parameters are parsed instead.
"""
try:
lengths = [str2float(data["_cell_length_" + i])
for i in length_strings]
angles = [str2float(data["_cell_angle_" + i])
for i in angle_strings]
if not lattice_type:
return Lattice.from_parameters(*lengths, *angles)
else:
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
            # Missing key: search for the cell setting instead
            for lattice_label in ["_symmetry_cell_setting",
                                  "_space_group_crystal_system"]:
                if data.data.get(lattice_label):
                    lattice_type = data.data.get(lattice_label).lower()
try:
required_args = getargspec(
getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings
if l in required_args)
angles = (a for a in angle_strings
if a in required_args)
return self.get_lattice(data, lengths, angles,
lattice_type=lattice_type)
except AttributeError as exc:
                        self.warnings.append(str(exc))
                        warnings.warn(str(exc))
else:
return None
def get_symops(self, data):
"""
In order to generate symmetry equivalent positions, the symmetry
operations are parsed. If the symops are not present, the space
group symbol is parsed, and symops are generated.
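
        Illustrative fallback: for a CIF with no symmetry information at all,
        a single identity operation is used, i.e. space group P1.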
"""
symops = []
for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
"_symmetry_equiv_pos_as_xyz_",
"_space_group_symop_operation_xyz",
"_space_group_symop_operation_xyz_"]:
if data.data.get(symmetry_label):
xyz = data.data.get(symmetry_label)
if isinstance(xyz, str):
msg = "A 1-line symmetry op P1 CIF is detected!"
warnings.warn(msg)
self.warnings.append(msg)
xyz = [xyz]
try:
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
break
except ValueError:
continue
if not symops:
# Try to parse symbol
for symmetry_label in ["_symmetry_space_group_name_H-M",
"_symmetry_space_group_name_H_M",
"_symmetry_space_group_name_H-M_",
"_symmetry_space_group_name_H_M_",
"_space_group_name_Hall",
"_space_group_name_Hall_",
"_space_group_name_H-M_alt",
"_space_group_name_H-M_alt_",
"_symmetry_space_group_name_hall",
"_symmetry_space_group_name_hall_",
"_symmetry_space_group_name_h-m",
"_symmetry_space_group_name_h-m_"]:
sg = data.data.get(symmetry_label)
if sg:
sg = sub_spgrp(sg)
try:
spg = space_groups.get(sg)
if spg:
symops = SpaceGroup(spg).symmetry_ops
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except ValueError:
# Ignore any errors
pass
try:
for d in _get_cod_data():
if sg == re.sub(r"\s+", "",
d["hermann_mauguin"]):
xyz = d["symops"]
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except Exception:
continue
if symops:
break
if not symops:
# Try to parse International number
for symmetry_label in ["_space_group_IT_number",
"_space_group_IT_number_",
"_symmetry_Int_Tables_number",
"_symmetry_Int_Tables_number_"]:
if data.data.get(symmetry_label):
try:
i = int(str2float(data.data.get(symmetry_label)))
symops = SpaceGroup.from_int_number(i).symmetry_ops
break
except ValueError:
continue
if not symops:
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Defaulting to P1."
warnings.warn(msg)
self.warnings.append(msg)
            symops = [SymmOp.from_xyz_string("x, y, z")]  # single identity op (P1)
return symops
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [i - np.floor(i) for i
in
op.translation_vector + centering_op.translation_vector]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal))
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get(
"_space_group_magn.number_BNS"):
            if data.data.get("_space_group_magn.name_BNS"):
                # get BNS label for MagneticSpaceGroup()
                bns_id = data.data.get("_space_group_magn.name_BNS")
            else:
                # get BNS number for MagneticSpaceGroup()
                # by converting string to list of ints
                bns_id = list(map(int, (
                    data.data.get("_space_group_magn.number_BNS").split("."))))
            magsg = MagneticSpaceGroup(bns_id)
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get(
"_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
                    raise NotImplementedError(
                        "Non-standard settings not currently supported.")
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
                raise NotImplementedError(
                    "Incomplete specification to implement.")
            magsymmops = magsg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.warnings.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
def parse_magmoms(self, data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception(
'Magmoms given in terms of crystal axes in magCIF spec.')
try:
magmoms = {
data["_atom_site_moment_label"][i]:
np.array(
[str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i])]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
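
        Illustrative behaviour (``parser`` is an assumed CifParser instance)::

            >>> parser._parse_symbol("Fe2+")
            'Fe'
            >>> parser._parse_symbol("Wat")  # special label for water
            'O'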
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": "", "NO3": "N"}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
msg = "{} parsed as {}".format(sym, parsed_sym)
warnings.warn(msg)
self.warnings.append(msg)
return parsed_sym
def _get_structure(self, data, primitive):
"""
Generate structure from part of the cif.
"""
def get_num_implicit_hydrogens(sym):
num_h = {"Wat": 2, "wat": 2, "O-H": 1}
return num_h.get(sym[:3], 0)
lattice = self.get_lattice(data)
# if magCIF, get magnetic symmetry moments and magmoms
# else standard CIF, and use empty magmom dict
if self.feature_flags["magcif_incommensurate"]:
raise NotImplementedError(
"Incommensurate structures not currently supported.")
elif self.feature_flags["magcif"]:
self.symmetry_operations = self.get_magsymops(data)
magmoms = self.parse_magmoms(data, lattice=lattice)
else:
self.symmetry_operations = self.get_symops(data)
magmoms = {}
oxi_states = self.parse_oxi_states(data)
coord_to_species = OrderedDict()
coord_to_magmoms = OrderedDict()
def get_matching_coord(coord):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in self.symmetry_operations:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c,
atol=self._site_tolerance)
                # can't use `if inds` here: np.array([0]) evaluates to False,
                # so check the length explicitly
if len(inds):
return keys[inds[0]]
return False
for i in range(len(data["_atom_site_label"])):
try:
# If site type symbol exists, use it. Otherwise, we use the
# label.
symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
num_h = get_num_implicit_hydrogens(
data["_atom_site_type_symbol"][i])
except KeyError:
symbol = self._parse_symbol(data["_atom_site_label"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
if not symbol:
continue
if oxi_states is not None:
o_s = oxi_states.get(symbol, 0)
# use _atom_site_type_symbol if possible for oxidation state
if "_atom_site_type_symbol" in data.data.keys():
oxi_symbol = data["_atom_site_type_symbol"][i]
o_s = oxi_states.get(oxi_symbol, o_s)
try:
el = Specie(symbol, o_s)
except Exception:
el = DummySpecie(symbol, o_s)
else:
el = get_el_sp(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
magmom = magmoms.get(data["_atom_site_label"][i],
np.array([0, 0, 0]))
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
match = get_matching_coord(coord)
comp_d = {el: occu}
if num_h > 0:
comp_d["H"] = num_h
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
comp = Composition(comp_d)
if not match:
coord_to_species[coord] = comp
coord_to_magmoms[coord] = magmom
else:
coord_to_species[match] += comp
# disordered magnetic not currently supported
coord_to_magmoms[match] = None
sum_occu = [sum(c.values()) for c in coord_to_species.values()
if not set(c.elements) == {Element("O"), Element("H")}]
if any([o > 1 for o in sum_occu]):
msg = "Some occupancies (%s) sum to > 1! If they are within " \
"the tolerance, they will be rescaled." % str(sum_occu)
warnings.warn(msg)
self.warnings.append(msg)
allspecies = []
allcoords = []
allmagmoms = []
allhydrogens = []
# check to see if magCIF file is disordered
if self.feature_flags["magcif"]:
for k, v in coord_to_magmoms.items():
if v is None:
# Proposed solution to this is to instead store magnetic
# moments as Specie 'spin' property, instead of site
# property, but this introduces ambiguities for end user
                    # (such as unintended use of `spin`, and Specie would have
                    # a fictitious oxidation state).
raise NotImplementedError(
'Disordered magnetic structures not currently supported.')
if coord_to_species.items():
for comp, group in groupby(
sorted(list(coord_to_species.items()), key=lambda x: x[1]),
key=lambda x: x[1]):
tmp_coords = [site[0] for site in group]
tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in
tmp_coords]
if self.feature_flags["magcif"]:
coords, magmoms = self._unique_coords(tmp_coords,
magmoms_in=tmp_magmom,
lattice=lattice)
else:
coords, magmoms = self._unique_coords(tmp_coords)
if set(comp.elements) == {Element("O"), Element("H")}:
# O with implicit hydrogens
im_h = comp["H"]
species = Composition({"O": comp["O"]})
else:
im_h = 0
species = comp
allhydrogens.extend(len(coords) * [im_h])
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
allmagmoms.extend(magmoms)
# rescale occupancies if necessary
for i, species in enumerate(allspecies):
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
allspecies[i] = species / totaloccu
if allspecies and len(allspecies) == len(allcoords) \
and len(allspecies) == len(allmagmoms):
site_properties = dict()
if any(allhydrogens):
assert len(allhydrogens) == len(allcoords)
site_properties["implicit_hydrogens"] = allhydrogens
if self.feature_flags["magcif"]:
site_properties["magmom"] = allmagmoms
if len(site_properties) == 0:
site_properties = None
struct = Structure(lattice, allspecies, allcoords,
site_properties=site_properties)
struct = struct.get_sorted_structure()
if primitive and self.feature_flags['magcif']:
struct = struct.get_primitive_structure(use_site_props=True)
elif primitive:
struct = struct.get_primitive_structure()
struct = struct.get_reduced_structure()
return struct
def get_structures(self, primitive=True):
"""
        Return list of structures in CIF file. The ``primitive`` argument
        sets whether a primitive or conventional cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures.
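
        Example (the filename is hypothetical)::

            >>> parser = CifParser("my_structure.cif")
            >>> conventional = parser.get_structures(primitive=False)[0]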
"""
structures = []
for d in self._cif.data.values():
try:
s = self._get_structure(d, primitive)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.warnings.append(str(exc))
warnings.warn(str(exc))
if self.warnings:
warnings.warn("Issues encountered while parsing CIF: %s" % "\n".join(self.warnings))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
def get_bibtex_string(self):
"""
Get BibTeX reference from CIF file.
:return: BibTeX string
"""
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
raise RuntimeError("Bibliographic data extraction requires pybtex.")
bibtex_keys = {'author': ('_publ_author_name', '_citation_author_name'),
'title': ('_publ_section_title', '_citation_title'),
'journal': ('_journal_name_full', '_journal_name_abbrev',
'_citation_journal_full', '_citation_journal_abbrev'),
'volume': ('_journal_volume', '_citation_journal_volume'),
'year': ('_journal_year', '_citation_year'),
'number': ('_journal_number', '_citation_number'),
'page_first': ('_journal_page_first', '_citation_page_first'),
'page_last': ('_journal_page_last', '_citation_page_last'),
'doi': ('_journal_DOI', '_citation_DOI')}
entries = {}
# TODO: parse '_publ_section_references' when it exists?
# TODO: CIF specification supports multiple citations.
for idx, data in enumerate(self._cif.data.values()):
# convert to lower-case keys, some cif files inconsistent
data = {k.lower(): v for k, v in data.data.items()}
bibtex_entry = {}
for field, tags in bibtex_keys.items():
for tag in tags:
if tag in data:
if isinstance(data[tag], list):
bibtex_entry[field] = data[tag][0]
else:
bibtex_entry[field] = data[tag]
# convert to bibtex author format ('and' delimited)
if 'author' in bibtex_entry:
# separate out semicolon authors
if isinstance(bibtex_entry["author"], str):
if ";" in bibtex_entry["author"]:
bibtex_entry["author"] = bibtex_entry["author"].split(";")
if isinstance(bibtex_entry['author'], list):
bibtex_entry['author'] = ' and '.join(bibtex_entry['author'])
# convert to bibtex page range format, use empty string if not specified
if ('page_first' in bibtex_entry) or ('page_last' in bibtex_entry):
bibtex_entry['pages'] = '{0}--{1}'.format(bibtex_entry.get('page_first', ''),
bibtex_entry.get('page_last', ''))
bibtex_entry.pop('page_first', None) # and remove page_first, page_list if present
bibtex_entry.pop('page_last', None)
# cite keys are given as cif-reference-idx in the order they are found
entries['cif-reference-{}'.format(idx)] = Entry('article', list(bibtex_entry.items()))
return BibliographyData(entries).to_string(bib_format='bibtex')
def as_dict(self):
"""
:return: MSONable dict
"""
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
"""
:return: Whether there are errors/warnings detected in CIF parsing.
"""
return len(self.warnings) > 0
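# A minimal reading sketch of the methods above, assuming the enclosing parser
# class is the usual ``CifParser`` of this module; the file name is illustrative.
def _example_read_cif(filename="example.cif"):
    parser = CifParser(filename)
    structures = parser.get_structures(primitive=True)
    if parser.has_errors:
        print(parser.warnings)
    return structures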
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
"""
def __init__(self, struct, symprec=None, write_magmoms=False):
"""
Args:
struct (Structure): structure to write
symprec (float): If not none, finds the symmetry of the structure
and writes the cif with symmetry information. Passes symprec
to the SpacegroupAnalyzer
write_magmoms (bool): If True, will write magCIF file. Incompatible
with symprec
"""
if write_magmoms and symprec:
warnings.warn(
"Magnetic symmetry cannot currently be detected by pymatgen,"
"disabling symmetry detection.")
symprec = None
format_str = "{:.8f}"
block = OrderedDict()
loops = []
spacegroup = ("P 1", 1)
if symprec is not None:
sf = SpacegroupAnalyzer(struct, symprec)
spacegroup = (sf.get_space_group_symbol(),
sf.get_space_group_number())
# Needs the refined structure when using symprec. This converts
# primitive to conventional structures, the standard for CIF.
struct = sf.get_refined_structure()
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ['a', 'b', 'c']:
block["_cell_length_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
for cell_attr in ['alpha', 'beta', 'gamma']:
block["_cell_angle_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = "%.8f" % latt.volume
reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
block["_cell_formula_units_Z"] = str(int(fu))
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
symmops = []
for op in sf.get_symmetry_operations():
v = op.translation_vector
symmops.append(SymmOp.from_rotation_and_translation(
op.rotation_matrix, v))
ops = [op.as_xyz_string() for op in symmops]
block["_symmetry_equiv_pos_site_id"] = \
["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id",
"_symmetry_equiv_pos_as_xyz"])
try:
symbol_to_oxinum = OrderedDict([
(el.__str__(),
float(el.oxi_state))
for el in sorted(comp.elements)])
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
except (TypeError, AttributeError):
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
sorted(comp.elements)])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
atom_site_moment_label = []
atom_site_moment_crystalaxis_x = []
atom_site_moment_crystalaxis_y = []
atom_site_moment_crystalaxis_z = []
count = 0
if symprec is None:
for site in struct:
for sp, occu in sorted(site.species.items()):
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
magmom = Magmom(
site.properties.get('magmom', getattr(sp, 'spin', 0)))
if write_magmoms and abs(magmom) > 0:
moment = Magmom.get_moment_relative_to_crystal_axes(
magmom, latt)
atom_site_moment_label.append(
"{}{}".format(sp.symbol, count))
atom_site_moment_crystalaxis_x.append("%.5f" % moment[0])
atom_site_moment_crystalaxis_y.append("%.5f" % moment[1])
atom_site_moment_crystalaxis_z.append("%.5f" % moment[2])
count += 1
else:
# The following just presents a deterministic ordering.
unique_sites = [
(sorted(sites, key=lambda s: tuple([abs(x) for x in
s.frac_coords]))[0],
len(sites))
for sites in sf.get_symmetrized_structure().equivalent_sites
]
for site, mult in sorted(
unique_sites,
key=lambda t: (t[0].species.average_electroneg,
-t[1], t[0].a, t[0].b, t[0].c)):
for sp, occu in site.species.items():
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("%d" % mult)
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = \
atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(["_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy"])
if write_magmoms:
block["_atom_site_moment_label"] = atom_site_moment_label
block["_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
block["_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
block["_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
loops.append(["_atom_site_moment_label",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z"])
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
def __str__(self):
"""
Returns the cif as a string.
"""
return self._cf.__str__()
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
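# A minimal writing sketch of CifWriter above; ``struct`` is any pymatgen
# Structure built elsewhere, and the symprec value and output path are
# illustrative only.
def _example_write_cif(struct, path="out.cif"):
    writer = CifWriter(struct, symprec=0.1, write_magmoms=False)
    writer.write_file(path)
    return str(writer)  # the same CIF content as a string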
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
# Note that the ending ) is sometimes missing. That is why the code has
# been modified to treat it as optional. Same logic applies to lists.
return float(re.sub(r"\(.+\)*", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub(r"\(.+\)*", "", text[0]))
except ValueError as ex:
if text.strip() == ".":
return 0
raise ex
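# A small sketch of the helper above; the literal values are illustrative.
def _example_str2float():
    assert str2float("4.523(2)") == 4.523  # uncertainty bracket stripped
    assert str2float("1.000") == 1.0       # plain numbers pass through
    assert str2float(".") == 0             # a bare "." is treated as zero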
|
tschaume/pymatgen
|
pymatgen/io/cif.py
|
Python
|
mit
| 58,730
|
[
"Avogadro",
"CRYSTAL",
"pymatgen"
] |
f04cf9cc45d70c40323bd0511e6ee17b10c6f2ae8e6daabb8b56db9dbaf7d150
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import atexit
import datetime
import os
from typing import List, Union
from qcelemental.util import which, which_import
from . import core
# Numpy place holder for files and cleanup
numpy_files = []
def register_numpy_file(filename):
if not filename.endswith('.npy'): filename += '.npy'
if filename not in numpy_files:
numpy_files.append(filename)
def clean_numpy_files():
for nfile in numpy_files:
os.unlink(nfile)
atexit.register(clean_numpy_files)
def exit_printing(start_time: datetime.datetime = None, success: bool = None) -> None:
"""Prints the exit time and status.
Parameters
----------
start_time
starting time from which the elapsed time is computed.
success
Provides a success flag, otherwise uses the ``_success_flag_`` global variable
Returns
-------
None
"""
end_time = datetime.datetime.now()
core.print_out("\n Psi4 stopped on: {}".format(end_time.strftime('%A, %d %B %Y %I:%M%p')))
if start_time is not None:
run_time = end_time - start_time
run_time = str(run_time).split('.')
run_time = run_time[0] + '.' + run_time[1][:2]
core.print_out("\n Psi4 wall time for execution: {}\n".format(run_time))
if success is None:
success = _success_flag_
if success:
core.print_out("\n*** Psi4 exiting successfully. Buy a developer a beer!\n")
else:
core.print_out("\n*** Psi4 encountered an error. Buy a developer more coffee!\n")
core.print_out("*** Resources and help at github.com/psi4/psi4.\n")
_success_flag_ = False
# Working directory
_input_dir_ = os.getcwd()
def get_input_directory():
return _input_dir_
# Add-Ons
def _CMake_to_Py_boolean(cmakevar):
if cmakevar.upper() in ["1", "ON", "YES", "TRUE", "Y"]:
return True
else:
return False
def psi4_which(command, *, return_bool: bool = False, raise_error: bool = False,
raise_msg: str = None) -> Union[bool, None, str]:
"""Test to see if a command is available in Psi4 search path.
Returns
-------
str or None
By default, returns command path if command found or `None` if not.
Environment is $PSIPATH:$PATH, less any None values.
bool
When `return_bool=True`, returns whether or not found.
Raises
------
ModuleNotFoundError
When `raise_error=True` and command not found.
"""
lenv = (os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep) if x != '']) +
os.pathsep + os.environ.get('PATH', ''))
return which(command=command, return_bool=return_bool, raise_error=raise_error, raise_msg=raise_msg, env=lenv)
_addons_ = {
"ambit": _CMake_to_Py_boolean("@ENABLE_ambit@"),
"chemps2": _CMake_to_Py_boolean("@ENABLE_CheMPS2@"),
"dkh": _CMake_to_Py_boolean("@ENABLE_dkh@"),
"ecpint": _CMake_to_Py_boolean("@ENABLE_ecpint@"),
"libefp": which_import("pylibefp", return_bool=True),
"erd": _CMake_to_Py_boolean("@ENABLE_erd@"),
"gdma": _CMake_to_Py_boolean("@ENABLE_gdma@"),
"ipi": which_import("ipi", return_bool=True),
"pcmsolver": _CMake_to_Py_boolean("@ENABLE_PCMSolver@"),
"cppe": which_import("cppe", return_bool=True),
"simint": _CMake_to_Py_boolean("@ENABLE_simint@"),
"dftd3": psi4_which("dftd3", return_bool=True),
"cfour": psi4_which("xcfour", return_bool=True),
"mrcc": psi4_which("dmrcc", return_bool=True),
"gcp": psi4_which("gcp", return_bool=True),
"v2rdm_casscf": which_import("v2rdm_casscf", return_bool=True),
"gpu_dfcc": which_import("gpu_dfcc", return_bool=True),
"forte": which_import("forte", return_bool=True),
"snsmp2": which_import("snsmp2", return_bool=True),
"resp": which_import("resp", return_bool=True),
"psi4fockci": which_import("psi4fockci", return_bool=True),
"adcc": which_import("adcc", return_bool=True),
"mdi": which_import("mdi", return_bool=True),
"cct3": which_import("cct3", return_bool=True),
"dftd4": which_import("dftd4", return_bool=True),
}
def addons(request: str = None) -> Union[bool, List[str]]:
"""Returns boolean of whether Add-On *request* is available to Psi4,
either compiled in or searchable in $PSIPATH:$PATH, as relevant. If
*request* not passed, returns list of available Add-Ons.
"""
if request is None:
return sorted([k for k, v in _addons_.items() if v])
return _addons_[request.lower()]
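# A small sketch of the query helpers above; the add-on name is illustrative and
# availability depends on the local build and environment.
def _example_addons_query():
    available = addons()         # e.g. ['dftd3', 'ecpint', ...]
    has_dftd3 = addons("dftd3")  # True/False for a single add-on
    return available, has_dftd3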
# Testing
def test(extent: str = "full", extras: List = None) -> int:
"""Runs a test suite through pytest.
Parameters
----------
extent
{'smoke', 'quick', 'full', 'long'}
All choices are defined, but choices may be redundant in some projects.
* _smoke_ will be minimal "is-working?" test(s).
* _quick_ will be as much coverage as can be got quickly, approx. 1/3 of the tests.
* _full_ will be the whole test suite, less some exceedingly long outliers.
* _long_ will be the whole test suite.
extras
Additional arguments to pass to `pytest`.
Returns
-------
int
Return code from `pytest.main()`. 0 for pass, 1 for fail.
"""
try:
import pytest
except ImportError:
raise RuntimeError('Testing module `pytest` is not installed. Run `conda install pytest`')
abs_test_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "tests"])
command = ['-rws', '-v']
if extent.lower() == 'smoke':
command.extend(['-m', 'smoke'])
elif extent.lower() == 'quick':
command.extend(['-m', 'quick or smoke'])
elif extent.lower() == 'full':
command.extend(['-m', 'not long'])
elif extent.lower() == 'long':
pass
if extras is not None:
command.extend(extras)
command.extend(['--capture=sys', abs_test_dir])
retcode = pytest.main(command)
return retcode
|
psi4/psi4
|
psi4/extras.py
|
Python
|
lgpl-3.0
| 6,885
|
[
"CFOUR",
"Psi4"
] |
a499f48c717354db972932e34ae558481e058527df4eeadc08752760f0ea9355
|
import logging
import os
import sys
import tempfile
from galaxy.util import listify
from xml.etree import ElementTree as XmlET
import xml.etree.ElementTree
log = logging.getLogger( __name__ )
using_python_27 = sys.version_info[ :2 ] >= ( 2, 7 )
class Py26CommentedTreeBuilder ( XmlET.XMLTreeBuilder ):
# Python 2.6 uses ElementTree 1.2.x.
def __init__ ( self, html=0, target=None ):
XmlET.XMLTreeBuilder.__init__( self, html, target )
self._parser.CommentHandler = self.handle_comment
def handle_comment ( self, data ):
self._target.start( XmlET.Comment, {} )
self._target.data( data )
self._target.end( XmlET.Comment )
class Py27CommentedTreeBuilder ( XmlET.TreeBuilder ):
# Python 2.7 uses ElementTree 1.3.x.
def comment( self, data ):
self.start( XmlET.Comment, {} )
self.data( data )
self.end( XmlET.Comment )
def create_and_write_tmp_file( elems, use_indent=False ):
tmp_str = ''
for elem in listify( elems ):
tmp_str += xml_to_string( elem, use_indent=use_indent )
fh = tempfile.NamedTemporaryFile( 'wb', prefix="tmp-toolshed-cawrf" )
tmp_filename = fh.name
fh.close()
fh = open( tmp_filename, 'wb' )
fh.write( '<?xml version="1.0"?>\n' )
fh.write( tmp_str )
fh.close()
return tmp_filename
def create_element( tag, attributes=None, sub_elements=None ):
"""
Create a new element whose tag is the value of the received tag, and whose attributes are all
key / value pairs in the received attributes and sub_elements.
"""
if tag:
elem = XmlET.Element( tag )
if attributes:
# The received attributes is an odict to preserve ordering.
for k, v in attributes.items():
elem.set( k, v )
if sub_elements:
# The received sub_elements is an odict. These handle information that tends to be
# long text including paragraphs (e.g., description and long_description).
for k, v in sub_elements.items():
# Don't include fields that are blank.
if v:
if k == 'packages':
# The received sub_elements is an odict whose key is 'packages' and whose
# value is a list of ( name, version ) tuples.
for v_tuple in v:
sub_elem = XmlET.SubElement( elem, 'package' )
sub_elem_name, sub_elem_version = v_tuple
sub_elem.set( 'name', sub_elem_name )
sub_elem.set( 'version', sub_elem_version )
elif isinstance( v, list ):
sub_elem = XmlET.SubElement( elem, k )
# If v is a list, then it must be a list of tuples where the first
# item is the tag and the second item is the text value.
for v_tuple in v:
if len( v_tuple ) == 2:
v_tag = v_tuple[ 0 ]
v_text = v_tuple[ 1 ]
# Don't include fields that are blank.
if v_text:
v_elem = XmlET.SubElement( sub_elem, v_tag )
v_elem.text = v_text
else:
sub_elem = XmlET.SubElement( elem, k )
sub_elem.text = v
return elem
return None
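# A small sketch of create_element() above; the tag, attribute values and package
# tuple are illustrative, and a plain dict is used where callers would pass an odict.
def _example_create_element():
    elem = create_element( 'repository',
                           attributes={ 'name': 'package_example_1_0', 'owner': 'devteam' },
                           sub_elements={ 'packages': [ ( 'example', '1.0' ) ] } )
    return xml_to_string( elem, use_indent=True )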
def indent( elem, level=0 ):
"""
Prints an XML tree with each node indented according to its depth. This method is used to print the
shed tool config (e.g., shed_tool_conf.xml) from the in-memory list of config_elems, because each config_elem
in the list may be a hierarchical structure that was not created using the parse_xml() method below,
and so will not be properly written with xml.etree.ElementTree.tostring() without manually indenting
the tree first.
"""
i = "\n" + level * " "
if len( elem ):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for child in elem:
indent( child, level+1 )
if not child.tail or not child.tail.strip():
child.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and ( not elem.tail or not elem.tail.strip() ):
elem.tail = i
def parse_xml( file_name ):
"""Returns a parsed xml tree with comments intact."""
error_message = ''
fobj = open( file_name, 'r' )
if using_python_27:
try:
tree = XmlET.parse( fobj, parser=XmlET.XMLParser( target=Py27CommentedTreeBuilder() ) )
except Exception, e:
fobj.close()
error_message = "Exception attempting to parse %s: %s" % ( str( file_name ), str( e ) )
log.exception( error_message )
return None, error_message
else:
try:
tree = XmlET.parse( fobj, parser=Py26CommentedTreeBuilder() )
except Exception, e:
fobj.close()
error_message = "Exception attempting to parse %s: %s" % ( str( file_name ), str( e ) )
log.exception( error_message )
return None, error_message
fobj.close()
return tree, error_message
def xml_to_string( elem, encoding='utf-8', use_indent=False, level=0 ):
if elem is not None:
if use_indent:
# We were called from ToolPanelManager.config_elems_to_xml_file(), so
# set the level to 1 since level 0 is the <toolbox> tag set.
indent( elem, level=level )
if using_python_27:
xml_str = '%s\n' % xml.etree.ElementTree.tostring( elem, encoding=encoding, method="xml" )
else:
xml_str = '%s\n' % xml.etree.ElementTree.tostring( elem, encoding=encoding )
else:
xml_str = ''
return xml_str
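# A small round-trip sketch of parse_xml() and xml_to_string() above; the file
# name is illustrative.
def _example_round_trip( file_name='shed_tool_conf.xml' ):
    tree, error_message = parse_xml( file_name )
    if tree is None:
        log.debug( error_message )
        return ''
    return xml_to_string( tree.getroot(), use_indent=True )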
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/util/xml_util.py
|
Python
|
gpl-3.0
| 6,092
|
[
"Galaxy"
] |
b3bed4ab711cb6ae6957ad1af61fb6fe7a4ea18f2f0b2707274d9e0c1aca140f
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
import calendar
import datetime
import errno
import getpass
import hashlib
import logging
import mimetypes
import optparse
import os
import random
import re
import sys
import tempfile
import time
import urllib
import urllib2
import google
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import backendinfo
from google.appengine.api import croninfo
from google.appengine.api import dosinfo
from google.appengine.api import queueinfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appengine_rpc
from google.appengine.tools import bulkloader
MAX_FILES_TO_CLONE = 100
LIST_DELIMITER = '\n'
TUPLE_DELIMITER = '|'
BACKENDS_ACTION = 'backends'
VERSION_FILE = '../../VERSION'
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = '.appcfg_nag'
MAX_LOG_LEVEL = 4
MAX_BATCH_SIZE = 3200000
MAX_BATCH_COUNT = 100
MAX_BATCH_FILE_SIZE = 200000
BATCH_OVERHEAD = 500
verbosity = 1
PREFIXED_BY_ADMIN_CONSOLE_RE = '^(?:admin-console)(.*)'
SDK_PRODUCT = 'appcfg_py'
DAY = 24*3600
SUNDAY = 6
SUPPORTED_RUNTIMES = ('go', 'python', 'python27')
MEGA = 1024 * 1024
MILLION = 1000 * 1000
DEFAULT_RESOURCE_LIMITS = {
'max_file_size': 32 * MILLION,
'max_blob_size': 32 * MILLION,
'max_total_file_size': 150 * MEGA,
'max_file_count': 10000,
}
def PrintUpdate(msg):
"""Print a message to stderr.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print >>sys.stderr, msg
def StatusUpdate(msg):
"""Print a status message to stderr."""
PrintUpdate(msg)
def ErrorUpdate(msg):
"""Print an error message to stderr."""
PrintUpdate(msg)
def _PrintErrorAndExit(stream, msg, exit_code=2):
"""Prints the given error message and exists the program.
Args:
stream: The stream (e.g. StringIO or file) to write the message to.
msg: The error message to display as a string.
exit_code: The integer code to pass to sys.exit().
"""
stream.write(msg)
sys.exit(exit_code)
class FileClassification(object):
"""A class to hold a file's classification.
This class both abstracts away the details of how we determine
whether a file is a regular, static or error file as well as acting
as a container for various metadata about the file.
"""
def __init__(self, config, filename):
"""Initializes a FileClassification instance.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
"""
self.__static_mime_type = self.__GetMimeTypeIfStaticFile(config, filename)
self.__error_mime_type, self.__error_code = self.__LookupErrorBlob(config,
filename)
@staticmethod
def __GetMimeTypeIfStaticFile(config, filename):
"""Looks up the mime type for 'filename'.
Uses the handlers in 'config' to determine if the file should
be treated as a static file.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
The mime type string. For example, 'text/plain' or 'image/gif'.
None if this is not a static file.
"""
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
if handler.mime_type is not None:
return handler.mime_type
else:
return FileClassification.__MimeType(filename)
return None
@staticmethod
def __LookupErrorBlob(config, filename):
"""Looks up the mime type and error_code for 'filename'.
Uses the error handlers in 'config' to determine if the file should
be treated as an error blob.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
A tuple of (mime_type, error_code), or (None, None) if this is not an
error blob. For example, ('text/plain', 'default') or ('image/gif',
'timeout') or (None, None).
"""
if not config.error_handlers:
return (None, None)
for error_handler in config.error_handlers:
if error_handler.file == filename:
error_code = error_handler.error_code
if not error_code:
error_code = 'default'
if error_handler.mime_type is not None:
return (error_handler.mime_type, error_code)
else:
return (FileClassification.__MimeType(filename), error_code)
return (None, None)
@staticmethod
def __MimeType(filename, default='application/octet-stream'):
guess = mimetypes.guess_type(filename)[0]
if guess is None:
print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.'
% (filename, default))
return default
return guess
def IsApplicationFile(self):
return self.__static_mime_type is None and self.__error_mime_type is None
def IsStaticFile(self):
return self.__static_mime_type is not None
def StaticMimeType(self):
return self.__static_mime_type
def IsErrorFile(self):
return self.__error_mime_type is not None
def ErrorMimeType(self):
return self.__error_mime_type
def ErrorCode(self):
return self.__error_code
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs,errorblobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[0]
tup = tup[1:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
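# A small sketch of BuildClonePostBody() above; the paths, hashes and sizes are
# illustrative. Tuple entries are joined with '|' and tuples with '\n'.
def _ExampleClonePostBody():
  return BuildClonePostBody([('app.yaml', 'deadbeef', '245'),
                             ('main.py', 'cafef00d', '1024')])
  # -> 'app.yaml|deadbeef|245\nmain.py|cafef00d|1024'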
def GetRemoteResourceLimits(rpcserver, config):
"""Get the resource limit as reported by the admin console.
Get the resource limits by querying the admin_console/appserver. The
actual limits returned depends on the server we are talking to and
could be missing values we expect or include extra values.
Args:
rpcserver: The RPC server to use.
config: The appyaml configuration.
Returns:
A dictionary.
"""
try:
StatusUpdate('Getting current resource limits.')
yaml_data = rpcserver.Send('/api/appversion/getresourcelimits',
app_id=config.application,
version=config.version)
except urllib2.HTTPError, err:
if err.code != 404:
raise
return {}
return yaml.safe_load(yaml_data)
def GetResourceLimits(rpcserver, config):
"""Gets the resource limits.
Gets the resource limits that should be applied to apps. Any values
that the server does not know about will have their default value
reported (although it is also possible for the server to report
values we don't know about).
Args:
rpcserver: The RPC server to use.
config: The appyaml configuration.
Returns:
A dictionary.
"""
resource_limits = DEFAULT_RESOURCE_LIMITS.copy()
resource_limits.update(GetRemoteResourceLimits(rpcserver, config))
return resource_limits
class NagFile(validation.Validated):
"""A validated YAML class to represent the user's nag preferences.
Attributes:
timestamp: The timestamp of the last nag.
opt_in: True if the user wants to check for updates on dev_appserver
start. False if not. May be None if we have not asked the user yet.
"""
ATTRIBUTES = {
'timestamp': validation.TYPE_FLOAT,
'opt_in': validation.Optional(validation.TYPE_BOOL),
}
@staticmethod
def Load(nag_file):
"""Load a single NagFile object where one and only one is expected.
Args:
nag_file: A file-like object or string containing the yaml data to parse.
Returns:
A NagFile instance.
"""
return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
"""Gets the version of the SDK by parsing the VERSION file.
Args:
isfile: used for testing.
open_fn: Used for testing.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.appengine.__file__),
VERSION_FILE)
if not isfile(version_filename):
logging.error('Could not find version file at %s', version_filename)
return None
version_fh = open_fn(version_filename, 'r')
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def RetryWithBackoff(callable_func, retry_notify_func,
initial_delay=1, backoff_factor=2,
max_delay=60, max_tries=20):
"""Calls a function multiple times, backing off more and more each time.
Args:
callable_func: A function that performs some operation that should be
retried a number of times upon failure. Signature: () -> (done, value)
If 'done' is True, we'll immediately return (True, value)
If 'done' is False, we'll delay a bit and try again, unless we've
hit the 'max_tries' limit, in which case we'll return (False, value).
retry_notify_func: This function will be called immediately before the
next retry delay. Signature: (value, delay) -> None
'value' is the value returned by the last call to 'callable_func'
'delay' is the retry delay, in seconds
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
max_delay: Maximum delay, in seconds.
max_tries: Maximum number of tries (the first one counts).
Returns:
What the last call to 'callable_func' returned, which is of the form
(done, value). If 'done' is True, you know 'callable_func' returned True
before we ran out of retries. If 'done' is False, you know 'callable_func'
kept returning False and we ran out of retries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
num_tries = 0
while True:
done, opaque_value = callable_func()
num_tries += 1
if done:
return True, opaque_value
if num_tries >= max_tries:
return False, opaque_value
retry_notify_func(opaque_value, delay)
time.sleep(delay)
delay = min(delay * backoff_factor, max_delay)
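# A small sketch of RetryWithBackoff() above; the operation callback is an
# illustrative placeholder supplied by the caller.
def _ExampleRetry(operation):
  """operation: () -> (done, value), e.g. a flaky RPC wrapped by the caller."""
  def _Notify(unused_value, delay):
    StatusUpdate('Will retry in %s seconds.' % delay)
  return RetryWithBackoff(operation, _Notify, initial_delay=1, max_tries=5)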
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
class UpdateCheck(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
rpcserver,
config,
isdir=os.path.isdir,
isfile=os.path.isfile,
open_fn=open):
"""Create a new UpdateCheck.
Args:
rpcserver: The AbstractRpcServer to use.
config: The yaml object that specifies the configuration of this
application.
isdir: Replacement for os.path.isdir (for testing).
isfile: Replacement for os.path.isfile (for testing).
open_fn: Replacement for the open builtin (for testing).
"""
self.rpcserver = rpcserver
self.config = config
self.isdir = isdir
self.isfile = isfile
self.open = open_fn
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser('~/')
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ['HOMEDRIVE'] = drive
return os.path.expanduser('~/' + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject(isfile=self.isfile, open_fn=self.open)
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
sys.exit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error('Could not determine if the SDK supports the api_version '
'requested in app.yaml.')
return
if self.config.api_version not in version['api_versions']:
logging.critical('The api_version specified in app.yaml (%s) is not '
'supported by this release of the SDK. The supported '
'api_versions are %s.',
self.config.api_version, version['api_versions'])
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support the api_version named in
self.config.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
try:
response = self.rpcserver.Send('/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=self.config.runtime)
except urllib2.URLError, e:
logging.info('Update check failed: %s', e)
return
latest = yaml.safe_load(response)
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
api_versions = latest['api_versions']
if self.config.api_version not in api_versions:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % self.config.api_version,
latest, version, force=True)
return
if self.config.api_version != api_versions[len(api_versions) - 1]:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
self.config.api_version, latest, version)
return
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = UpdateCheck.MakeNagFilename()
if self.isfile(nag_filename):
fh = self.open(nag_filename, 'r')
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
return None
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = UpdateCheck.MakeNagFilename()
try:
fh = self.open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
Because we don't want to nag the user every time, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug('Skipping nag message')
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print '****************************************************************'
print msg
print '-----------'
print 'Latest SDK:'
print yaml.dump(latest)
print '-----------'
print 'Your SDK:'
print yaml.dump(version)
print '-----------'
print 'Please visit http://code.google.com/appengine for the latest SDK'
print '****************************************************************'
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
if nag.opt_in is None:
answer = input_fn('Allow dev_appserver to check for updates on startup? '
'(Y/n): ')
answer = answer.strip().lower()
if answer == 'n' or answer == 'no':
print ('dev_appserver will not check for updates on startup. To '
'change this setting, edit %s' % UpdateCheck.MakeNagFilename())
nag.opt_in = False
else:
print ('dev_appserver will check for updates on startup. To change '
'this setting, edit %s' % UpdateCheck.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
class IndexDefinitionUpload(object):
"""Provides facilities to upload index definitions to the hosting service."""
def __init__(self, rpcserver, config, definitions):
"""Creates a new DatastoreIndexUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
definitions: An IndexDefinitions object.
"""
self.rpcserver = rpcserver
self.config = config
self.definitions = definitions
def DoUpload(self):
"""Uploads the index definitions."""
StatusUpdate('Uploading index definitions.')
self.rpcserver.Send('/api/datastore/index/add',
app_id=self.config.application,
version=self.config.version,
payload=self.definitions.ToYAML())
class CronEntryUpload(object):
"""Provides facilities to upload cron entries to the hosting service."""
def __init__(self, rpcserver, config, cron):
"""Creates a new CronEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
config: The AppInfoExternal object derived from the app.yaml file.
cron: The CronInfoExternal object loaded from the cron.yaml file.
"""
self.rpcserver = rpcserver
self.config = config
self.cron = cron
def DoUpload(self):
"""Uploads the cron entries."""
StatusUpdate('Uploading cron entries.')
self.rpcserver.Send('/api/cron/update',
app_id=self.config.application,
version=self.config.version,
payload=self.cron.ToYAML())
class QueueEntryUpload(object):
"""Provides facilities to upload task queue entries to the hosting service."""
def __init__(self, rpcserver, config, queue):
"""Creates a new QueueEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
config: The AppInfoExternal object derived from the app.yaml file.
queue: The QueueInfoExternal object loaded from the queue.yaml file.
"""
self.rpcserver = rpcserver
self.config = config
self.queue = queue
def DoUpload(self):
"""Uploads the task queue entries."""
StatusUpdate('Uploading task queue entries.')
self.rpcserver.Send('/api/queue/update',
app_id=self.config.application,
version=self.config.version,
payload=self.queue.ToYAML())
class DosEntryUpload(object):
"""Provides facilities to upload dos entries to the hosting service."""
def __init__(self, rpcserver, config, dos):
"""Creates a new DosEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
dos: The DosInfoExternal object loaded from the dos.yaml file.
"""
self.rpcserver = rpcserver
self.config = config
self.dos = dos
def DoUpload(self):
"""Uploads the dos entries."""
StatusUpdate('Uploading DOS entries.')
self.rpcserver.Send('/api/dos/update',
app_id=self.config.application,
version=self.config.version,
payload=self.dos.ToYAML())
class DefaultVersionSet(object):
"""Provides facilities to set the default (serving) version."""
def __init__(self, rpcserver, config):
"""Creates a new DefaultVersionSet.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
"""
self.rpcserver = rpcserver
self.config = config
def SetVersion(self):
"""Sets the default version."""
StatusUpdate('Setting default version to %s.' % (self.config.version,))
self.rpcserver.Send('/api/appversion/setdefault',
app_id=self.config.application,
version=self.config.version)
class IndexOperation(object):
"""Provide facilities for writing Index operation commands."""
def __init__(self, rpcserver, config):
"""Creates a new IndexOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
"""
self.rpcserver = rpcserver
self.config = config
def DoDiff(self, definitions):
"""Retrieve diff file from the server.
Args:
definitions: datastore_index.IndexDefinitions as loaded from users
index.yaml file.
Returns:
A pair of datastore_index.IndexDefinitions objects. The first record
is the set of indexes that are present in the index.yaml file but missing
from the server. The second record is the set of indexes that are
present on the server but missing from the index.yaml file (indicating
that these indexes should probably be vacuumed).
"""
StatusUpdate('Fetching index definitions diff.')
response = self.rpcserver.Send('/api/datastore/index/diff',
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseMultipleIndexDefinitions(response)
def DoDelete(self, definitions):
"""Delete indexes from the server.
Args:
definitions: Index definitions to delete from datastore.
Returns:
A single datastore_index.IndexDefinitions containing indexes that were
not deleted, probably because they were already removed. This may
be normal behavior as there is a potential race condition between fetching
the index-diff and sending deletion confirmation through.
"""
StatusUpdate('Deleting selected index definitions.')
response = self.rpcserver.Send('/api/datastore/index/delete',
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseIndexDefinitions(response)
class VacuumIndexesOperation(IndexOperation):
"""Provide facilities to request the deletion of datastore indexes."""
def __init__(self, rpcserver, config, force,
confirmation_fn=raw_input):
"""Creates a new VacuumIndexesOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
force: True to force deletion of indexes, else False.
confirmation_fn: Function used for getting input from the user.
"""
super(VacuumIndexesOperation, self).__init__(rpcserver, config)
self.force = force
self.confirmation_fn = confirmation_fn
def GetConfirmation(self, index):
"""Get confirmation from user to delete an index.
This method will enter an input loop until the user provides a
response it is expecting. Valid input is one of three responses:
y: Confirm deletion of index.
n: Do not delete index.
a: Delete all indexes without asking for further confirmation.
If the user enters nothing at all, the default action is to skip
that index and not delete it.
If the user selects 'a', as a side effect, the 'force' flag is set.
Args:
index: Index to confirm.
Returns:
True if user enters 'y' or 'a'. False if user enter 'n'.
"""
while True:
print 'This index is no longer defined in your index.yaml file.'
print
print index.ToYAML()
print
confirmation = self.confirmation_fn(
'Are you sure you want to delete this index? (N/y/a): ')
confirmation = confirmation.strip().lower()
if confirmation == 'y':
return True
elif confirmation == 'n' or not confirmation:
return False
elif confirmation == 'a':
self.force = True
return True
else:
print 'Did not understand your response.'
def DoVacuum(self, definitions):
"""Vacuum indexes in datastore.
This method will query the server to determine which indexes are not
being used according to the user's local index.yaml file. Once it has
made this determination, it confirms with the user which unused indexes
should be deleted. Once confirmation for each index is received, it
deletes those indexes.
Because another user may in theory delete the same indexes at the same
time as the user, there is a potential race condition. In these rare cases,
some of the indexes previously confirmed for deletion will not be found.
The user is notified which indexes these were.
Args:
definitions: datastore_index.IndexDefinitions as loaded from users
index.yaml file.
"""
unused_new_indexes, notused_indexes = self.DoDiff(definitions)
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes is not None:
for index in notused_indexes.indexes:
if self.force or self.GetConfirmation(index):
deletions.indexes.append(index)
if deletions.indexes:
not_deleted = self.DoDelete(deletions)
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ('An index was not deleted. Most likely this is '
'because it no longer exists.\n\n')
else:
warning_message = ('%d indexes were not deleted. Most likely this '
'is because they no longer exist.\n\n'
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
logging.warning(warning_message)
class LogsRequester(object):
"""Provide facilities to export request logs."""
def __init__(self, rpcserver, config, output_file,
num_days, append, severity, end, vhost, include_vhost,
include_all=None, time_func=time.time):
"""Constructor.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
output_file: Output file name.
num_days: Number of days worth of logs to export; 0 for all available.
append: True if appending to an existing file.
severity: App log severity to request (0-4); None for no app logs.
end: date object representing last day of logs to return.
vhost: The virtual host of log messages to get. None for all hosts.
include_vhost: If true, the virtual host is included in log messages.
include_all: If true, we add to the log message everything we know
about the request.
time_func: Method that return a timestamp representing now (for testing).
"""
self.rpcserver = rpcserver
self.config = config
self.output_file = output_file
self.append = append
self.num_days = num_days
self.severity = severity
self.vhost = vhost
self.include_vhost = include_vhost
self.include_all = include_all
self.version_id = self.config.version + '.1'
self.sentinel = None
self.write_mode = 'w'
if self.append:
self.sentinel = FindSentinel(self.output_file)
self.write_mode = 'a'
self.skip_until = False
now = PacificDate(time_func())
if end < now:
self.skip_until = end
else:
end = now
self.valid_dates = None
if self.num_days:
start = end - datetime.timedelta(self.num_days - 1)
self.valid_dates = (start, end)
def DownloadLogs(self):
"""Download the requested logs.
This will write the logs to the file designated by
self.output_file, or to stdout if the filename is '-'.
Multiple roundtrips to the server may be made.
"""
StatusUpdate('Downloading request logs for %s %s.' %
(self.config.application, self.version_id))
tf = tempfile.TemporaryFile()
last_offset = None
try:
while True:
try:
new_offset = self.RequestLogLines(tf, last_offset)
if not new_offset or new_offset == last_offset:
break
last_offset = new_offset
except KeyboardInterrupt:
StatusUpdate('Keyboard interrupt; saving data downloaded so far.')
break
StatusUpdate('Copying request logs to %r.' % self.output_file)
if self.output_file == '-':
of = sys.stdout
else:
try:
of = open(self.output_file, self.write_mode)
except IOError, err:
StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err))
sys.exit(1)
try:
line_count = CopyReversedLines(tf, of)
finally:
of.flush()
if of is not sys.stdout:
of.close()
finally:
tf.close()
StatusUpdate('Copied %d records.' % line_count)
def RequestLogLines(self, tf, offset):
"""Make a single roundtrip to the server.
Args:
tf: Writable binary stream to which the log lines returned by
the server are written, stripped of headers, and excluding
lines skipped due to self.sentinel or self.valid_dates filtering.
offset: Offset string for a continued request; None for the first.
Returns:
The offset string to be used for the next request, if another
request should be issued; or None, if not.
"""
logging.info('Request with offset %r.', offset)
kwds = {'app_id': self.config.application,
'version': self.version_id,
'limit': 1000,
}
if offset:
kwds['offset'] = offset
if self.severity is not None:
kwds['severity'] = str(self.severity)
if self.vhost is not None:
kwds['vhost'] = str(self.vhost)
if self.include_vhost is not None:
kwds['include_vhost'] = str(self.include_vhost)
if self.include_all is not None:
kwds['include_all'] = str(self.include_all)
response = self.rpcserver.Send('/api/request_logs', payload=None, **kwds)
response = response.replace('\r', '\0')
lines = response.splitlines()
logging.info('Received %d bytes, %d records.', len(response), len(lines))
offset = None
if lines and lines[0].startswith('#'):
match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
del lines[0]
if match:
offset = match.group(1)
if lines and lines[-1].startswith('#'):
del lines[-1]
valid_dates = self.valid_dates
sentinel = self.sentinel
skip_until = self.skip_until
len_sentinel = None
if sentinel:
len_sentinel = len(sentinel)
for line in lines:
if (sentinel and
line.startswith(sentinel) and
line[len_sentinel : len_sentinel+1] in ('', '\0')):
return None
linedate = DateOfLogLine(line)
if not linedate:
continue
if skip_until:
if linedate > skip_until:
continue
else:
self.skip_until = skip_until = False
if valid_dates and not valid_dates[0] <= linedate <= valid_dates[1]:
return None
tf.write(line + '\n')
if not lines:
return None
return offset
def DateOfLogLine(line):
"""Returns a date object representing the log line's timestamp.
Args:
line: a log line string.
Returns:
A date object representing the timestamp or None if parsing fails.
"""
m = re.compile(r'[^[]+\[(\d+/[A-Za-z]+/\d+):[^\d]*').match(line)
if not m:
return None
try:
return datetime.date(*time.strptime(m.group(1), '%d/%b/%Y')[:3])
except ValueError:
return None
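# A small sketch of DateOfLogLine() above; the log line is illustrative and
# follows the Apache-style timestamp format the regex expects.
def _ExampleDateOfLogLine():
  line = '192.0.2.1 - - [07/Aug/2011:00:21:40 -0700] "GET / HTTP/1.1" 200 -'
  return DateOfLogLine(line)  # -> datetime.date(2011, 8, 7)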
def PacificDate(now):
"""For a UTC timestamp, return the date in the US/Pacific timezone.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A date object representing what day it is in the US/Pacific timezone.
"""
return datetime.date(*time.gmtime(PacificTime(now))[:3])
def PacificTime(now):
"""Helper to return the number of seconds between UTC and Pacific time.
This is needed to compute today's date in Pacific time (more
specifically: Mountain View local time), which is how request logs
are reported. (Google servers always report times in Mountain View
local time, regardless of where they are physically located.)
This takes (post-2006) US DST into account. Pacific time is either
8 hours or 7 hours west of UTC, depending on whether DST is in
effect. Since 2007, US DST starts on the second Sunday in March
and ends on the first Sunday in November. (Reference:
http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
Note that the server doesn't report its local time (the HTTP Date
header uses UTC), and the client's local time is irrelevant.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A pseudo-posix timestamp giving current Pacific time. Passing
this through time.gmtime() will produce a tuple in Pacific local
time.
"""
now -= 8*3600
if IsPacificDST(now):
now += 3600
return now
def IsPacificDST(now):
"""Helper for PacificTime to decide whether now is Pacific DST (PDT).
Args:
now: A pseudo-posix timestamp giving current time in PST.
Returns:
True if now falls within the range of DST, False otherwise.
"""
pst = time.gmtime(now)
year = pst[0]
assert year >= 2007
begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
while time.gmtime(begin).tm_wday != SUNDAY:
begin += DAY
end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
while time.gmtime(end).tm_wday != SUNDAY:
end += DAY
return begin <= now < end
def CopyReversedLines(instream, outstream, blocksize=2**16):
r"""Copy lines from input stream to output stream in reverse order.
As a special feature, null bytes in the input are turned into
newlines followed by tabs in the output, but these 'sub-lines'
separated by null bytes are not reversed. E.g. If the input is
'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'.
Args:
instream: A seekable stream open for reading in binary mode.
outstream: A stream open for writing; doesn't have to be seekable or binary.
blocksize: Optional block size for buffering, for unit testing.
Returns:
The number of lines copied.
"""
line_count = 0
instream.seek(0, 2)
last_block = instream.tell() // blocksize
spillover = ''
for iblock in xrange(last_block + 1, -1, -1):
instream.seek(iblock * blocksize)
data = instream.read(blocksize)
lines = data.splitlines(True)
lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True)
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
lines.reverse()
if lines and iblock > 0:
spillover = lines.pop()
if lines:
line_count += len(lines)
data = ''.join(lines).replace('\0', '\n\t')
outstream.write(data)
return line_count
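# A small in-memory sketch of CopyReversedLines() above, mirroring the example
# in its docstring.
def _ExampleCopyReversedLines():
  from StringIO import StringIO
  src = StringIO('A\0B\nC\0D\n')
  dst = StringIO()
  count = CopyReversedLines(src, dst)
  return count, dst.getvalue()  # -> (2, 'C\n\tD\nA\n\tB\n')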
def FindSentinel(filename, blocksize=2**16):
"""Return the sentinel line from the output file.
Args:
filename: The filename of the output file. (We'll read this file.)
blocksize: Optional block size for buffering, for unit testing.
Returns:
The contents of the last line in the file that doesn't start with
a tab, with its trailing newline stripped; or None if the file
couldn't be opened or no such line could be found by inspecting
the last 'blocksize' bytes of the file.
"""
if filename == '-':
StatusUpdate('Can\'t combine --append with output to stdout.')
sys.exit(2)
try:
fp = open(filename, 'rb')
except IOError, err:
StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err))
return None
try:
fp.seek(0, 2)
fp.seek(max(0, fp.tell() - blocksize))
lines = fp.readlines()
del lines[:1]  # The first line read may be incomplete, so drop it.
sentinel = None
for line in lines:
if not line.startswith('\t'):
sentinel = line
if not sentinel:
StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' %
filename)
return None
return sentinel.rstrip('\n')
finally:
fp.close()
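# Illustrative sketch (writes a hypothetical temp file): the sentinel is
# the last line that does not start with a tab, i.e. the newest request
# log line rather than one of its tab-prefixed app-log sub-lines.
def _DemoFindSentinel():
  import tempfile
  fd, name = tempfile.mkstemp()
  try:
    os.write(fd, 'first request\n\tapp log\nlast request\n\tapp log\n')
    os.close(fd)
    assert FindSentinel(name) == 'last request'
  finally:
    os.remove(name)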
class UploadBatcher(object):
"""Helper to batch file uploads."""
def __init__(self, what, rpcserver, params):
"""Constructor.
Args:
what: Either 'file' or 'blob' or 'errorblob' indicating what kind of
objects this batcher uploads. Used in messages and URLs.
rpcserver: The RPC server.
params: A dictionary object containing URL params to add to HTTP requests.
"""
assert what in ('file', 'blob', 'errorblob'), repr(what)
self.what = what
self.params = params
self.rpcserver = rpcserver
self.single_url = '/api/appversion/add' + what
self.batch_url = self.single_url + 's'
self.batching = True
self.batch = []
self.batch_size = 0
def SendBatch(self):
"""Send the current batch on its way.
If successful, resets self.batch and self.batch_size.
Raises:
HTTPError with code=404 if the server doesn't support batching.
"""
boundary = 'boundary'
parts = []
for path, payload, mime_type in self.batch:
while boundary in payload:
boundary += '%04x' % random.randint(0, 0xffff)
assert len(boundary) < 80, 'Unexpected error, please try again.'
part = '\n'.join(['',
'X-Appcfg-File: %s' % urllib.quote(path),
'X-Appcfg-Hash: %s' % _Hash(payload),
'Content-Type: %s' % mime_type,
'Content-Length: %d' % len(payload),
'Content-Transfer-Encoding: 8bit',
'',
payload,
])
parts.append(part)
parts.insert(0,
'MIME-Version: 1.0\n'
'Content-Type: multipart/mixed; boundary="%s"\n'
'\n'
'This is a message with multiple parts in MIME format.' %
boundary)
parts.append('--\n')
delimiter = '\n--%s' % boundary
payload = delimiter.join(parts)
logging.info('Uploading batch of %d %ss to %s with boundary="%s".',
len(self.batch), self.what, self.batch_url, boundary)
self.rpcserver.Send(self.batch_url,
payload=payload,
content_type='message/rfc822',
**self.params)
self.batch = []
self.batch_size = 0
def SendSingleFile(self, path, payload, mime_type):
"""Send a single file on its way."""
logging.info('Uploading %s %s (%s bytes, type=%s) to %s.',
self.what, path, len(payload), mime_type, self.single_url)
self.rpcserver.Send(self.single_url,
payload=payload,
content_type=mime_type,
path=path,
**self.params)
def Flush(self):
"""Flush the current batch.
This first attempts to send the batch as a single request; if that
fails because the server doesn't support batching, the files are
sent one by one, and self.batching is reset to False.
At the end, self.batch and self.batch_size are reset.
"""
if not self.batch:
return
try:
self.SendBatch()
except urllib2.HTTPError, err:
if err.code != 404:
raise
logging.info('Old server detected; turning off %s batching.', self.what)
self.batching = False
for path, payload, mime_type in self.batch:
self.SendSingleFile(path, payload, mime_type)
self.batch = []
self.batch_size = 0
def AddToBatch(self, path, payload, mime_type):
"""Batch a file, possibly flushing first, or perhaps upload it directly.
Args:
path: The name of the file.
payload: The contents of the file.
mime_type: The MIME Content-type of the file, or None.
If mime_type is None, application/octet-stream is substituted.
"""
if not mime_type:
mime_type = 'application/octet-stream'
size = len(payload)
if size <= MAX_BATCH_FILE_SIZE:
if (len(self.batch) >= MAX_BATCH_COUNT or
self.batch_size + size > MAX_BATCH_SIZE):
self.Flush()
if self.batching:
logging.info('Adding %s %s (%s bytes, type=%s) to batch.',
self.what, path, size, mime_type)
self.batch.append((path, payload, mime_type))
self.batch_size += size + BATCH_OVERHEAD
return
self.SendSingleFile(path, payload, mime_type)
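# Illustrative sketch (uses a stand-in RPC server, an assumption for the
# demo): small payloads accumulate in the batch and Flush() sends them as
# a single multipart/mixed request via SendBatch(); a 404 from the server
# would make Flush() fall back to one add-file request per file.
def _DemoUploadBatcher():
  class _FakeRpcServer(object):
    def Send(self, url, payload='', **kwargs):
      return ''
  batcher = UploadBatcher('file', _FakeRpcServer(), {'app_id': 'demo'})
  batcher.AddToBatch('main.py', 'print "hello"\n', 'text/x-python')
  batcher.AddToBatch('app.yaml', 'application: demo\n', None)
  batcher.Flush()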
def _FormatHash(h):
"""Return a string representation of a hash.
The hash is a sha1 hash. It is computed both for files that need to be
pushed to App Engine and for data payloads of requests made to App Engine.
Args:
h: The hash
Returns:
The string representation of the hash.
"""
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
def _Hash(content):
"""Compute the sha1 hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
h = hashlib.sha1(content).hexdigest()
return _FormatHash(h)
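# Illustrative sketch: the sha1 digest of an empty payload, grouped into
# five 8-character segments by _FormatHash(). The expected value is the
# well-known sha1 of the empty string.
def _DemoHash():
  assert _Hash('') == 'da39a3ee_5e6b4b0d_3255bfef_95601890_afd80709'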
def _HashFromFileHandle(file_handle):
"""Compute the hash of the content of the file pointed to by file_handle.
Args:
file_handle: File-like object which provides seek, read and tell.
Returns:
The string representation of the hash.
"""
pos = file_handle.tell()
content_hash = _Hash(file_handle.read())
file_handle.seek(pos, 0)
return content_hash
def EnsureDir(path):
"""Makes sure that a directory exists at the given path.
If a directory already exists at that path, nothing is done.
Otherwise, try to create a directory at that path with os.makedirs.
If that fails, propagate the resulting OSError exception.
Args:
path: The path that you want to refer to a directory.
"""
try:
os.makedirs(path)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
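# Illustrative sketch: EnsureDir() is idempotent, unlike a bare
# os.makedirs() call, which raises OSError if the directory exists.
def _DemoEnsureDir():
  import tempfile
  target = os.path.join(tempfile.mkdtemp(), 'a', 'b')
  EnsureDir(target)
  EnsureDir(target)  # Second call is a no-op.
  assert os.path.isdir(target)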
def DoDownloadApp(rpcserver, out_dir, app_id, app_version):
"""Downloads the files associated with a particular app version.
Args:
rpcserver: The RPC server to use to download.
out_dir: The directory the files should be downloaded to.
app_id: The app ID of the app whose files we want to download.
app_version: The version number we want to download. Can be:
- None: We'll download the latest default version.
- <major>: We'll download the latest minor version.
- <major>/<minor>: We'll download that exact version.
"""
StatusUpdate('Fetching file list...')
url_args = {'app_id': app_id}
if app_version is not None:
url_args['version_match'] = app_version
result = rpcserver.Send('/api/files/list', **url_args)
StatusUpdate('Fetching files...')
lines = result.splitlines()
if len(lines) < 1:
logging.error('Invalid response from server: empty')
return
full_version = lines[0]
file_lines = lines[1:]
current_file_number = 0
num_files = len(file_lines)
num_errors = 0
for line in file_lines:
parts = line.split('|', 2)
if len(parts) != 3:
logging.error('Invalid response from server: expecting '
'"<id>|<size>|<path>", found: "%s"\n', line)
return
current_file_number += 1
file_id, size_str, path = parts
try:
size = int(size_str)
except ValueError:
logging.error('Invalid file list entry from server: invalid size: '
'"%s"', size_str)
return
StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path))
def TryGet():
"""A request to /api/files/get which works with the RetryWithBackoff."""
try:
contents = rpcserver.Send('/api/files/get', app_id=app_id,
version=full_version, id=file_id)
return True, contents
except urllib2.HTTPError, exc:
if exc.code == 503:
return False, exc
else:
raise
def PrintRetryMessage(_, delay):
StatusUpdate('Server busy. Will try again in %d seconds.' % delay)
success, contents = RetryWithBackoff(TryGet, PrintRetryMessage)
if not success:
logging.error('Unable to download file "%s".', path)
num_errors += 1
continue
if len(contents) != size:
logging.error('File "%s": server listed as %d bytes but served '
'%d bytes.', path, size, len(contents))
num_errors += 1
full_path = os.path.join(out_dir, path)
if os.path.exists(full_path):
logging.error('Unable to create file "%s": path conflicts with '
'an existing file or directory', path)
num_errors += 1
continue
full_dir = os.path.dirname(full_path)
try:
EnsureDir(full_dir)
except OSError, exc:
logging.error('Couldn\'t create directory "%s": %s', full_dir, exc)
num_errors += 1
continue
try:
out_file = open(full_path, 'wb')
except IOError, exc:
logging.error('Couldn\'t open file "%s": %s', full_path, exc)
num_errors += 1
continue
try:
try:
out_file.write(contents)
except IOError, exc:
logging.error('Couldn\'t write to file "%s": %s', full_path, exc)
num_errors += 1
continue
finally:
out_file.close()
if num_errors > 0:
logging.error('Number of errors: %d. See output for details.', num_errors)
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config', or an alternate version override.
backend: The backend to update, if any.
files: A dictionary of files to upload to the rpcserver, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
started: True iff the StartServing method has been called.
"""
def __init__(self, rpcserver, config, version=None, backend=None,
error_fh=None):
"""Creates a new AppVersionUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: An AppInfoExternal object that specifies the configuration for
this application.
version: If specified, overrides the app version specified in config.
backend: If specified, indicates the update applies to the given backend.
The backend name must match an entry in the backends: stanza.
error_fh: Unexpected HTTPErrors are printed to this file handle.
"""
self.rpcserver = rpcserver
self.config = config
self.app_id = self.config.application
self.backend = backend
self.error_fh = error_fh or sys.stderr
if version:
self.version = version
else:
self.version = self.config.version
self.params = {}
if self.app_id:
self.params['app_id'] = self.app_id
if self.backend:
self.params['backend'] = self.backend
elif self.version:
self.params['version'] = self.version
self.files = {}
self.all_files = set()
self.in_transaction = False
self.deployed = False
self.started = False
self.batching = True
self.file_batcher = UploadBatcher('file', self.rpcserver, self.params)
self.blob_batcher = UploadBatcher('blob', self.rpcserver, self.params)
self.errorblob_batcher = UploadBatcher('errorblob', self.rpcserver,
self.params)
def Send(self, url, payload=''):
"""Sends a request to the server, with common params."""
logging.info('Send: %s, params=%s', url, self.params)
return self.rpcserver.Send(url, payload=payload, **self.params)
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, 'Already in a transaction.'
assert file_handle is not None
reason = appinfo.ValidFilename(path)
if reason:
logging.error(reason)
return
content_hash = _HashFromFileHandle(file_handle)
self.files[path] = content_hash
self.all_files.add(path)
def Describe(self):
"""Returns a string describing the object being updated."""
result = 'app: %s' % self.app_id
if self.backend:
result += ', backend: %s' % self.backend
elif self.version:
result += ', version: %s' % self.version
return result
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, 'Already in a transaction.'
self.Send('/api/appversion/create', payload=self.config.ToYAML())
self.in_transaction = True
files_to_clone = []
blobs_to_clone = []
errorblobs = {}
for path, content_hash in self.files.iteritems():
file_classification = FileClassification(self.config, path)
if file_classification.IsStaticFile():
blobs_to_clone.append((path, content_hash,
file_classification.StaticMimeType()))
if file_classification.IsErrorFile():
errorblobs[path] = content_hash
if file_classification.IsApplicationFile():
files_to_clone.append((path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: The server URL to use.
files: A list of files to clone: (path, hash) pairs for application
files, or (path, hash, mime_type) triples for static files.
file_type: A string naming the kind of files ('static' or
'application'), used in status messages.
"""
if not files:
return
StatusUpdate('Cloning %d %s file%s.' %
(len(files), file_type, len(files) != 1 and 's' or ''))
for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
if i > 0 and i % MAX_FILES_TO_CLONE == 0:
StatusUpdate('Cloned %d files.' % i)
chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
result = self.Send(url, payload=BuildClonePostBody(chunk))
if result:
files_to_upload.update(dict(
(f, self.files[f]) for f in result.split(LIST_DELIMITER)))
CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
logging.debug('Files to upload: %s', files_to_upload)
for (path, content_hash) in errorblobs.iteritems():
files_to_upload[path] = content_hash
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, 'Begin() must be called before UploadFile().'
if path not in self.files:
raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
% path)
del self.files[path]
file_classification = FileClassification(self.config, path)
payload = file_handle.read()
if file_classification.IsStaticFile():
self.blob_batcher.AddToBatch(path, payload,
file_classification.StaticMimeType())
if file_classification.IsErrorFile():
self.errorblob_batcher.AddToBatch(file_classification.ErrorCode(),
payload,
file_classification.ErrorMimeType())
if file_classification.IsApplicationFile():
self.file_batcher.AddToBatch(path, payload, None)
def Precompile(self):
"""Handle bytecode precompilation."""
StatusUpdate('Compilation starting.')
files = []
if self.config.runtime == 'go':
for f in self.all_files:
if f.endswith('.go') and not self.config.nobuild_files.match(f):
files.append(f)
while True:
if files:
StatusUpdate('Compilation: %d files left.' % len(files))
files = self.PrecompileBatch(files)
if not files:
break
StatusUpdate('Compilation completed.')
def PrecompileBatch(self, files):
"""Precompile a batch of files.
Args:
files: Either an empty list (for the initial request) or a list
of files to be precompiled.
Returns:
Either an empty list (if no more files need to be precompiled)
or a list of files to be precompiled subsequently.
"""
payload = LIST_DELIMITER.join(files)
response = self.Send('/api/appversion/precompile', payload=payload)
if not response:
return []
return response.split(LIST_DELIMITER)
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
This tries the new 'deploy' method; if that fails it uses the old 'commit'.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Commit().'
if self.files:
raise Exception('Not all required files have been uploaded.')
def PrintRetryMessage(_, delay):
StatusUpdate('Will check again in %s seconds.' % delay)
app_summary = None
try:
app_summary = self.Deploy()
success, unused_contents = RetryWithBackoff(
lambda: (self.IsReady(), None), PrintRetryMessage, 1, 2, 60, 20)
if not success:
logging.warning('Version still not ready to serve, aborting.')
raise Exception('Version not ready.')
result = self.StartServing()
if not result:
self.in_transaction = False
else:
success, unused_contents = RetryWithBackoff(
lambda: (self.IsServing(), None), PrintRetryMessage, 1, 1, 1, 60)
if not success:
logging.warning('Version still not serving, aborting.')
raise Exception('Version not ready.')
self.in_transaction = False
except urllib2.HTTPError, e:
if e.code != 404:
raise
StatusUpdate('Closing update.')
self.Send('/api/appversion/commit')
self.in_transaction = False
return app_summary
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Deploy().'
if self.files:
raise Exception('Not all required files have been uploaded.')
StatusUpdate('Starting deployment.')
result = self.Send('/api/appversion/deploy')
self.deployed = True
if result:
return yaml_object.BuildSingleObject(appinfo.AppInfoSummary, result)
else:
return None
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
Exception: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, 'Deploy() must be called before IsReady().'
StatusUpdate('Checking if deployment succeeded.')
result = self.Send('/api/appversion/isready')
return result == '1'
def StartServing(self):
"""Start serving with the newly created version.
Raises:
Exception: Deploy has not yet been called.
Returns:
The response body, as a string.
"""
assert self.deployed, 'Deploy() must be called before StartServing().'
StatusUpdate('Deployment successful.')
self.params['willcheckserving'] = '1'
result = self.Send('/api/appversion/startserving')
del self.params['willcheckserving']
self.started = True
return result
def IsServing(self):
"""Check if the new app version is serving.
Raises:
Exception: Deploy has not yet been called.
Returns:
True if the deployed app version is serving.
"""
assert self.started, 'StartServing() must be called before IsServing().'
StatusUpdate('Checking if updated app version is serving.')
result = self.Send('/api/appversion/isserving')
return result == '1'
def Rollback(self):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
StatusUpdate('Rolling back the update.')
self.Send('/api/appversion/rollback')
self.in_transaction = False
self.files = {}
def DoUpload(self, paths, openfunc, max_size_override=None):
"""Uploads a new appversion with the given config and files to the server.
Args:
paths: An iterator that yields the relative paths of the files to upload.
openfunc: A function that takes a path and returns a file-like object.
max_size_override: The maximum file size to upload (or None to use the
server-returned resource limits). For historical reasons, this size applies
to both files and blobs (while server resource limits can be
varied independently).
Returns:
An appinfo.AppInfoSummary if one was returned from the server, None
otherwise.
"""
logging.info('Reading app configuration.')
StatusUpdate('\nStarting update of %s' % self.Describe())
path = ''
try:
resource_limits = GetResourceLimits(self.rpcserver, self.config)
StatusUpdate('Scanning files on local disk.')
num_files = 0
for path in paths:
file_handle = openfunc(path)
file_classification = FileClassification(self.config, path)
try:
file_length = GetFileLength(file_handle)
if max_size_override is not None:
max_size = max_size_override
elif file_classification.IsApplicationFile():
max_size = resource_limits['max_file_size']
else:
max_size = resource_limits['max_blob_size']
if file_length > max_size:
logging.error('Ignoring file \'%s\': Too long '
'(max %d bytes, file is %d bytes)',
path, max_size, file_length)
else:
logging.info('Processing file \'%s\'', path)
self.AddFile(path, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Scanned %d files.' % num_files)
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
raise
except EnvironmentError, e:
logging.error('An error occurred processing file \'%s\': %s. Aborting.',
path, e)
raise
app_summary = None
try:
missing_files = self.Begin()
if missing_files:
StatusUpdate('Uploading %d files and blobs.' % len(missing_files))
num_files = 0
for missing_file in missing_files:
file_handle = openfunc(missing_file)
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Processed %d out of %s.' %
(num_files, len(missing_files)))
self.file_batcher.Flush()
self.blob_batcher.Flush()
self.errorblob_batcher.Flush()
StatusUpdate('Uploaded %d files and blobs' % num_files)
if (self.config.derived_file_type and
appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type):
try:
self.Precompile()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
if e.code == 422 or self.config.runtime == 'go':
raise
print >>self.error_fh, (
'Precompilation failed. Your app can still serve but may '
'have reduced startup performance. You can retry the update '
'later to retry the precompilation step.')
app_summary = self.Commit()
StatusUpdate('Completed update of %s' % self.Describe())
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
self.Rollback()
raise
except urllib2.HTTPError, err:
logging.info('HTTP Error (%s)', err)
self.Rollback()
raise
except:
logging.exception('An unexpected error occurred. Aborting.')
self.Rollback()
raise
logging.info('Done!')
return app_summary
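# Illustrative sketch (not part of the original tool): the transaction
# protocol that DoUpload() automates, driven by hand against an
# already-authenticated rpcserver and a parsed app.yaml config. Error
# handling, precompilation and Rollback() are omitted; DoUpload() above
# is the authoritative sequence.
def _DemoAppVersionUploadFlow(rpcserver, config, openfunc, paths):
  upload = AppVersionUpload(rpcserver, config)
  for path in paths:
    file_handle = openfunc(path)
    try:
      upload.AddFile(path, file_handle)
    finally:
      file_handle.close()
  # Begin() opens the server-side transaction and returns only the paths
  # whose contents the server could not clone from previous uploads.
  for path in upload.Begin():
    file_handle = openfunc(path)
    try:
      upload.UploadFile(path, file_handle)
    finally:
      file_handle.close()
  upload.file_batcher.Flush()
  upload.blob_batcher.Flush()
  upload.errorblob_batcher.Flush()
  return upload.Commit()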
def FileIterator(base, skip_files, runtime, separator=os.path.sep):
"""Walks a directory tree, returning all the files. Follows symlinks.
Args:
base: The base path to search for files under.
skip_files: A regular expression object for files/directories to skip.
runtime: The name of the runtime, e.g. "python". If "python27", then .pyc
files with matching .py files will be skipped.
separator: Path separator used by the running system's platform.
Yields:
Paths of files found, relative to base.
"""
dirs = ['']
while dirs:
current_dir = dirs.pop()
entries = set(os.listdir(os.path.join(base, current_dir)))
for entry in sorted(entries):
name = os.path.join(current_dir, entry)
fullname = os.path.join(base, name)
if separator == '\\':
name = name.replace('\\', '/')
if runtime == 'python27' and not skip_files.match(name):
root, extension = os.path.splitext(entry)
if extension == '.pyc' and (root + '.py') in entries:
logging.warning('Ignoring file \'%s\': Cannot upload both '
'<filename>.py and <filename>.pyc', name)
continue
if os.path.isfile(fullname):
if skip_files.match(name):
logging.info('Ignoring file \'%s\': File matches ignore regex.', name)
else:
yield name
elif os.path.isdir(fullname):
if skip_files.match(name):
logging.info(
'Ignoring directory \'%s\': Directory matches ignore regex.',
name)
else:
dirs.append(name)
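# Illustrative sketch: enumerating uploadable files under the current
# directory. The skip regex is an assumption chosen for the demo; appcfg
# itself compiles it from the skip_files entry in app.yaml.
def _DemoFileIterator():
  skip = re.compile(r'^\.git(/.*)?$')
  return list(FileIterator('.', skip, 'python'))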
def GetFileLength(fh):
"""Returns the length of the file represented by fh.
This function is capable of finding the length of any seekable stream,
unlike os.fstat, which only works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
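# Illustrative sketch: GetFileLength() works on any seekable stream, not
# just real files, and restores the stream position afterwards.
def _DemoGetFileLength():
  from StringIO import StringIO
  stream = StringIO('12345')
  stream.seek(2)
  assert GetFileLength(stream) == 5
  assert stream.tell() == 2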
def GetUserAgent(get_version=GetVersionObject,
get_platform=appengine_rpc.GetPlatformToken):
"""Determines the value of the 'User-agent' header to use for HTTP requests.
If the 'APPCFG_SDK_NAME' environment variable is present, that will be
used as the first product token in the user-agent.
Args:
get_version: Used for testing.
get_platform: Used for testing.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
"""
product_tokens = []
sdk_name = os.environ.get('APPCFG_SDK_NAME')
if sdk_name:
product_tokens.append(sdk_name)
else:
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
product_tokens.append('%s/%s' % (SDK_PRODUCT, release))
product_tokens.append(get_platform())
python_version = '.'.join(str(i) for i in sys.version_info)
product_tokens.append('Python/%s' % python_version)
return ' '.join(product_tokens)
def GetSourceName(get_version=GetVersionObject):
"""Gets the name of this source version."""
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
return 'Google-appcfg-%s' % (release,)
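# Illustrative sketch (stub version/platform getters, assumptions for the
# demo): how the user-agent and source-name strings are assembled when no
# APPCFG_SDK_NAME environment override is set.
def _DemoUserAgent():
  fake_version = lambda: {'release': '1.6.1'}
  fake_platform = lambda: 'Linux/3.0'
  agent = GetUserAgent(get_version=fake_version, get_platform=fake_platform)
  source = GetSourceName(get_version=fake_version)
  return agent, source  # e.g. ('appcfg_py/1.6.1 Linux/3.0 Python/...', 'Google-appcfg-1.6.1')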
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
raw_input_fn: Function used for getting raw user input, like email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
OptionsParser will exit the program when there is a parse failure, it
is nice to subclass OptionsParser and catch the error before exiting.
"""
def __init__(self, argv, parser_class=optparse.OptionParser,
rpc_server_class=appengine_rpc.HttpRpcServer,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass,
out_fh=sys.stdout,
error_fh=sys.stderr,
update_check_class=UpdateCheck,
throttle_class=None,
opener=open,
file_iterator=FileIterator,
time_func=time.time,
wrap_server_error_message=True):
"""Initializer. Parses the cmdline and selects the Action to use.
Initializes all of the attributes described in the class docstring.
Prints help or error messages if there is an error parsing the cmdline.
Args:
argv: The list of arguments passed to this program.
parser_class: Options parser to use for this application.
rpc_server_class: RPC server class to use for this application.
raw_input_fn: Function used for getting user email.
password_input_fn: Function used for getting user password.
out_fh: All normal output is printed to this file handle.
error_fh: Unexpected HTTPErrors are printed to this file handle.
update_check_class: UpdateCheck class (can be replaced for testing).
throttle_class: A class to use instead of ThrottledHttpRpcServer
(only used in the bulkloader).
opener: Function used for opening files.
file_iterator: Callable that takes (basepath, skip_files, file_separator)
and returns a generator that yields all filenames in the file tree
rooted at that path, skipping files that match the skip_files compiled
regular expression.
time_func: Function which provides the current time (can be replaced for
testing).
wrap_server_error_message: If true, the error messages from
urllib2.HTTPError exceptions in Run() are wrapped with
'--- begin server output ---' and '--- end server output ---',
otherwise the error message is printed as is.
"""
self.parser_class = parser_class
self.argv = argv
self.rpc_server_class = rpc_server_class
self.raw_input_fn = raw_input_fn
self.password_input_fn = password_input_fn
self.out_fh = out_fh
self.error_fh = error_fh
self.update_check_class = update_check_class
self.throttle_class = throttle_class
self.time_func = time_func
self.wrap_server_error_message = wrap_server_error_message
self.parser = self._GetOptionParser()
for action in self.actions.itervalues():
action.options(self, self.parser)
self.options, self.args = self.parser.parse_args(argv[1:])
if self.options.max_size is not None:
print >>sys.stderr, """\
WARNING: -S/--max_size is deprecated. The server provides the current value;
you do not need to override the size except in rare cases."""
if len(self.args) < 1:
self._PrintHelpAndExit()
if not self.options.allow_any_runtime:
if self.options.runtime:
if self.options.runtime not in SUPPORTED_RUNTIMES:
_PrintErrorAndExit(self.error_fh,
'"%s" is not a supported runtime\n' %
self.options.runtime)
else:
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = (
'|'.join(SUPPORTED_RUNTIMES))
action = self.args.pop(0)
def RaiseParseError(actionname, action):
self.parser, self.options = self._MakeSpecificParser(action)
error_desc = action.error_desc
if not error_desc:
error_desc = "Expected a <directory> argument after '%s'." % (
actionname.split(' ')[0])
self.parser.error(error_desc)
if action == BACKENDS_ACTION:
if len(self.args) < 1:
RaiseParseError(action, self.actions[BACKENDS_ACTION])
backend_action_first = BACKENDS_ACTION + ' ' + self.args[0]
if backend_action_first in self.actions:
self.args.pop(0)
action = backend_action_first
elif len(self.args) > 1:
backend_directory_first = BACKENDS_ACTION + ' ' + self.args[1]
if backend_directory_first in self.actions:
self.args.pop(1)
action = backend_directory_first
if len(self.args) < 1 or action == BACKENDS_ACTION:
RaiseParseError(action, self.actions[action])
if action not in self.actions:
self.parser.error("Unknown action: '%s'\n%s" %
(action, self.parser.get_description()))
self.action = self.actions[action]
if not self.action.uses_basepath or self.options.help:
self.basepath = None
else:
if not self.args:
RaiseParseError(action, self.action)
self.basepath = self.args.pop(0)
self.parser, self.options = self._MakeSpecificParser(self.action)
if self.options.help:
self._PrintHelpAndExit()
if self.options.verbose == 2:
logging.getLogger().setLevel(logging.INFO)
elif self.options.verbose == 3:
logging.getLogger().setLevel(logging.DEBUG)
global verbosity
verbosity = self.options.verbose
self.opener = opener
self.file_iterator = file_iterator
def Run(self):
"""Executes the requested action.
Catches any HTTPErrors raised by the action and prints them to stderr.
Returns:
1 on error, 0 if successful.
"""
try:
self.action(self)
except urllib2.HTTPError, e:
body = e.read()
if self.wrap_server_error_message:
error_format = ('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---')
else:
error_format = 'Error %d: %s'
print >>self.error_fh, (error_format % (e.code, body.rstrip('\n')))
return 1
except yaml_errors.EventListenerError, e:
print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
return 1
return 0
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ''
for action_name in action_names:
desc += ' %s: %s\n' % (action_name, self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + '\n'
desc = self._GetActionDescriptions()
desc = ('Action must be one of:\n%s'
'Use \'help <action>\' for a detailed description.') % desc
parser = self.parser_class(usage='%prog [options] <action>',
description=desc,
formatter=Formatter(),
conflict_handler='resolve')
parser.add_option('-h', '--help', action='store_true',
dest='help', help='Show the help message and exit.')
parser.add_option('-q', '--quiet', action='store_const', const=0,
dest='verbose', help='Print errors only.')
parser.add_option('-v', '--verbose', action='store_const', const=2,
dest='verbose', default=1,
help='Print info level logs.')
parser.add_option('--noisy', action='store_const', const=3,
dest='verbose', help='Print all logs.')
parser.add_option('-s', '--server', action='store', dest='server',
default='appengine.google.com',
metavar='SERVER', help='The App Engine server.')
parser.add_option('--secure', action='store_true', dest='secure',
default=True, help=optparse.SUPPRESS_HELP)
parser.add_option('--insecure', action='store_false', dest='secure',
help='Use HTTP when communicating with the server.')
parser.add_option('-e', '--email', action='store', dest='email',
metavar='EMAIL', default=None,
help='The username to use. Will prompt if omitted.')
parser.add_option('-H', '--host', action='store', dest='host',
metavar='HOST', default=None,
help='Overrides the Host header sent with all RPCs.')
parser.add_option('--no_cookies', action='store_false',
dest='save_cookies', default=True,
help='Do not save authentication cookies to local disk.')
parser.add_option('--skip_sdk_update_check', action='store_true',
dest='skip_sdk_update_check', default=False,
help='Do not check for SDK updates.')
parser.add_option('--passin', action='store_true',
dest='passin', default=False,
help='Read the login password from stdin.')
parser.add_option('-A', '--application', action='store', dest='app_id',
help='Override application from app.yaml file.')
parser.add_option('-V', '--version', action='store', dest='version',
help='Override (major) version from app.yaml file.')
parser.add_option('-r', '--runtime', action='store', dest='runtime',
help='Override runtime from app.yaml file.')
parser.add_option('-R', '--allow_any_runtime', action='store_true',
dest='allow_any_runtime', default=False,
help='Do not validate the runtime in app.yaml.')
return parser
def _MakeSpecificParser(self, action):
"""Creates a new parser with documentation specific to 'action'.
Args:
action: An Action instance to be used when initializing the new parser.
Returns:
A tuple containing:
parser: An instance of OptionsParser customized to 'action'.
options: The command line options after re-parsing.
"""
parser = self._GetOptionParser()
parser.set_usage(action.usage)
parser.set_description('%s\n%s' % (action.short_desc, action.long_desc))
action.options(self, parser)
options, unused_args = parser.parse_args(self.argv[1:])
return parser, options
def _PrintHelpAndExit(self, exit_code=2):
"""Prints the parser's help message and exits the program.
Args:
exit_code: The integer code to pass to sys.exit().
"""
self.parser.print_help()
sys.exit(exit_code)
def _GetRpcServer(self):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = self.options.email
if email is None:
email = self.raw_input_fn('Email: ')
password_prompt = 'Password for %s: ' % email
if self.options.passin:
password = self.raw_input_fn(password_prompt)
else:
password = self.password_input_fn(password_prompt)
return (email, password)
StatusUpdate('Host: %s' % self.options.server)
if self.options.host and self.options.host == 'localhost':
email = self.options.email
if email is None:
email = 'test@example.com'
logging.info('Using debug user %s. Override with --email', email)
rpcserver = self.rpc_server_class(
self.options.server,
lambda: (email, 'password'),
GetUserAgent(),
GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies,
secure=False)
rpcserver.authenticated = True
return rpcserver
if self.options.passin:
auth_tries = 1
else:
auth_tries = 3
return self.rpc_server_class(self.options.server, GetUserCredentials,
GetUserAgent(), GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies,
auth_tries=auth_tries,
account_type='HOSTED_OR_GOOGLE',
secure=self.options.secure)
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Filename without extension to search for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error('Not a directory: %s' % basepath)
alt_basepath = os.path.join(basepath, "WEB-INF", "appengine-generated")
for yaml_basepath in (basepath, alt_basepath):
for yaml_file in (file_name + '.yaml', file_name + '.yml'):
yaml_path = os.path.join(yaml_basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
def _ParseAppYaml(self, basepath, includes=True):
"""Parses the app.yaml file.
Args:
basepath: the directory of the application.
includes: if True builtins and includes will be parsed.
Returns:
An AppInfoExternal object.
"""
appyaml_filename = self._FindYaml(basepath, 'app')
if appyaml_filename is None:
self.parser.error('Directory does not contain an app.yaml '
'configuration file.')
fh = self.opener(appyaml_filename, 'r')
try:
if includes:
appyaml = appinfo_includes.Parse(fh, self.opener)
else:
appyaml = appinfo.LoadSingleAppInfo(fh)
finally:
fh.close()
orig_application = appyaml.application
orig_version = appyaml.version
if self.options.app_id:
appyaml.application = self.options.app_id
if self.options.version:
appyaml.version = self.options.version
if self.options.runtime:
appyaml.runtime = self.options.runtime
msg = 'Application: %s' % appyaml.application
if appyaml.application != orig_application:
msg += ' (was: %s)' % orig_application
if self.action.function == 'Update':
msg += '; version: %s' % appyaml.version
if appyaml.version != orig_version:
msg += ' (was: %s)' % orig_version
StatusUpdate(msg)
return appyaml
def _ParseYamlFile(self, basepath, basename, parser):
"""Parses the a yaml file.
Args:
basepath: the directory of the application.
basename: the base name of the file (with the '.yaml' stripped off).
parser: the function or method used to parse the file.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
file_name = self._FindYaml(basepath, basename)
if file_name is not None:
fh = self.opener(file_name, 'r')
try:
defns = parser(fh)
finally:
fh.close()
return defns
return None
def _ParseBackendsYaml(self, basepath):
"""Parses the backends.yaml file.
Args:
basepath: the directory of the application.
Returns:
A BackendsInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'backends',
backendinfo.LoadBackendInfo)
def _ParseIndexYaml(self, basepath):
"""Parses the index.yaml file.
Args:
basepath: the directory of the application.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'index',
datastore_index.ParseIndexDefinitions)
def _ParseCronYaml(self, basepath):
"""Parses the cron.yaml file.
Args:
basepath: the directory of the application.
Returns:
A CronInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron)
def _ParseQueueYaml(self, basepath):
"""Parses the queue.yaml file.
Args:
basepath: the directory of the application.
Returns:
A QueueInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'queue', queueinfo.LoadSingleQueue)
def _ParseDosYaml(self, basepath):
"""Parses the dos.yaml file.
Args:
basepath: the directory of the application.
Returns:
A DosInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'dos', dosinfo.LoadSingleDos)
def Help(self, action=None):
"""Prints help for a specific action.
Args:
action: If provided, print help for the action provided.
Expects self.args[0], or 'action', to contain the name of the action in
question. Exits the program after printing the help message.
"""
if not action:
if len(self.args) > 1:
self.args = [' '.join(self.args)]
if len(self.args) != 1 or self.args[0] not in self.actions:
self.parser.error('Expected a single action argument. '
'Must be one of:\n' +
self._GetActionDescriptions())
action = self.args[0]
action = self.actions[action]
self.parser, unused_options = self._MakeSpecificParser(action)
self._PrintHelpAndExit(exit_code=0)
def DownloadApp(self):
"""Downloads the given app+version."""
if len(self.args) != 1:
self.parser.error('\"download_app\" expects one non-option argument, '
'found ' + str(len(self.args)) + '.')
out_dir = self.args[0]
app_id = self.options.app_id
if app_id is None:
self.parser.error('You must specify an app ID via -A or --application.')
app_version = self.options.version
if os.path.exists(out_dir):
if not os.path.isdir(out_dir):
self.parser.error('Cannot download to path "%s": '
'there\'s a file in the way.' % out_dir)
elif os.listdir(out_dir):
self.parser.error('Cannot download to path "%s": directory already '
'exists and it isn\'t empty.' % out_dir)
rpcserver = self._GetRpcServer()
DoDownloadApp(rpcserver, out_dir, app_id, app_version)
def UpdateVersion(self, rpcserver, basepath, appyaml, backend=None):
"""Updates and deploys a new appversion.
Args:
rpcserver: An AbstractRpcServer instance on which RPC calls can be made.
basepath: The root directory of the version to update.
appyaml: The AppInfoExternal object parsed from app.yaml
backend: The name of the backend to update, if any.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
"""
if self.options.precompilation:
if not appyaml.derived_file_type:
appyaml.derived_file_type = []
if appinfo.PYTHON_PRECOMPILED not in appyaml.derived_file_type:
appyaml.derived_file_type.append(appinfo.PYTHON_PRECOMPILED)
if self.options.skip_sdk_update_check:
logging.info('Skipping update check')
else:
updatecheck = self.update_check_class(rpcserver, appyaml)
updatecheck.CheckForUpdates()
appversion = AppVersionUpload(rpcserver, appyaml, self.options.version,
backend, self.error_fh)
return appversion.DoUpload(
self.file_iterator(basepath, appyaml.skip_files, appyaml.runtime),
lambda path: self.opener(os.path.join(basepath, path), 'rb'),
self.options.max_size)
def Update(self):
"""Updates and deploys a new appversion and global app configs."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath, includes=True)
rpcserver = self._GetRpcServer()
self.UpdateVersion(rpcserver, self.basepath, appyaml)
if self.options.backends:
self.BackendsUpdate()
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, appyaml, index_defs)
try:
index_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'indexes. Please retry later with appcfg.py update_indexes.')
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml)
cron_upload.DoUpload()
queue_yaml = self._ParseQueueYaml(self.basepath)
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml)
queue_upload.DoUpload()
dos_yaml = self._ParseDosYaml(self.basepath)
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml)
dos_upload.DoUpload()
def _UpdateOptions(self, parser):
"""Adds update-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-S', '--max_size', type='int', dest='max_size',
default=None, metavar='SIZE',
help='DEPRECATED: Maximum size of a file to upload. '
'The server provides the current value; you do not need '
'to override the size except in rare cases.')
parser.add_option('--no_precompilation', action='store_false',
dest='precompilation', default=True,
help='Disable automatic Python precompilation.')
parser.add_option('--backends', action='store_true',
dest='backends', default=False,
help='Update backends when performing appcfg update.')
def VacuumIndexes(self):
"""Deletes unused indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs is None:
index_defs = datastore_index.IndexDefinitions()
rpcserver = self._GetRpcServer()
vacuum = VacuumIndexesOperation(rpcserver,
appyaml,
self.options.force_delete)
vacuum.DoVacuum(index_defs)
def _VacuumIndexesOptions(self, parser):
"""Adds vacuum_indexes-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-f', '--force', action='store_true', dest='force_delete',
default=False,
help='Force deletion without being prompted.')
def UpdateCron(self):
"""Updates any new or changed cron definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml)
cron_upload.DoUpload()
def UpdateIndexes(self):
"""Updates indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, appyaml, index_defs)
index_upload.DoUpload()
def UpdateQueues(self):
"""Updates any new or changed task queue definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
queue_yaml = self._ParseQueueYaml(self.basepath)
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml)
queue_upload.DoUpload()
def UpdateDos(self):
"""Updates any new or changed dos definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
dos_yaml = self._ParseDosYaml(self.basepath)
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml)
dos_upload.DoUpload()
def BackendsAction(self):
"""Placeholder; we never expect this action to be invoked."""
pass
def BackendsYamlCheck(self, appyaml, backend=None):
"""Check the backends.yaml file is sane and which backends to update."""
if appyaml.backends:
self.parser.error('Backends are not allowed in app.yaml.')
backends_yaml = self._ParseBackendsYaml(self.basepath)
appyaml.backends = backends_yaml.backends
if not appyaml.backends:
self.parser.error('No backends found in backends.yaml.')
backends = []
for backend_entry in appyaml.backends:
entry = backendinfo.LoadBackendEntry(backend_entry.ToYAML())
if entry.name in backends:
self.parser.error('Duplicate entry for backend: %s.' % entry.name)
else:
backends.append(entry.name)
backends_to_update = []
if backend:
if backend in backends:
backends_to_update = [backend]
else:
self.parser.error("Backend '%s' not found in backends.yaml." %
backend)
else:
backends_to_update = backends
return backends_to_update
def BackendsUpdate(self):
"""Updates a backend."""
self.backend = None
if len(self.args) == 1:
self.backend = self.args[0]
elif len(self.args) > 1:
self.parser.error('Expected an optional <backend> argument.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
backends_to_update = self.BackendsYamlCheck(appyaml, self.backend)
for backend in backends_to_update:
self.UpdateVersion(rpcserver, self.basepath, appyaml, backend)
def BackendsList(self):
"""Lists all backends for an app."""
if self.args:
self.parser.error('Expected no arguments.')
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/list', app_id=appyaml.application)
print >> self.out_fh, response
def BackendsRollback(self):
"""Does a rollback of an existing transaction on this backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
self._Rollback(self.args[0])
def BackendsStart(self):
"""Starts a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/start',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsStop(self):
"""Stops a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/stop',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsDelete(self):
"""Deletes a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/delete',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsConfigure(self):
"""Changes the configuration of an existing backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppYaml(self.basepath)
backends_yaml = self._ParseBackendsYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/configure',
app_id=appyaml.application,
backend=backend,
payload=backends_yaml.ToYAML())
print >> self.out_fh, response
def Rollback(self):
"""Does a rollback of an existing transaction for this app version."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
self._Rollback()
def _Rollback(self, backend=None):
"""Does a rollback of an existing transaction.
Args:
backend: name of a backend to rollback, or None
If a backend is specified, the rollback will affect only that backend;
otherwise the rollback will affect the current app version.
"""
appyaml = self._ParseAppYaml(self.basepath)
appversion = AppVersionUpload(self._GetRpcServer(), appyaml,
self.options.version, backend)
appversion.in_transaction = True
appversion.Rollback()
def SetDefaultVersion(self):
"""Sets the default version."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
appyaml = self._ParseAppYaml(self.basepath)
version_setter = DefaultVersionSet(self._GetRpcServer(), appyaml)
version_setter.SetVersion()
def RequestLogs(self):
"""Write request logs to a file."""
if len(self.args) != 1:
self.parser.error(
'Expected a <directory> argument and an <output_file> argument.')
if (self.options.severity is not None and
not 0 <= self.options.severity <= MAX_LOG_LEVEL):
self.parser.error(
'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)
if self.options.num_days is None:
self.options.num_days = int(not self.options.append)
try:
end_date = self._ParseEndDate(self.options.end_date)
except (TypeError, ValueError):
self.parser.error('End date must be in the format YYYY-MM-DD.')
rpcserver = self._GetRpcServer()
appyaml = self._ParseAppYaml(self.basepath)
logs_requester = LogsRequester(rpcserver, appyaml, self.args[0],
self.options.num_days,
self.options.append,
self.options.severity,
end_date,
self.options.vhost,
self.options.include_vhost,
self.options.include_all,
time_func=self.time_func)
logs_requester.DownloadLogs()
@staticmethod
def _ParseEndDate(date, time_func=time.time):
"""Translates an ISO 8601 date to a date object.
Args:
date: A date string as YYYY-MM-DD.
time_func: time.time() function for testing.
Returns:
A date object representing the last day of logs to get.
If no date is given, returns today in the US/Pacific timezone.
"""
if not date:
return PacificDate(time_func())
return datetime.date(*[int(i) for i in date.split('-')])
def _RequestLogsOptions(self, parser):
"""Adds request_logs-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_days', type='int', dest='num_days',
action='store', default=None,
help='Number of days worth of log data to get. '
'The cut-off point is midnight US/Pacific. '
'Use 0 to get all available logs. '
'Default is 1, unless --append is also given; '
'then the default is 0.')
parser.add_option('-a', '--append', dest='append',
action='store_true', default=False,
help='Append to existing file.')
parser.add_option('--severity', type='int', dest='severity',
action='store', default=None,
help='Severity of app-level log messages to get. '
'The range is 0 (DEBUG) through 4 (CRITICAL). '
'If omitted, only request logs are returned.')
parser.add_option('--vhost', type='string', dest='vhost',
action='store', default=None,
help='The virtual host of log messages to get. '
'If omitted, all log messages are returned.')
parser.add_option('--include_vhost', dest='include_vhost',
action='store_true', default=False,
help='Include virtual host in log messages.')
parser.add_option('--include_all', dest='include_all',
action='store_true', default=None,
help='Include everything in log messages.')
parser.add_option('--end_date', dest='end_date',
action='store', default='',
help='End date (as YYYY-MM-DD) of period for log data. '
'Defaults to today.')
def CronInfo(self, now=None, output=sys.stdout):
"""Displays information about cron definitions.
Args:
now: used for testing.
output: Used for testing.
"""
if self.args:
self.parser.error('Expected a single <directory> argument.')
if now is None:
now = datetime.datetime.now()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml and cron_yaml.cron:
for entry in cron_yaml.cron:
description = entry.description
if not description:
description = '<no description>'
print >>output, '\n%s:\nURL: %s\nSchedule: %s' % (description,
entry.url,
entry.schedule)
schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
matches = schedule.GetMatches(now, self.options.num_runs)
for match in matches:
print >>output, '%s, %s from now' % (
match.strftime('%Y-%m-%d %H:%M:%S'), match - now)
def _CronInfoOptions(self, parser):
"""Adds cron_info-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_runs', type='int', dest='num_runs',
action='store', default=5,
help='Number of runs of each cron job to display. '
'Default is 5.')
def _CheckRequiredLoadOptions(self):
"""Checks that upload/download options are present."""
for option in ['filename']:
if getattr(self.options, option) is None:
self.parser.error('Option \'%s\' is required.' % option)
if not self.options.url:
self.parser.error('You must have google.appengine.ext.remote_api.handler '
'assigned to an endpoint in app.yaml, or provide '
'the url of the handler via the \'url\' option.')
def InferRemoteApiUrl(self, appyaml):
"""Uses app.yaml to determine the remote_api endpoint.
Args:
appyaml: A parsed app.yaml file.
Returns:
The url of the remote_api endpoint as a string, or None
"""
handlers = appyaml.handlers
handler_suffixes = ['remote_api/handler.py',
'remote_api.handler.application']
app_id = appyaml.application
for handler in handlers:
if hasattr(handler, 'script') and handler.script:
if any(handler.script.endswith(suffix) for suffix in handler_suffixes):
server = self.options.server
url = handler.url
if url.endswith('(/.*)?'):
url = url[:-6]
if server == 'appengine.google.com':
return 'http://%s.appspot.com%s' % (app_id, url)
else:
match = re.match(PREFIXED_BY_ADMIN_CONSOLE_RE, server)
if match:
return 'http://%s%s%s' % (app_id, match.group(1), url)
else:
return 'http://%s%s' % (server, url)
return None
def RunBulkloader(self, arg_dict):
"""Invokes the bulkloader with the given keyword arguments.
Args:
arg_dict: Dictionary of arguments to pass to bulkloader.Run().
"""
try:
import sqlite3
except ImportError:
logging.error('upload_data action requires SQLite3 and the python '
'sqlite3 module (included in python since 2.5).')
sys.exit(1)
sys.exit(bulkloader.Run(arg_dict))
def _SetupLoad(self):
"""Performs common verification and set up for upload and download."""
if len(self.args) != 1 and not self.options.url:
self.parser.error('Expected either --url or a single <directory> '
'argument.')
if len(self.args) == 1:
self.basepath = self.args[0]
appyaml = self._ParseAppYaml(self.basepath, includes=True)
self.options.app_id = appyaml.application
if not self.options.url:
url = self.InferRemoteApiUrl(appyaml)
if url is not None:
self.options.url = url
self._CheckRequiredLoadOptions()
if self.options.batch_size < 1:
self.parser.error('batch_size must be 1 or larger.')
if verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
self.options.debug = False
else:
logging.getLogger().setLevel(logging.DEBUG)
self.options.debug = True
def _MakeLoaderArgs(self):
args = dict([(arg_name, getattr(self.options, arg_name, None)) for
arg_name in (
'url',
'filename',
'batch_size',
'kind',
'num_threads',
'bandwidth_limit',
'rps_limit',
'http_limit',
'db_filename',
'config_file',
'auth_domain',
'has_header',
'loader_opts',
'log_file',
'passin',
'email',
'debug',
'exporter_opts',
'mapper_opts',
'result_db_filename',
'mapper_opts',
'dry_run',
'dump',
'restore',
'namespace',
'create_config',
)])
args['application'] = self.options.app_id
args['throttle_class'] = self.throttle_class
return args
def PerformDownload(self, run_fn=None):
"""Performs a datastore download via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Downloading data records.')
args = self._MakeLoaderArgs()
args['download'] = bool(args['config_file'])
args['has_header'] = False
args['map'] = False
args['dump'] = not args['config_file']
args['restore'] = False
args['create_config'] = False
run_fn(args)
def PerformUpload(self, run_fn=None):
"""Performs a datastore upload via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Uploading data records.')
args = self._MakeLoaderArgs()
args['download'] = False
args['map'] = False
args['dump'] = False
args['restore'] = not args['config_file']
args['create_config'] = False
run_fn(args)
def CreateBulkloadConfig(self, run_fn=None):
"""Create a bulkloader config via the bulkloader wizard.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Creating bulkloader configuration.')
args = self._MakeLoaderArgs()
args['download'] = False
args['has_header'] = False
args['map'] = False
args['dump'] = False
args['restore'] = False
args['create_config'] = True
run_fn(args)
def _PerformLoadOptions(self, parser):
"""Adds options common to 'upload_data' and 'download_data'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--url', type='string', dest='url',
action='store',
help='The location of the remote_api endpoint.')
parser.add_option('--batch_size', type='int', dest='batch_size',
action='store', default=10,
help='Number of records to post in each request.')
parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit',
action='store', default=250000,
help='The maximum bytes/second bandwidth for transfers.')
parser.add_option('--rps_limit', type='int', dest='rps_limit',
action='store', default=20,
help='The maximum records/second for transfers.')
parser.add_option('--http_limit', type='int', dest='http_limit',
action='store', default=8,
help='The maximum requests/second for transfers.')
parser.add_option('--db_filename', type='string', dest='db_filename',
action='store',
help='Name of the progress database file.')
parser.add_option('--auth_domain', type='string', dest='auth_domain',
action='store', default='gmail.com',
help='The name of the authorization domain to use.')
parser.add_option('--log_file', type='string', dest='log_file',
help='File to write bulkloader logs. If not supplied '
'then a new log file will be created, named: '
'bulkloader-log-TIMESTAMP.')
parser.add_option('--dry_run', action='store_true',
dest='dry_run', default=False,
help='Do not execute any remote_api calls')
parser.add_option('--namespace', type='string', dest='namespace',
action='store', default='',
help='Namespace to use when accessing datastore.')
parser.add_option('--num_threads', type='int', dest='num_threads',
action='store', default=10,
help='Number of threads to transfer records with.')
def _PerformUploadOptions(self, parser):
"""Adds 'upload_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file containing the input data.'
' (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to store.')
parser.add_option('--has_header', dest='has_header',
action='store_true', default=False,
help='Whether the first line of the input file should be'
' skipped')
parser.add_option('--loader_opts', type='string', dest='loader_opts',
help='A string to pass to the Loader.initialize method.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _PerformDownloadOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where output data is to be'
' written. (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to retrieve.')
parser.add_option('--exporter_opts', type='string', dest='exporter_opts',
help='A string to pass to the Exporter.initialize method.'
)
parser.add_option('--result_db_filename', type='string',
dest='result_db_filename',
action='store',
help='Database to write entities to for download.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _CreateBulkloadConfigOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where the generated template'
' is to be written. (Required)')
def ResourceLimitsInfo(self, output=None):
"""Outputs the current resource limits."""
appyaml = self._ParseAppYaml(self.basepath, includes=True)
resource_limits = GetResourceLimits(self._GetRpcServer(), appyaml)
for attr_name in sorted(resource_limits):
print >>output, '%s: %s' % (attr_name, resource_limits[attr_name])
class Action(object):
"""Contains information about a command line action.
Attributes:
function: The name of a function defined on AppCfg or its subclasses
that will perform the appropriate action.
usage: A command line usage string.
short_desc: A one-line description of the action.
long_desc: A detailed description of the action. Whitespace and
formatting will be preserved.
error_desc: An error message to display when the incorrect arguments are
given.
options: A function that will add extra options to a given OptionParser
object.
uses_basepath: Does the action use a basepath/app-directory (and hence
app.yaml).
"""
def __init__(self, function, usage, short_desc, long_desc='',
error_desc=None, options=lambda obj, parser: None,
uses_basepath=True):
"""Initializer for the class attributes."""
self.function = function
self.usage = usage
self.short_desc = short_desc
self.long_desc = long_desc
self.error_desc = error_desc
self.options = options
self.uses_basepath = uses_basepath
def __call__(self, appcfg):
"""Invoke this Action on the specified AppCfg.
This calls the function of the appropriate name on AppCfg, and
    respects polymorphic overrides.
Args:
appcfg: The appcfg to use.
Returns:
The result of the function call.
"""
method = getattr(appcfg, self.function)
return method()
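  # Editor's hedged illustration (not part of the original tool): with the
  # definitions below, `actions['help'](appcfg_instance)` resolves to
  # `appcfg_instance.Help()` via the getattr dispatch above; the instance
  # name here is hypothetical.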
actions = {
'help': Action(
function='Help',
usage='%prog help <action>',
short_desc='Print help for a specific action.',
uses_basepath=False),
'update': Action(
function='Update',
usage='%prog [options] update <directory> [version]',
options=_UpdateOptions,
short_desc='Create or update an app version.',
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
'download_app': Action(
function='DownloadApp',
usage='%prog [options] download_app -A app_id [ -V version ]'
' <out-dir>',
short_desc='Download a previously-uploaded app.',
long_desc="""
Download a previously-uploaded app to the specified directory. The app
ID is specified by the \"-A\" option. The optional version is specified
by the \"-V\" option.""",
uses_basepath=False),
'update_cron': Action(
function='UpdateCron',
usage='%prog [options] update_cron <directory>',
short_desc='Update application cron definitions.',
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the optional cron.yaml file."""),
'update_indexes': Action(
function='UpdateIndexes',
usage='%prog [options] update_indexes <directory>',
short_desc='Update application indexes.',
long_desc="""
The 'update_indexes' command will add additional indexes which are not currently
in production as well as restart any indexes that were not completed."""),
'update_queues': Action(
function='UpdateQueues',
usage='%prog [options] update_queues <directory>',
short_desc='Update application task queue definitions.',
long_desc="""
The 'update_queues' command will update any new, removed or changed task queue
definitions from the optional queue.yaml file."""),
'update_dos': Action(
function='UpdateDos',
usage='%prog [options] update_dos <directory>',
short_desc='Update application dos definitions.',
long_desc="""
The 'update_dos' command will update any new, removed or changed dos
definitions from the optional dos.yaml file."""),
'backends': Action(
function='BackendsAction',
usage='%prog [options] backends <directory> <action>',
short_desc='Perform a backend action.',
long_desc="""
The 'backends' command will perform a backends action.""",
error_desc="""\
Expected a <directory> and <action> argument."""),
'backends list': Action(
function='BackendsList',
usage='%prog [options] backends <directory> list',
short_desc='List all backends configured for the app.',
long_desc="""
The 'backends list' command will list all backends configured for the app."""),
'backends update': Action(
function='BackendsUpdate',
usage='%prog [options] backends <directory> update [<backend>]',
options=_UpdateOptions,
short_desc='Update one or more backends.',
long_desc="""
The 'backends update' command updates one or more backends. This command
updates backend configuration settings and deploys new code to the server. Any
existing instances will stop and be restarted. Updates all backends, or a
single backend if the <backend> argument is provided."""),
'backends rollback': Action(
function='BackendsRollback',
usage='%prog [options] backends <directory> rollback <backend>',
short_desc='Roll back an update of a backend.',
long_desc="""
The 'backends update' command requires a server-side transaction.
Use 'backends rollback' if you experience an error during 'backends update'
and want to start the update over again."""),
'backends start': Action(
function='BackendsStart',
usage='%prog [options] backends <directory> start <backend>',
short_desc='Start a backend.',
long_desc="""
The 'backends start' command will put a backend into the START state."""),
'backends stop': Action(
function='BackendsStop',
usage='%prog [options] backends <directory> stop <backend>',
short_desc='Stop a backend.',
long_desc="""
The 'backends stop' command will put a backend into the STOP state."""),
'backends delete': Action(
function='BackendsDelete',
usage='%prog [options] backends <directory> delete <backend>',
short_desc='Delete a backend.',
long_desc="""
The 'backends delete' command will delete a backend."""),
'backends configure': Action(
function='BackendsConfigure',
usage='%prog [options] backends <directory> configure <backend>',
short_desc='Reconfigure a backend without stopping it.',
long_desc="""
The 'backends configure' command performs an online update of a backend, without
stopping instances that are currently running. No code or handlers are updated,
only certain configuration settings specified in backends.yaml. Valid settings
are: instances, options: public, and options: failfast."""),
'vacuum_indexes': Action(
function='VacuumIndexes',
usage='%prog [options] vacuum_indexes <directory>',
options=_VacuumIndexesOptions,
short_desc='Delete unused indexes from application.',
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
'rollback': Action(
function='Rollback',
usage='%prog [options] rollback <directory>',
short_desc='Rollback an in-progress update.',
long_desc="""
The 'update' command requires a server-side transaction.
Use 'rollback' if you experience an error during 'update'
and want to begin a new update transaction."""),
'request_logs': Action(
function='RequestLogs',
usage='%prog [options] request_logs <directory> <output_file>',
options=_RequestLogsOptions,
short_desc='Write request logs in Apache common log format.',
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If the output file is '-', the logs will be written to stdout.""",
error_desc="""\
Expected <directory> and <output_file> arguments."""),
'cron_info': Action(
function='CronInfo',
usage='%prog [options] cron_info <directory>',
options=_CronInfoOptions,
short_desc='Display information about cron jobs.',
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
'upload_data': Action(
function='PerformUpload',
usage='%prog [options] upload_data <directory>',
options=_PerformUploadOptions,
short_desc='Upload data records to datastore.',
long_desc="""
The 'upload_data' command translates input records into datastore entities and
uploads them into your application's datastore.""",
uses_basepath=False),
'download_data': Action(
function='PerformDownload',
usage='%prog [options] download_data <directory>',
options=_PerformDownloadOptions,
short_desc='Download entities from datastore.',
long_desc="""
The 'download_data' command downloads datastore entities and writes them to
file as CSV or developer defined format.""",
uses_basepath=False),
'create_bulkloader_config': Action(
function='CreateBulkloadConfig',
usage='%prog [options] create_bulkload_config <directory>',
options=_CreateBulkloadConfigOptions,
short_desc='Create a bulkloader.yaml from a running application.',
long_desc="""
The 'create_bulkloader_config' command creates a bulkloader.yaml configuration
template for use with upload_data or download_data.""",
uses_basepath=False),
'set_default_version': Action(
function='SetDefaultVersion',
usage='%prog [options] set_default_version <directory>',
short_desc='Set the default (serving) version.',
long_desc="""
The 'set_default_version' command sets the default (serving) version of the app.
Defaults to using the version specified in app.yaml; use the --version flag to
override this."""),
'resource_limits_info': Action(
function='ResourceLimitsInfo',
usage='%prog [options] resource_limits_info <directory>',
short_desc='Get the resource limits.',
long_desc="""
The 'resource_limits_info' command prints the current resource limits that
are enforced."""),
}
def main(argv):
logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s '))
try:
result = AppCfgApp(argv).Run()
if result:
sys.exit(result)
except KeyboardInterrupt:
StatusUpdate('Interrupted.')
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
|
adviti/melange
|
thirdparty/google_appengine/google/appengine/tools/appcfg.py
|
Python
|
apache-2.0
| 129,924
|
[
"VisIt"
] |
dbb73a577dee1ffa04ce61da33559426eee6d182a52016498e78c2bc2655f316
|
import ply.lex as lex
import ply.yacc as yacc
import boilerplategen
import cgen
import collect_id as ci
import define_arguments as darg
import kernelgen
import lan
import rewriter
import stencil
import transpose as tp
fileprefix = "../../test/C/"
SetNoReadBack = True
DoOptimizations = True
def __get_ast_from_file(foldername, filename):
cparser = yacc.yacc(module=lan)
lex.lex(module=lan)
fullfilename = fileprefix + foldername + '/' + filename
try:
f = open(fullfilename, 'r')
s = f.read()
f.close()
    except IOError:
        # A missing file raises IOError, not EOFError; re-raise after reporting.
        print('file %s wasn\'t found' % fullfilename)
        raise
lex.input(s)
while 1:
tok = lex.token()
if not tok: break
## print tok
ast = cparser.parse(s)
return ast
def __get_baseform_name(name):
return fileprefix + name + '/' + __get_baseform_filename(name)
def __get_baseform_filename(name):
return 'baseform_' + name.lower() + '.cpp'
def _create_baseform(name):
ast = __get_ast_from_init(name)
cprint = cgen.CGenerator()
baseform_filename = __get_baseform_name(name)
cprint.write_ast_to_file(ast, filename=baseform_filename)
def __get_ast_from_init(name):
ast = __get_ast_from_file(name, name + 'For.cpp')
ast.ext.append(lan.ProgramName(name))
rw = rewriter.Rewriter()
rw.rewrite_array_ref(ast)
rw.rewrite_to_baseform(ast, name + 'For')
return ast
def __get_ast_from_base(name):
ast = __get_ast_from_file(name, __get_baseform_filename(name))
return ast
def gen_full_code(ast):
kgen = kernelgen.KernelGen(ast, fileprefix)
cprint = cgen.CGenerator()
kgen.generate_kernels()
boilerplate = boilerplategen.Boilerplate(ast, SetNoReadBack)
boilerast = boilerplate.generate_code()
name = ci.get_program_name(ast)
cprint.write_ast_to_file(boilerast, filename=fileprefix + name + '/' + 'boilerplate.cpp')
def matmul():
name = 'MatMul'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def __optimize(ast, par_dim=None):
ast.ext.append(lan.ParDim(par_dim))
name = ci.get_program_name(ast)
if DoOptimizations:
__main_transpose(ast)
__main_placeinreg(ast)
if name == 'Jacobi':
__main_stencil(ast)
__main_placeinlocal(ast)
__main_definearg(ast)
gen_full_code(ast)
def knearest():
name = 'KNearest'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast, par_dim=1)
def jacobi():
name = 'Jacobi'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def nbody():
name = 'NBody'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def laplace():
name = 'Laplace'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast, par_dim=1)
def gaussian():
name = 'GaussianDerivates'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def __main_transpose(ast):
tps = tp.Transpose(ast)
tps.transpose()
def __main_definearg(ast):
dargs = darg.DefineArguments(ast)
dargs.define_arguments()
def __main_placeinreg(ast):
pass
def __main_placeinlocal(ast):
pass
def __main_stencil(ast):
sten = stencil.Stencil(ast)
sten.stencil(['X1'], west=1, north=1, east=1, south=1, middle=0)
if __name__ == "__main__":
matmul()
knearest()
jacobi()
nbody()
laplace()
gaussian()
|
dikujepsen/OpenTran
|
v2.0/framework/Matmul/main.py
|
Python
|
mit
| 3,736
|
[
"Gaussian"
] |
ed087760fdea8a194cc1bc9ad4e7e4edfd9fb802e40395e2fc285552e89ea064
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_random_state, as_float_array, deprecated
from sklearn.utils.extmath import norm
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph
from sklearn.manifold import spectral_embedding
from sklearn.cluster.k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of
        the rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
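# Editor's hedged usage sketch (not part of the original module). The embedding
# below is synthetic; in practice `vectors` would come from spectral_embedding.
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   embedding = rng.rand(20, 3)                        # (n_samples, n_clusters)
#   labels = discretize(embedding, random_state=rng)   # one cluster id per sample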
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
k=None, eigen_tol=0.0,
assign_labels='kmeans',
mode=None):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters: integer, optional
Number of clusters to extract.
n_components: integer, optional, default is k
Number of eigen vectors to use for the spectral embedding
eigen_solver: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init: int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels: array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if not assign_labels in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
if not k is None:
warnings.warn("'k' was renamed to n_clusters and will "
"be removed in 0.15.",
DeprecationWarning)
n_clusters = k
if not mode is None:
warnings.warn("'mode' was renamed to eigen_solver "
"and will be removed in 0.15.",
DeprecationWarning)
eigen_solver = mode
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels, maps
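# Editor's hedged usage sketch (not part of the original module). Any symmetric,
# non-negative similarity matrix works as `affinity`; the RBF kernel here is just
# one way to build it. Note that this variant returns both the labels and the
# spectral embedding.
#
#   import numpy as np
#   from sklearn.metrics.pairwise import rbf_kernel
#   X = np.random.RandomState(0).rand(30, 2)
#   affinity = rbf_kernel(X, gamma=1.0)
#   labels, maps = spectral_clustering(affinity, n_clusters=3, random_state=0)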
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
kernel function such the Gaussian (aka RBF) kernel of the euclidean
distanced ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma: float
Scaling factor of RBF, polynomial, exponential chi² and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors: integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
`affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
`labels_` :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10, k=None,
eigen_tol=0.0, assign_labels='kmeans', mode=None,
degree=3, coef0=1, kernel_params=None):
if k is not None:
warnings.warn("'k' was renamed to n_clusters and "
"will be removed in 0.15.",
DeprecationWarning)
n_clusters = k
if mode is not None:
warnings.warn("'mode' was renamed to eigen_solver and "
"will be removed in 0.15.",
DeprecationWarning)
eigen_solver = mode
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
@property
@deprecated("'mode' was renamed to eigen_solver and will be removed in"
" 0.15.")
def mode(self):
return self.eigen_solver
@property
@deprecated("'k' was renamed to n_clusters and will be removed in"
" 0.15.")
def k(self):
return self.n_clusters
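# Editor's hedged usage sketch (not part of the original module), showing the two
# affinity modes described in the class docstring. Data and parameter values are
# made up.
#
#   import numpy as np
#   from sklearn.metrics.pairwise import rbf_kernel
#   X = np.random.RandomState(1).rand(40, 5)
#   model = SpectralClustering(n_clusters=4, affinity='rbf', gamma=1.0,
#                              random_state=1).fit(X)      # RBF affinity built from X
#   A = rbf_kernel(X, gamma=1.0)
#   model_pre = SpectralClustering(n_clusters=4,
#                                  affinity='precomputed').fit(A)  # caller-supplied affinity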
|
e-koch/SCIMES
|
scimes/spectral.py
|
Python
|
gpl-2.0
| 19,146
|
[
"Brian",
"Gaussian"
] |
50fcf92e54776bc074c85988ad9e21e1d74df08fb4b3a0bd681e914007e3ccb5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""
USAGE: $ python -m gensim.scripts.word2vec2tensor --input <Word2Vec model file> --output <TSV tensor filename prefix> [--binary] <Word2Vec binary flag>
Where:
<Word2Vec model file>: Input Word2Vec model
<TSV tensor filename prefix>: 2D tensor TSV output file name prefix
<Word2Vec binary flag>: Set True if Word2Vec model is binary. Defaults to False.
Output:
The script will create two TSV files. A 2d tensor format file, and a Word Embedding metadata file. Both files will
use the --output file name as prefix.
This script is used to convert the word2vec format to Tensorflow 2D tensor and metadata formats for Embedding Visualization
To use the generated TSV 2D tensor and metadata file in the Projector Visualizer, please
1) Open http://projector.tensorflow.org/.
2) Choose "Load Data" from the left menu.
3) Select "Choose file" in "Load a TSV file of vectors." and choose you local "_tensor.tsv" file
4) Select "Choose file" in "Load a TSV file of metadata." and choose you local "_metadata.tsv" file
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
"""
import os
import sys
import logging
import argparse
import gensim
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
"""
    Convert Word2Vec model to 2D tensor TSV file and metadata file.
Args:
word2vec_model_path (str): word2vec model file path
tensor_filename (str): filename prefix
binary (bool): set True to use a binary Word2Vec model, defaults to False
"""
model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
outfiletsv = tensor_filename + '_tensor.tsv'
outfiletsvmeta = tensor_filename + '_metadata.tsv'
with open(outfiletsv, 'w+') as file_vector:
with open(outfiletsvmeta, 'w+') as file_metadata:
for word in model.index2word:
file_metadata.write(gensim.utils.to_utf8(word) + gensim.utils.to_utf8('\n'))
vector_row = '\t'.join(str(x) for x in model[word])
file_vector.write(vector_row + '\n')
logger.info("2D tensor file saved to %s", outfiletsv)
logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True, help="Input word2vec model")
parser.add_argument("-o", "--output", required=True, help="Output tensor file name prefix")
parser.add_argument("-b", "--binary", required=False, help="If word2vec model in binary format, set True, else False")
args = parser.parse_args()
word2vec2tensor(args.input, args.output, args.binary)
logger.info("finished running %s", os.path.basename(sys.argv[0]))
|
macks22/gensim
|
gensim/scripts/word2vec2tensor.py
|
Python
|
lgpl-2.1
| 3,301
|
[
"VisIt"
] |
9dde73b43e026c36058d2e84028afb34cf72d5ed7d97cd83687b8c6fac77b795
|
#!/usr/bin/env python
""" load Sample into OSDF using info from data file """
import os
import re
from cutlass.Sample import Sample
import settings
from cutlass_utils import \
load_data, get_parent_node_id, list_tags, format_query, \
write_csv_headers, values_to_node_dict, write_out_csv, \
load_node, get_field_header, dump_args, log_it
filename=os.path.basename(__file__)
log = log_it(filename)
# the Higher-Ups
node_type = 'Sample'
parent_type = 'Visit'
grand_parent_type = 'Subject'
great_parent_type = 'Study'
node_tracking_file = settings.node_id_tracking.path
class node_values:
name = '' # sample_name
body_site = ''
fma_body_site = ''
mixs = {}
tags = []
class mixs_fields:
biome = ''
body_product = ''
collection_date = ''
env_package = ''
feature = ''
geo_loc_name = ''
lat_lon = ''
material = ''
project_name = ''
rel_to_oxygen = ''
samp_collect_device = ''
samp_mat_process = ''
samp_size = ''
source_mat_id = []
def generate_mixs(row):
""" create dict of all variables for the mixs variable """
try:
mixs = {
'biome': 'blood [ENVO:02000020]',
'body_product': row['body_site'],
'collection_date': ('2112-12-21'), #not allowed by IRB!
'env_package': 'human-gut',
'feature': 'N/A',
'geo_loc_name': 'Palo Alto, CA, USA',
'lat_lon': '37.441883, -122.143019',
'material': 'blood(ENVO:02000020)',
'project_name': 'iHMP',
'rel_to_oxygen': 'N/A',
'samp_collect_device': 'N/A',
'samp_mat_process': 'N/A',
'samp_size': 'N/A',
'source_mat_id': [],
}
return mixs
except Exception as e:
log.warn('Conversion to MIXS format failed?! (SampleName: {}).\n'
' Exception message:{}'.format(row['sample_name'],
e.message))
def load(internal_id, search_field):
"""search for existing node to update, else create new"""
# node-specific variables:
NodeTypeName = 'Sample'
NodeLoadFunc = 'load_sample'
return load_node(internal_id, search_field, NodeTypeName, NodeLoadFunc)
def validate_record(parent_id, node, record, data_file_name=node_type):
"""update record fields
validate node
if valid, save, if not, return false
"""
log.info("in validate/save: "+node_type)
csv_fieldnames = get_field_header(data_file_name)
write_csv_headers(data_file_name, fieldnames=csv_fieldnames)
node.name = record['sample_name_id']
node.body_site = record['body_site'].lower()
fma_body_site = record['fma_body_site']
node.fma_body_site = fma_body_site
node.mixs = generate_mixs(record)
node.tags = list_tags(node.tags,
# 'test', # for debug!!
'stanford_id: ' + record['sample_name_id'],
'visit id: ' +record['visit_id'],
'subject id: ' +record['rand_subject_id'],
'study: ' +'prediabetes',
'sub_study: ' +record['sub_study'],
'visit type: ' +record['visit_type']
)
# node._attribs = record['attributes']
parent_link = {'collected_during':[parent_id]}
log.debug('parent_id: '+str(parent_id))
node.links = parent_link
csv_fieldnames = get_field_header(data_file_name)
if not node.is_valid():
write_out_csv(data_file_name+'_invalid_records.csv',
fieldnames=csv_fieldnames, values=[record,])
invalidities = node.validate()
err_str = "Invalid {}!\n\t{}".format(node_type, str(invalidities))
log.error(err_str)
# raise Exception(err_str)
elif node.save():
write_out_csv(data_file_name+'_submitted.csv',
fieldnames=csv_fieldnames, values=[record,])
return node
else:
write_out_csv(data_file_name+'_unsaved_records.csv',
fieldnames=csv_fieldnames, values=[record,])
return False
def submit(data_file, id_tracking_file=node_tracking_file):
log.info('Starting submission of %ss.', node_type)
nodes = []
csv_fieldnames = get_field_header(data_file)
write_csv_headers(data_file, fieldnames=csv_fieldnames)
for record in load_data(data_file):
# check not 'unknown' jaxid, not missing visit info
if len(record['visit_id']) > 0:
log.debug('\n...next record...')
try:
log.debug('data record: '+str(record))
# Node-Specific Variables:
load_search_field = 'name'
internal_id = record['sample_name_id']
parent_internal_id = record['visit_id']
grand_parent_internal_id = record['rand_subject_id']
parent_id = get_parent_node_id(
id_tracking_file, parent_type, parent_internal_id)
node_is_new = False # set to True if newbie
node = load(internal_id, load_search_field)
if not getattr(node, load_search_field):
log.debug('loaded node newbie...')
node_is_new = True
saved = validate_record(parent_id, node, record,
data_file_name=data_file)
if saved:
header = settings.node_id_tracking.id_fields
saved_name = getattr(saved, load_search_field)
vals = values_to_node_dict(
[[node_type.lower(), saved_name, saved.id,
parent_type.lower(), parent_internal_id, parent_id]],
header
)
nodes.append(vals)
if node_is_new:
write_out_csv(id_tracking_file,
fieldnames=get_field_header(id_tracking_file),
values=vals)
except Exception, e:
log.exception(e)
raise e
else:
write_out_csv(data_file+'_records_no_submit.csv',
fieldnames=record.keys(), values=[record,])
return nodes
# if __name__ == '__main__':
# pass
|
TheJacksonLaboratory/osdf_submit
|
nodes/host_Seq_Prep_Sample.py
|
Python
|
gpl-3.0
| 6,357
|
[
"VisIt"
] |
e83feae054e596bee15da49bc88c57ae02045a7c9e2ffc3c35d9659d710d31f2
|
# For each PDB in kinDB.xml, download the SIFTS residue-mapping .xml file
# Add experimental and resolved sequences to kinDB (numbered according to the UniProt sequence)
# Also add alignments of these sequences against the UniProt sequence
#
# Daniel L. Parton <partond@mskcc.org> - 10 Apr 2013
#
# Perhaps get DSSP info from here: http://www.rcsb.org/pdb/rest/das/pdbchainfeatures/features?segment=5pti.A
#
# TODO get secondary structure info from SIFTS files
#==============================================================================
# IMPORTS
#==============================================================================
import sys,os,gzip
from lxml import etree
from choderalab.pdb import retrieve_sifts
from choderalab.core import seqwrap
from Bio.PDB import to_one_letter_code
#==============================================================================
# PARAMETERS
#==============================================================================
kinDB_path = 'kinDB.xml'
okinDB_path = 'kinDB-pdb.xml'
structures_dir = os.path.join('..', 'structures')
local_sifts_path = os.path.join(structures_dir, 'sifts')
#==============================================================================
# MAIN
#==============================================================================
# Read in the kinDB XML document
print 'Reading', kinDB_path
parser = etree.XMLParser(remove_blank_text=True)
kinDB = etree.parse(kinDB_path, parser).getroot()
nkinases = len(kinDB)
print 'Number of kinases:', nkinases
for k in range(nkinases):
# For each PDB in kinDB.xml, download the SIFTS residue-mapping .xml file if it is not already present
pdb_nodes = kinDB[k].findall('pk_pdb')
uniprot_sequence = kinDB[k].findtext('uniprot/sequence').strip()
uniprot_sequence = ''.join(uniprot_sequence.split('\n'))
uniprotAC = kinDB[k].find('uniprot').get('AC')
entry_name = kinDB[k].find('uniprot').get('entry_name')
#if uniprotAC != 'P31751':
#if uniprotAC != 'Q00532':
#if uniprotAC != 'P07333':
#if uniprotAC != 'Q16539':
#if uniprotAC != 'Q16539':
#if uniprotAC != 'O14965':
# continue
for pdb_node in pdb_nodes:
pdbid = pdb_node.get('id')
#if pdbid == '1O6L':
# sys.exit()
local_sifts_file_path = os.path.join(local_sifts_path, pdbid+'.xml.gz')
if os.path.exists(local_sifts_file_path):
pass
else:
print 'Downloading SIFTS file and saving as (compressed):', local_sifts_file_path
page = retrieve_sifts(pdbid)
with gzip.open(local_sifts_file_path, 'wb') as local_sifts_file:
local_sifts_file.write(page + '\n')
# Parse the sifts XML document
with gzip.open(local_sifts_file_path,'rb') as local_sifts_file:
sifts = etree.parse(local_sifts_file, parser).getroot()
# Get the chains to be searched from kinDB
kinDB_chain_nodes = pdb_node.findall('chain')
for chain_node in kinDB_chain_nodes:
chainid = chain_node.get('id')
# First check whether the first residue with matching chainid and a UniProt crossref has the same UniProt AC as was picked up from UniProt (by gather-uniprot.py).
# 3O50 and 3O51 are picked up by gather-uniprot.py from uniprot AC O14965. But these have uniprot AC B4DX16 in the sifts .xml files, which is a TrEMBL entry. Sequences are almost identical except for deletion of ~70 residues prior to PK domain of B4DX16. This means that experimental_sequence_aln and related sequences are not added by gather-pdb.py. Need to sort out a special case for these pdbs. Should check for similar cases in other kinases.
# 3O50 and 3O51 can be ignored. (Plenty of other PDBs for that protein)
# 3OG7 is picked up from uniprot AC P15056, but the PDB entry links to Q5IBP5 - this is the AKAP9-BRAF fusion protein.
# XXX TODO XXX 3OG7 will be ignored for now, but at some point should make separate entries for fusion proteins, and add the PDB files accordingly.
first_matching_uniprot_resi = sifts.find('entity[@type="protein"]/segment/listResidue/residue/crossRefDb[@dbSource="PDB"][@dbChainId="%s"]/../crossRefDb[@dbSource="UniProt"]' % chainid)
sifts_uniprotAC = first_matching_uniprot_resi.get('dbAccessionId')
if uniprotAC != sifts_uniprotAC:
print 'PDB %s chain %s picked up from UniProt entry %s %s. Non-matching UniProtAC in sifts: %s. This pk_pdb entry will be deleted when outputting %s' % (pdbid, chainid, entry_name, uniprotAC, sifts_uniprotAC, okinDB_path)
chain_node.set('DELETE_ME','')
#
#
# TODO check if there are any PDBs where two proteins share the same chainid (I seem to remember that there are - check previous scripts)
#
#
# Now extract the sequence data
# These are the sifts residues which include a PDB crossref with matching chainid
chain_residues = sifts.findall('entity[@type="protein"]/segment/listResidue/residue/crossRefDb[@dbSource="PDB"][@dbChainId="%s"]/..' % chainid)
experimental_sequence = ''
experimental_sequence_pdb_resids = []
experimental_sequence_uniprot_res_indices = []
observed_sequence = ''
experimental_sequence_aln = ['-'] * len(uniprot_sequence) # This will contain the alignment of the experimental sequence against the full UniProt sequence. Conflicting residues will be added if they are contiguous with non-conflicting segments
experimental_sequence_aln_conflicts = ['-'] * len(uniprot_sequence) # Same, but conflicting residues are added as 'x' if they have no UniProt crossref, or 'c' if they have a UniProt crossref but also have 'Conflict' residueDetail
n_crossref_uniprot_matches = 0
for r in chain_residues:
residue_details = [ detail.text.strip() for detail in r.findall('residueDetail') ] # list of strings
resname = r.attrib['dbResName']
if resname == None:
print 'WARNING: UniProt crossref not found for conflicting residue!', k, pdbid, chainid, r.attrib
sys.exit()
try:
# Note that this BioPython dict converts a modified aa to the single-letter code of its unmodified parent (e.g. "TPO":"T")
single_letter = to_one_letter_code[ resname ]
except KeyError:
if r.attrib['dbResName'] == 'ACE': # Just ignore N-terminal ACE
continue
# Add residue to experimental_sequence
experimental_sequence += single_letter
# Also save the pdb resids, which we will use later
pdb_resid = r.find('crossRefDb[@dbSource="PDB"]').attrib['dbResNum']
# Some pdb resids are e.g. '464A'
if pdb_resid.isdigit() == False:
if pdbid in ['1O6L','2JDO','2JDR','2UW9','2X39','2XH5']: # These pdbs include three residues with pdb resids 464A, 464B, 464C, (all with UniProt crossrefs) then continue from 465. We will change this so that the pdb resids continue to iterate
corrected_pdb_resids = {'464A':465, '464B':466, '464C':467}
if pdb_resid in corrected_pdb_resids.keys():
pdb_resid = corrected_pdb_resids[pdb_resid]
elif int(pdb_resid[0:3]) > 464:
pdb_resid = int(pdb_resid) + 3
# Otherwise just extract the number (this will also detect negative numbers)
else:
pdb_resid = ''.join([char for char in pdb_resid if (char.isdigit() or char == '-')])
try:
experimental_sequence_pdb_resids.append( int(pdb_resid) )
except:
print uniprotAC, pdbid, chainid, pdb_resid
sys.exit()
# Also add residue to experimental_sequence_aln. Residues which do not match the uniprot sequence (and thus do not have a uniprot crossref) will be added later
crossref_uniprot = r.find('crossRefDb[@dbSource="UniProt"][@dbAccessionId="%s"]' % uniprotAC)
if crossref_uniprot != None:
n_crossref_uniprot_matches += 1
index = int(crossref_uniprot.attrib['dbResNum']) - 1
experimental_sequence_aln[index] = single_letter
if 'Conflict' in residue_details:
experimental_sequence_aln_conflicts[index] = 'c'
else:
experimental_sequence_aln_conflicts[index] = single_letter
experimental_sequence_uniprot_res_indices.append(index)
else:
experimental_sequence_uniprot_res_indices.append(None)
pass
# Add residue to observed_sequence if it is observed, otherwise '-'
if 'Not_Observed' in residue_details:
observed_sequence += '-'
else:
observed_sequence += single_letter
# ======
# Now we add the residues which do not have a uniprot crossref
# ======
#print k, uniprotAC, pdbid, chainid
#print experimental_sequence
#print experimental_sequence_pdb_resids
#print ''.join(experimental_sequence_aln_conflicts)
i = 0
# But first we have to deal with cases where residues have been added at the N-terminus which extend before the start of the uniprot sequence.
# Get the uniprot residue index of the first residue with a uniprot crossref
for s in range(len(experimental_sequence_uniprot_res_indices)):
UP_res_index = experimental_sequence_uniprot_res_indices[s]
if UP_res_index != None:
first_exp_seq_uniprot_res_index = UP_res_index
# And the corresponding pdb resid
corresponding_pdb_resid = experimental_sequence_pdb_resids[s]
exp_seq_first_uniprot_res_index = s
break
# And get the pdb resid of the first residue in the experimental sequence
for s in experimental_sequence_pdb_resids:
if s != None:
first_exp_seq_pdb_resid = s
break
ignore_excess_Nterm_residues_flag = False
# If the experimental sequence includes the first residue of the full uniprot sequence
if first_exp_seq_uniprot_res_index == 0:
# And if the value of the first pdb resid is lower than that of the pdb resid corresponding to the first uniprot residue
if first_exp_seq_pdb_resid < corresponding_pdb_resid:
# Then we will ignore the excess residues
ignore_excess_Nterm_residues_flag = True
while i < len(experimental_sequence):
resname_i = experimental_sequence[i]
uniprot_res_index_i = experimental_sequence_uniprot_res_indices[i]
pdb_resid_i = experimental_sequence_pdb_resids[i]
if (ignore_excess_Nterm_residues_flag == True) and (pdb_resid_i < corresponding_pdb_resid):
pass # we ignore these residues
# If this residue does not have a uniprot crossref
elif uniprot_res_index_i == None:
# Start a list of residues with no uniprot crossref
contiguous_noUP_residues = [ resname_i ]
# Then check the next residue
j = i + 1
while j < len(experimental_sequence):
resname_j = experimental_sequence[j]
uniprot_res_index_j = experimental_sequence_uniprot_res_indices[j]
pdb_resid_j = experimental_sequence_pdb_resids[j]
#print 'len, i, j:', len(experimental_sequence), i, j, pdb_resid_i, pdb_resid_j, contiguous_noUP_residues
# If this residue also has no uniprot crossref, and is contiguous in terms of pdb resnum, then add it to the list, and move on to the next one
if (uniprot_res_index_j == None) and ((pdb_resid_j - pdb_resid_i) == (j-i)):
#print 'adding to list:', j, resname_j
contiguous_noUP_residues.append( resname_j )
pass
# If this residue does have a uniprot crossref, and if it is contiguous in terms of pdb resnum, then we add the list of residues without uniprot crossrefs at this position
elif (uniprot_res_index_j != None) and ((pdb_resid_j - pdb_resid_i) == (j-i)):
#print 'adding to sequence_aln:', j
experimental_sequence_aln[ (uniprot_res_index_j - j) : uniprot_res_index_j ] = contiguous_noUP_residues
experimental_sequence_aln_conflicts[ (uniprot_res_index_j - j) : uniprot_res_index_j ] = 'x' * len(contiguous_noUP_residues)
i = j
break
# If this residue is not contiguous in terms of pdb resnum, go back and check if the first of contiguous_noUP_residues is pdb-contiguous with the previous residue - if so, add contiguous_noUP_residues
elif (pdb_resid_j - pdb_resid_i) != (j-i):
#print 'checking backwards:', j
if (pdb_resid_i - experimental_sequence_pdb_resids[i-1]) == 1:
last_uniprot_res_index = experimental_sequence_uniprot_res_indices[i-1]
experimental_sequence_aln[ last_uniprot_res_index + 1 : last_uniprot_res_index + 1 + (j-i)] = contiguous_noUP_residues
experimental_sequence_aln_conflicts[ last_uniprot_res_index + 1 : last_uniprot_res_index + 1 + (j-i)] = 'x' * len(contiguous_noUP_residues)
i = j - 1
break
# If we have reached the end of experimental_sequence, go back and check if the first of contiguous_noUP_residues is pdb-contiguous with the previous residue - if so, add contiguous_noUP_residues
if j == len(experimental_sequence) - 1:
#print 'THIS IS THE END', len(experimental_sequence), i, j, pdb_resid_i, experimental_sequence_pdb_resids[i], experimental_sequence_pdb_resids[i-1], contiguous_noUP_residues
#print experimental_sequence_pdb_resids
if (pdb_resid_i - experimental_sequence_pdb_resids[i-1]) == 1:
last_uniprot_res_index = experimental_sequence_uniprot_res_indices[i-1]
experimental_sequence_aln[ last_uniprot_res_index + 1 : last_uniprot_res_index + 2 + (j-i)] = contiguous_noUP_residues
experimental_sequence_aln_conflicts[ last_uniprot_res_index + 1 : last_uniprot_res_index + 2 + (j-i)] = 'x' * len(contiguous_noUP_residues)
i = j
break
j += 1
i += 1
# In cases such as 3LAU and 1O6L, additional sequence at end makes experimental_sequence_aln longer than uniprot_sequence by 1
if len(experimental_sequence_aln) != len(uniprot_sequence):
experimental_sequence_aln = experimental_sequence_aln[0:len(uniprot_sequence)]
experimental_sequence_aln_conflicts = experimental_sequence_aln_conflicts[0:len(uniprot_sequence)]
#print ''.join(experimental_sequence_aln_conflicts)
# Now add the various sequence data to kinDB
experimental_sequence_aln = ''.join(experimental_sequence_aln)
experimental_sequence_aln_conflicts = ''.join(experimental_sequence_aln_conflicts)
#print k, pdbid, chainid, len(experimental_sequence), len(observed_sequence)
#print k, pdbid, chainid, experimental_sequence_aln
exp = etree.SubElement(chain_node,'experimental')
etree.SubElement(exp,'sequence').text = '\n' + seqwrap(experimental_sequence)
exp.set('length', str(len(experimental_sequence)))
etree.SubElement(exp,'sequence_aln').text = '\n' + seqwrap(experimental_sequence_aln)
etree.SubElement(exp,'sequence_aln_conflicts').text = '\n' + seqwrap(experimental_sequence_aln_conflicts)
obs = etree.SubElement(chain_node,'observed')
etree.SubElement(obs,'sequence').text = '\n' + seqwrap(observed_sequence)
#if pdbid == '2W1C':
#if pdbid == '3LAU':
#if pdbid == '1O6L':
if pdbid == '3O50':
#sys.exit()
pass
# Only add if the chain matches that in the kinDB
# =======================
# Delete pk_pdb/chain entries with @DELETE_ME attrib. These were cases where the sifts_uniprotAC did not match the uniprotAC in kinDB (derived from the UniProt entry by gather-uniprot.py)
# =======================
pk_pdbs_with_chains_to_be_deleted = set( kinDB.findall('kinase/pk_pdb/chain[@DELETE_ME=""]/..') )
for p in pk_pdbs_with_chains_to_be_deleted:
chains_to_be_deleted = p.findall('chain[@DELETE_ME=""]')
for d in chains_to_be_deleted:
p.remove(d)
# If the pk_pdb node now has no children (almost certainly the case), delete that too
if len(p.getchildren()) == 0:
kinase = p.getparent()
kinase.remove(p)
# write the XML DB
#ofile = open(okinDB_path , 'w')
#ofile.write( etree.tostring(kinDB, pretty_print=True) )
#ofile.close()
print 'Done.'
|
choderalab/TargetExplorer
|
scripts/attic/gather-pdb.py
|
Python
|
gpl-2.0
| 18,125
|
[
"Biopython"
] |
1e48ac30ae08eb11bbc132235e4a8d0ff74ae2a9139696b828152734404e8c9c
|
r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
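For instance, a rough sketch with ``statsmodels`` (``TTestIndPower`` is only an
illustration; the appropriate solver depends on the test being used):
>>> from statsmodels.stats.power import TTestIndPower  # doctest: +SKIP
>>> n_per_group = TTestIndPower().solve_power(effect_size=0.5, alpha=0.05,
...                                           power=0.8)  # doctest: +SKIP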
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
Functions
---------
.. autosummary::
:toctree: generated/
subsample_power
subsample_paired_power
confidence_bound
paired_subsamples
bootstrap_power_curve
Examples
--------
Suppose we wanted to test that there's a relationship between two random
variables, `ind` and `dep`. Let's use random subsampling to estimate the
statistical power of our test with an alpha of 0.1, 0.01, and 0.001.
To control for the pseudo-random number generation, we will use a seed.
When using these functions with your own data, you don't need to include this
step.
>>> import numpy as np
>>> np.random.seed(20)
>>> ind = np.random.randint(0, 20, 15)
>>> ind
array([ 3, 15, 9, 11, 7, 2, 0, 8, 19, 16, 6, 6, 16, 9, 5])
>>> dep = (3 * ind + 5 + np.random.randn(15) * 5).round(3)
>>> dep
array([ 15.617, 47.533, 28.04 , 33.788, 19.602, 12.229, 4.779,
36.838, 67.256, 55.032, 22.157, 7.051, 58.601, 38.664,
18.783])
Let's define a test that will draw a list of sample pairs and determine
if they're correlated. We'll use `scipy.stats.pearsonr` which takes two arrays
and returns a correlation coefficient and a p-value representing the
probability the two distributions are correlated.
>>> from scipy.stats import pearsonr
>>> f = lambda x: pearsonr(x[0], x[1])[1]
Now, let's use random sampling to estimate the power of our test on
the first distribution.
>>> samples = [ind, dep]
>>> f(samples)
3.6459452596563003e-08
In `subsample_power`, we can maintain a paired relationship between samples
by setting `draw_mode` to "matched". We can also set our critical value, so
that we estimate power at critical values of :math:`\alpha = 0.1`, 0.01, and
0.001.
>>> from skbio.stats.power import subsample_power
>>> pwr_100, counts_100 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.1)
>>> pwr_010, counts_010 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.01)
>>> pwr_001, counts_001 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.001)
>>> counts_100
array([3, 4, 5, 6, 7, 8, 9])
>>> pwr_100.mean(0)
array([ 0.4716, 0.8226, 0.9424, 0.986 , 0.9988, 1. , 1. ])
>>> pwr_010.mean(0)
array([ 0.0492, 0.2368, 0.5462, 0.823 , 0.9474, 0.9828, 0.9982])
>>> pwr_001.mean(0)
array([ 0.0028, 0.0174, 0.1262, 0.342 , 0.5928, 0.8256, 0.9594])
Based on this power estimate, as we increase our confidence that we have not
committed a type I error and identified a false positive, the number of samples
we need to be confident that we have not committed a type II error increases.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import viewitems
from future.builtins import range
import collections
import copy
import warnings
import numpy as np
import scipy.stats
def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
alpha_pwr : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
        The power calculated for each subsample at each count. The array has
        `num_runs` rows, one column for each entry in `sample_counts`, and a
        depth equal to the number of p values returned by `test`. If `test`
        returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Let's say we wanted to look at the relationship between the presence of a
specific bacteria, *Gardnerella vaginalis* in the vaginal community, and
the probability of a pre or post menopausal woman experiencing a urinary
tract infection (UTI). Healthy women were enrolled in the study either
before or after menopause, and followed for eight weeks. Participants
submitted fecal samples at the beginning of the study, and were then
followed for clinical symptoms of a UTI. A confirmed UTI was an endpoint
in the study.
Using available literature and 16S sequencing, a set of candidate taxa were
identified as correlated with UTIs, including *G. vaginalis*. In the 100
women (50 premenopausal and 50 postmenopausal samples) who had UTIs, the
presence or absence of *G. vaginalis* was confirmed with quantitative PCR.
We can model the probability that detectable *G. vaginalis* was found in
these samples using a binomial model. (*Note that this is a simulation.*)
>>> import numpy as np
>>> np.random.seed(25)
>>> pre_rate = np.random.binomial(1, 0.85, size=(50,))
>>> pre_rate.sum()
45
>>> pos_rate = np.random.binomial(1, 0.40, size=(50,))
>>> pos_rate.sum()
21
Let's set up a test function, so we can test the probability of
finding a difference in frequency between the two groups. We'll use
`scipy.stats.chisquare` to look for the difference in frequency between
groups.
>>> from scipy.stats import chisquare, nanmean
>>> test = lambda x: chisquare(np.array([x[i].sum() for i in
... xrange(len(x))]))[1]
Let's make sure that our two distributions are different.
>>> round(test([pre_rate, pos_rate]), 3)
0.003
Since there are an even number of samples, and we don't have enough
information to try controlling the data, we'll use
`skbio.stats.power.subsample_power` to compare the two groups. If we had
metadata about other risk factors, like a reproductive history, BMI,
tobacco use, we might want to use
`skbio.stats.power.subsample_paired_power`.
We'll also use "ind" `draw_mode`, since there is no linkage between the
two groups of samples.
>>> from skbio.stats.power import subsample_power
>>> pwr_est, counts = subsample_power(test=test,
... samples=[pre_rate, pos_rate],
... num_iter=100,
... num_runs=5,
... counts_interval=5)
>>> counts
array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
>>> nanmean(pwr_est, 0) # doctest: +NORMALIZE_WHITESPACE
array([ 0.056, 0.074, 0.226, 0.46 , 0.61 , 0.806, 0.952, 1. ,
1. ])
>>> counts[nanmean(pwr_est, 0) > 0.8].min()
30
So, we can estimate that we will see a significant difference in the
presence of *G. vaginalis* in the stool of pre and post women with UTIs if
we have at least 30 samples per group.
If we wanted to test the relationship of a second candidate taxa which is
more rare in the population, but may have a similar effect, based on
available literature, we might also start by trying to identify 30
samples per group where the second candidate taxa is present.
    Suppose, now, that we want to check whether a secondary metabolite seen
    only in the presence of *G. vaginalis* is also correlated with UTIs. We
can model the abundance of the metabolite as a normal distribution.
>>> met_pos = (np.random.randn(pre_rate.sum() + pos_rate.sum()) * 2000 +
... 2500)
>>> met_pos[met_pos < 0] = 0
    >>> met_neg = (np.random.randn(100 - (pre_rate.sum() +
... pos_rate.sum())) * 2000 + 500)
>>> met_neg[met_neg < 0] = 0
Let's compare the populations with a kruskal-wallis test. Physically, there
cannot be a negative concentration of a chemical, so we've set the lower
bound at 0. This means that we can no longer assume our distribution is
normal.
>>> from scipy.stats import kruskal
>>> def metabolite_test(x):
... return kruskal(x[0], x[1])[1]
>>> round(metabolite_test([met_pos, met_neg]), 3)
0.005
    When we perform the statistical test on all the data, we might notice that
    there are twice as many samples from women with *G. vaginalis* as from
    those without. It might make sense to account for this difference when
we're testing power. So, we're going to set the `ratio` parameter, which
lets us draw twice as many samples from women with *G. vaginalis*.
>>> pwr_est2, counts2 = subsample_power(test=metabolite_test,
... samples=[met_pos, met_neg],
... counts_interval=5,
... num_iter=100,
... num_runs=5,
... ratio=[2, 1])
>>> counts2
array([ 5., 10., 15., 20., 25., 30.])
>>> nanmean(pwr_est2, 0)
array([ 0.14 , 0.272, 0.426, 0.646, 0.824, 0.996])
>>> counts2[nanmean(pwr_est2, 0) > 0.8].min()
25.0
When we consider the number of samples per group needed in the power
analysis, we need to look at the ratio. The analysis says that we need 25
samples in the smallest group, in this case, the group of women without
*G. vaginalis* and 50 samples from women with *G. vaginalis* to see a
significant difference in the abundance of our secondary metabolite at 80%
power.
"""
# Checks the inputs
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
    # Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates the power instances
for id2, c in enumerate(sample_counts):
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
mode=draw_mode)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
def subsample_paired_power(test, meta, cat, control_cats, order=None,
strict_match=True, alpha_pwr=0.05,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Estimates power iteratively using samples with matching metadata
Parameters
----------
test : function
The statistical test which accepts a list of arrays sample ids and
returns a p value.
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str
The metadata category being varied between samples.
control_cats : list
The metadata categories to be used as controls. For example, if
you wanted to vary age (`cat` = "AGE"), you might want to control
for gender and health status (i.e. `control_cats` = ["SEX",
"HEALTHY"]).
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using
`control_cats`. If a sample within `meta` has an undefined value (NaN)
for any of the columns in `control_cats`, the sample will not be
considered as having a match and will be ignored when `strict_match`
is True. If `strict_match` is False, missing values (NaN) in the
`control_cats` can be considered matches.
alpha_pwr : float, optional
The critical value used to calculate the power.
max_counts : positive int, optional
The maximum number of observations per sample to draw
for effect size calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
        The power calculated for each subsample at each count. The array has
        `num_runs` rows, one column for each entry in `sample_counts`, and a
        depth equal to the number of p values returned by `test`. If `test`
        returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Assume you are interested in the role of a specific cytokine of protein
translocation in myeloid-lineage cells. You are able to culture two
macrophage lineages (bone marrow derived phagocytes and
peritoneally-derived macrophages). Due to unfortunate circumstances, your
growth media must be acquired from multiple sources (lab, company A,
company B). Also unfortunate, you must use labor-intensive low throughput
assays. You have some preliminary measurements, and you'd like to
predict how many (more) cells you need to analyze for 80% power.
You have information about 60 cells, which we'll simulate below. Note
that we are setting a random seed value for consistency.
>>> import numpy as np
>>> import pandas as pd
>>> np.random.seed(25)
>>> data = pd.DataFrame.from_dict({
... 'CELL_LINE': np.random.binomial(1, 0.5, size=(60,)),
... 'SOURCE': np.random.binomial(2, 0.33, size=(60,)),
... 'TREATMENT': np.hstack((np.zeros((30)), np.ones((30)))),
... 'INCUBATOR': np.random.binomial(1, 0.2, size=(60,))})
>>> data['OUTCOME'] = (0.25 + data.TREATMENT * 0.25) + \
... np.random.randn(60) * (0.1 + data.SOURCE/10 + data.CELL_LINE/5)
>>> data.loc[data.OUTCOME < 0, 'OUTCOME'] = 0
>>> data.loc[data.OUTCOME > 1, 'OUTCOME'] = 1
We will approach this by assuming that the distribution of our outcome is
not normally distributed, and apply a kruskal-wallis test to compare
between the cytokine treated and untreated cells.
>>> from scipy.stats import kruskal
>>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]
Let's check that cytokine treatment has a significant effect across all
the cells.
>>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
>>> f(treatment_stat)
0.0019386336266250209
Now, let's pick the control categories. It seems reasonable to assume there
may be an effect of cell line on the treatment outcome, which may be
attributed to differences in receptor expression. It may also be possible
    that there are differences due to cytokine source. Incubators were maintained
under the same conditions throughout the experiment, within one degree of
temperature difference at any given time, and the same level of CO2.
So, at least initially, let's ignore differences due to the incubator.
It's recommended that as a first pass analysis, control variables be
selected based on an idea of what may be biologically relevant to the
system, although further iteration might encourage the consideration of
variable with effect sizes similar, or larger than the variable of
interest.
>>> control_cats = ['SOURCE', 'CELL_LINE']
>>> from skbio.stats.power import subsample_paired_power
>>> pwr, cnt = subsample_paired_power(test=f,
... meta=data,
... cat='TREATMENT',
... control_cats=control_cats,
... counts_interval=5,
... num_iter=100,
... num_runs=5)
>>> cnt
array([ 5., 10., 15., 20.])
>>> pwr.mean(0)
array([ 0.196, 0.356, 0.642, 0.87 ])
>>> pwr.std(0).round(3)
array([ 0.019, 0.021, 0.044, 0.026])
Estimating off the power curve, it looks like 20 cells per group may
provide adequate power for this experiment, although the large variance
in power might suggest extending the curves or increasing the number of
samples per group.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks for the number of sampling pairs available
meta_pairs, index = _identify_sample_groups(meta, cat, control_cats, order,
strict_match)
min_obs = min([_get_min_size(meta, cat, control_cats, order, strict_match),
np.floor(len(index)*0.9)])
sub_ids = _draw_paired_samples(meta_pairs, index, min_obs)
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=sub_ids,
draw_mode='matched',
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
    # Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates power instances
for id2, c in enumerate(sample_counts):
for id1 in range(num_runs):
ps = np.zeros((num_p, num_iter))
for id3 in range(num_iter):
subs = _draw_paired_samples(meta_pairs, index, c)
ps[:, id3] = test(subs)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
elements in specified axis.
axis : positive int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
bound = scipy.stats.nanstd(vec, axis=axis) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
alpha=0.05, mode='ind', num_iter=500, num_runs=10):
r"""Repeatedly calculates the power curve for a specified alpha level
.. note:: Deprecated in scikit-bio 0.2.3-dev
``bootstrap_power_curve`` will be removed in scikit-bio 0.4.1. It is
        deprecated in favor of using ``subsample_power`` or
        ``subsample_paired_power`` to calculate a power array, and then using
``confidence_bound`` to perform bootstrapping.
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists or arrays) and returns a p-value.
samples : array_like
samples can be a list of lists or an array where each sublist or row in
the array corresponds to a sampled group.
sample_counts : 1-D array_like
        A vector of the number of samples which should be sampled in each
        curve.
    ratio : 1-D array_like, optional
        The fraction of the sample counts which should be assigned to each
        group. This must be None, or the same length as `samples`. If `ratio`
        is None, the same number of observations are drawn from each sample.
alpha : float, optional
The default is 0.05. The critical value for calculating power.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
    power_mean : 1-D array
        The mean power over the iterations at each sample count.
    power_bound : 1-D array
        The confidence bound around the mean power.
Examples
--------
Suppose we have 100 samples randomly drawn from two normal distributions,
    the first with mean 0 and standard deviation 1, and the second with mean 1
    and standard deviation 1.5.
>>> import numpy as np
>>> np.random.seed(20)
>>> samples_1 = np.random.randn(100)
>>> samples_2 = 1.5 * np.random.randn(100) + 1
We want to test the statistical power of an independent two sample t-test
comparing the two populations. We can define an anonymous function, `f`,
to wrap the scipy function for independent t tests,
`scipy.stats.ttest_ind`. The test function will take a list of value
vectors and return a p value.
>>> from scipy.stats import ttest_ind
>>> f = lambda x: ttest_ind(x[0], x[1])[1]
Now, we can determine the statistical power, or the probability that we do
not have a false negative given that we do not have a false positive, by
varying a number of subsamples.
>>> from skbio.stats.power import bootstrap_power_curve
>>> sample_counts = np.arange(5, 80, 5)
>>> power_mean, power_bound = bootstrap_power_curve(f,
... [samples_1, samples_2],
... sample_counts)
>>> sample_counts[power_mean - power_bound.round(3) > .80].min()
20
Based on this analysis, it looks like we need at least 20 observations
from each distribution to avoid committing a type II error more than 20%
of the time.
"""
warnings.warn("skbio.stats.power.bootstrap_power_curve is deprecated. "
"Please use skbio.stats.power.subsample_power or "
"skbio.stats.power.subsample_paired_power followed by "
"confidence_bound.", DeprecationWarning)
# Corrects the alpha value into a matrix
alpha = np.ones((num_runs)) * alpha
# Boot straps the power curve
power = _calculate_power_curve(test=test,
samples=samples,
sample_counts=sample_counts,
ratio=ratio,
num_iter=num_iter,
alpha=alpha,
mode=mode)
# Calculates two summary statistics
power_mean = power.mean(0)
power_bound = confidence_bound(power, alpha=alpha[0], axis=0)
# Calculates summary statistics
return power_mean, power_bound
def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
r"""Draws a list of samples varied by `cat` and matched for `control_cats`
This function is designed to provide controlled samples, based on a
metadata category. For example, one could control for age, sex, education
level, and diet type while measuring exercise frequency.
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
ids : array
a set of ids which satisfy the criteria. These are not grouped by
`cat`. An empty array indicates there are no sample ids which satisfy
the requirements.
Examples
--------
If we have a mapping file for a set of random individuals looking at
housing, sex, age and antibiotic use.
>>> import pandas as pd
>>> import numpy as np
>>> meta = {'SW': {'HOUSING': '2', 'SEX': 'M', 'AGE': np.nan, 'ABX': 'Y'},
... 'TS': {'HOUSING': '2', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'CB': {'HOUSING': '3', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'BB': {'HOUSING': '1', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'}}
>>> meta = pd.DataFrame.from_dict(meta, orient="index")
>>> meta #doctest: +SKIP
ABX HOUSING AGE SEX
BB Y 1 40s M
CB Y 3 40s M
SW Y 2 NaN M
TS Y 2 40s M
We may want to vary an individual's housing situation, while holding
constant their age, sex and antibiotic use so we can estimate the effect
size for housing, and later compare it to the effects of other variables.
>>> from skbio.stats.power import paired_subsamples
>>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
>>> np.hstack(ids) #doctest: +ELLIPSIS
array(['BB', 'TS', 'CB']...)
So, for this set of data, we can match TS, CB, and BB based on their age,
sex, and antibiotic use. SW cannot be matched in either group because
`strict_match` was true, and there is missing AGE data for this sample.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks the groups in the category
min_obs = _get_min_size(meta, cat, control_cats, order, strict_match)
# Identifies all possible subsamples
meta_pairs, index = _identify_sample_groups(meta=meta,
cat=cat,
control_cats=control_cats,
order=order,
strict_match=strict_match)
# Draws paired ids
ids = _draw_paired_samples(meta_pairs=meta_pairs,
index=index,
num_samps=min_obs)
return ids
def _get_min_size(meta, cat, control_cats, order, strict_match):
"""Determines the smallest group represented"""
if strict_match:
all_cats = copy.deepcopy(control_cats)
all_cats.append(cat)
meta = meta[all_cats].dropna()
return meta.groupby(cat).count().loc[order, control_cats[0]].min()
def _check_nans(x, switch=False):
    r"""Returns False if x is a nan and True if x is a string or number
"""
if isinstance(x, str):
return True
elif isinstance(x, (float, int)):
return not np.isnan(x)
elif switch and isinstance(x, (list, tuple)) and np.nan in x:
return False
elif switch and isinstance(x, (list, tuple)):
return True
else:
raise TypeError('input must be a string, float or a nan')
def _calculate_power(p_values, alpha=0.05):
r"""Calculates statistical power empirically
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
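    Examples
    --------
    A quick sketch with hand-picked values; two of the three p values fall
    below the critical value, so the empirical power is 2/3:
    >>> import numpy as np
    >>> _calculate_power(np.array([0.01, 0.2, 0.03]), alpha=0.05)
    array([ 0.66666667])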
"""
p_values = np.atleast_2d(p_values)
w = (p_values < alpha).sum(axis=1)/p_values.shape[1]
return w
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
num_iter=100):
r"""Compares two distribution arrays iteratively
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists) and returns a p-value. This can be a one-dimensional
array, or a float.
samples : list of arrays
A list where each 1-d array represents a sample. If `mode` is
"matched", there must be an equal number of observations in each
sample.
num_p : positive int, optional
The number of p-values returned by the test.
counts : positive int or 1-D array, optional
The number of samples to draw from each distribution.
If this is a 1-D array, the length must correspond to the number of
samples. The function will not draw more observations than are in a
sample. In "matched" `mode`, the same number of observations will be
drawn from each group.
mode : {"ind", "matched", "paired"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
        The number of p-values to generate for each point on the curve. The
        default is 100.
Returns
-------
p_values : array
The p-values for the subsampled tests. If `test` returned a single
p value, p_values is a one-dimensional array. If `test` returned an
        array, `p_values` has dimensions `num_p` x `num_iter`.
Raises
------
ValueError
If mode is not "ind" or "matched".
ValueError
If the arrays in samples are not the same length in "matched" mode.
ValueError
If counts is a 1-D array and counts and samples are different lengths.
"""
    # Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
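    # In "matched" mode a single set of row positions is drawn and reused for
    # every sample so paired observations stay together; in "ind" mode each
    # sample is subsampled independently.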
for idx in range(num_iter):
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=False)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i], replace=False)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
def _check_subsample_power_inputs(test, samples, draw_mode='ind', ratio=None,
max_counts=50, counts_interval=10,
min_counts=None):
r"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
"""
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_ids = id_counts.min()
# Determines the number of groups
num_groups = len(samples)
# Checks that "matched" mode is handled appropriately
if draw_mode == "matched":
for id_ in samples:
if not len(id_) == num_ids:
raise ValueError('Each vector in samples must be the same '
'length in "matched" draw_mode.')
# Checks the number of counts is appropriate
if min_counts is None:
min_counts = counts_interval
if (max_counts - min_counts) < counts_interval:
raise ValueError("No subsamples of the specified size can be drawn.")
# Checks the ratio argument is sane
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
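    # ratio_counts[i] is the largest base count for which group i can still
    # supply counts * ratio[i] observations; taking the minimum below caps the
    # subsample sizes so no group is asked for more observations than it has.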
ratio_counts = np.array([id_counts[i] / ratio[i]
for i in range(num_groups)])
largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
    # Calculates the sample counts to evaluate
sample_counts = np.arange(min_counts,
min(max_counts, largest),
counts_interval)
return ratio, num_p, sample_counts
def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
"""Aggregates samples matches for `control_cats` that vary by `cat`
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
    index : 1-D array
        An array of integer keys into `meta_pairs`. Each key is repeated once
        for every complete set of matched samples that `control_cat`-group can
        supply, so drawing from `index` weights the groups by the number of
        matched sets they have available.
"""
# Sets up variables to be filled
meta_pairs = {}
index = []
i1 = 0
# Groups the data by the control groups
ctrl_groups = meta.groupby(control_cats).groups
# Identifies the samples that satisfy the control pairs
for (g, ids) in viewitems(ctrl_groups):
# If strict_match, Skips over data that has nans
if not _check_nans(g, switch=True) and strict_match:
continue
# Draws the samples that are matched for control cats
m_ids = meta.loc[ids].groupby(cat).groups
# Checks if samples from the cat groups are represented in those
# Samples
        id_vecs = [m_ids[o] for o in order if o in m_ids]
        # If all groups are represented, the index and results are retained
        if len(id_vecs) == len(order):
min_vec = np.array([len(v) for v in id_vecs])
loc_vec = np.arange(0, min_vec.min())
meta_pairs[i1] = id_vecs
index.append(np.zeros(loc_vec.shape) + i1)
i1 = i1 + 1
# If the groups are not represented, an empty array gets passed
else:
index.append(np.array([]))
# Converts index to a 1d array
index = np.hstack(index)
    # If no matched groups were found, set up meta_pairs with a 'no' sentinel key.
if not meta_pairs:
meta_pairs['no'] = order
return meta_pairs, index
def _draw_paired_samples(meta_pairs, index, num_samps):
"""Draws a random set of ids from a matched list
Parameters
----------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
    index : 1-D array
        An array of integer keys into `meta_pairs`, with each key repeated
        once for every complete set of matched samples that group can supply.
    num_samps : int
        The number of matched sets of ids to draw.
Returns
-------
ids : list
A set of randomly selected ids groups from each group.
"""
# Handles an empty paired vector
if 'no' in meta_pairs:
return [np.array([]) for o in meta_pairs['no']]
# Identifies the absolute positions of the control group being drawn
set_pos = np.random.choice(index, int(num_samps),
replace=False).astype(int)
subs = []
# Draws the other groups
for set_, num_ in viewitems(collections.Counter(set_pos)):
r2 = [np.random.choice(col, num_, replace=False) for col in
meta_pairs[set_]]
subs.append(r2)
ids = [np.hstack(ids) for ids in zip(*subs)]
return ids
def _calculate_power_curve(test, samples, sample_counts, ratio=None,
mode='ind', num_iter=1000, alpha=0.05):
r"""Generates an empirical power curve for the samples.
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values and
returns a p value.
samples : array_like
`samples` can be a list of lists or an array where each sublist or row
in the array corresponds to a sampled group.
sample_counts : 1-D array
A vector of the number of samples which should be sampled in each
curve.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample.
num_iter : int
The default is 1000. The number of p-values to generate for each point
on the curve.
Returns
-------
p_values : array
The p-values associated with the input sample counts.
Raises
------
ValueError
If ratio is an array and ratio is not the same length as samples
"""
# Casts array-likes to arrays
sample_counts = np.asarray(sample_counts)
# Determines the number of groups
num_groups = len(samples)
num_samps = len(sample_counts)
if isinstance(alpha, float):
vec = True
pwr = np.zeros((num_samps))
alpha = np.array([alpha])
else:
vec = False
num_crit = alpha.shape[0]
pwr = np.zeros((num_crit, num_samps))
# Checks the ratio argument
if ratio is None:
ratio = np.ones((num_groups))
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
# Loops through the sample sizes
for id2, s in enumerate(sample_counts):
count = np.round(s * ratio, 0).astype(int)
for id1, a in enumerate(alpha):
ps = _compare_distributions(test=test,
samples=samples,
counts=count,
num_p=1,
num_iter=num_iter,
mode=mode)
if vec:
pwr[id2] = _calculate_power(ps, a)
else:
pwr[id1, id2] = _calculate_power(ps, a)
return pwr
|
johnchase/scikit-bio
|
skbio/stats/power.py
|
Python
|
bsd-3-clause
| 51,572
|
[
"scikit-bio"
] |
5d8a683661c31db6315728405f12db648deebf192a91057bd5114be9024ff20a
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
**espressopp.analysis.MaxPID**
******************************
.. function:: espressopp.analysis.MaxPID(system)
:param system: the system object for which the maximal particle id is computed
:type system: espressopp.System
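Usage sketch (illustrative; assumes the usual ``compute()`` interface of
espressopp analysis observables):
>>> max_pid = espressopp.analysis.MaxPID(system)
>>> max_pid.compute()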
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_MaxPID
class MaxPIDLocal(ObservableLocal, analysis_MaxPID):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_MaxPID, system)
if pmi.isController :
class MaxPID(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.MaxPIDLocal'
)
|
capoe/espressopp.soap
|
src/analysis/MaxPID.py
|
Python
|
gpl-3.0
| 1,638
|
[
"ESPResSo"
] |
88927e9542d9b6b5535a82fa09faeacbc3630416f6534a0b741f94f6164c25c3
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.orca.learn.optimizers.optimizers_impl import *
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/learn/optimizers/__init__.py
|
Python
|
apache-2.0
| 645
|
[
"ORCA"
] |
02354115448aa7b7c62d20f39aae41d4302d6002448439568371db2b18caa4a6
|
"""
Project
FPGA-Imaging-Library
Design
Shear
Function
Shearing an image by your given sh.
Module
Software simulation.
Version
1.0
Modified
2015-05-28
Copyright (C) 2015 Tianyu Dai (dtysky) <dtysky@outlook.com>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Homepage for this project:
http://fil.dtysky.moe
Sources for this project:
https://github.com/dtysky/FPGA-Imaging-Library
My e-mail:
dtysky@outlook.com
My blog:
http://dtysky.moe
"""
__author__ = 'Tianyu Dai (dtysky)'
from PIL import Image
import os, json
from ctypes import *
user32 = windll.LoadLibrary('user32.dll')
MessageBox = lambda x:user32.MessageBoxA(0, x, 'Error', 0)
FileFormat = ['.jpg', '.bmp']
Conf = json.load(open('../ImageForTest/conf.json', 'r'))['conf']
Debug = False
def show_error(e):
MessageBox(e)
exit(0)
def name_format(root, name, ex, conf):
return '%s-%sx%s-soft%s' % (name, conf['ush'], conf['vsh'], '.bmp')
def address_gen(u, v, ush, vsh):
return u + int(round(vsh * v)), v + int(round(ush * u))
def in_range(pos, crange):
return pos[0] >= crange[2] and pos[0] <= crange[3] and pos[1] >= crange[0] and pos[1] <= crange[1]
# x = u + shv * v
# y = v + shu * u
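# Illustrative check of address_gen below (values are made up): with ush = 0.5
# and vsh = 0, output pixel (u, v) = (10, 4) reads source address
# (x, y) = (10 + round(0 * 4), 4 + round(0.5 * 10)) = (10, 9).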
def transform(im, conf):
mode = im.mode
ush, vsh = (conf['ush'], conf['vsh'])
if mode not in ['L']:
        show_error('Simulations for this module just support gray-scale images, check your images !')
if im.size != (512, 512):
show_error('Simulations for this module just support 512x512 images, check your images !')
if ush >= 64 or ush <= -64 or vsh >= 64 or vsh <= -64:
        show_error('sh values for simulations of this module must be within -63.x to 63.x, check your conf !')
data_src = im.getdata()
xsize, ysize = im.size
data_res = list(Image.new('L', (xsize, ysize), 0).getdata())
crange = (0, xsize - 1, 0, ysize - 1)
for v in xrange(ysize):
for u in xrange(xsize):
xaddr, yaddr = address_gen(u, v, ush, vsh)
data_res[v * xsize + u] = data_src[yaddr * xsize + xaddr]\
if in_range((xaddr, yaddr), crange) else 0
im_res = Image.new(mode, im.size)
im_res.putdata(data_res)
return im_res
def debug(im, conf):
mode = im.mode
ush, vsh = (conf['ush'], conf['vsh'])
if mode not in ['L']:
        show_error('Simulations for this module just support gray-scale images, check your images !')
if im.size != (512, 512):
show_error('Simulations for this module just support 512x512 images, check your images !')
if ush >= 64 or ush <= -64 or vsh >= 64 or vsh <= -64:
        show_error('sh values for simulations of this module must be within -63.x to 63.x, check your conf !')
data_src = im.getdata()
xsize, ysize = im.size
data_res = list(Image.new('L', (xsize, ysize), 0).getdata())
crange = (0, xsize - 1, 0, ysize - 1)
for v in xrange(ysize):
for u in xrange(xsize):
xaddr, yaddr = address_gen(u, v, ush, vsh)
            data_res[v * xsize + u] = '%s\n' % (
str(data_src[yaddr * xsize + xaddr]) \
if in_range((xaddr, yaddr), crange) else '0')
data_res = ''.join(data_res)
return data_res
FileAll = []
for root, dirs, files in os.walk('../ImageForTest'):
for f in files:
name, ex = os.path.splitext(f)
if ex in FileFormat:
FileAll.append((root+'/', name, ex))
for root, name, ex in FileAll:
im_src = Image.open(root + name + ex)
for c in Conf:
if Debug:
open('../SimResCheck/%s.dat' \
% name_format(root, name, ex, c), 'w').write(debug(im_src, c))
continue
transform(im_src, c).save('../SimResCheck/%s' % name_format(root, name, ex, c))
|
jaechoon2/FPGA-Imaging-Library
|
Geometry/Shear/SoftwareSim/sim.py
|
Python
|
lgpl-2.1
| 4,117
|
[
"MOE"
] |
4d0479cc3f1d10042db1c1f2b440eafa4ed8a8db73a6b5936928d4f2302fabd8
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy import signals
from hyperspy import components1d
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class TestRemoveBackground1DGaussian:
def setup_method(self, method):
gaussian = components1d.Gaussian()
gaussian.A.value = 10
gaussian.centre.value = 10
gaussian.sigma.value = 1
self.signal = signals.Signal1D(
gaussian.function(np.arange(0, 20, 0.01)))
self.signal.axes_manager[0].scale = 0.01
self.signal.metadata.Signal.binned = False
def test_background_remove_gaussian(self):
s1 = self.signal.remove_background(
signal_range=(None, None),
background_type='Gaussian',
show_progressbar=None)
assert np.allclose(s1.data, np.zeros(len(s1.data)))
def test_background_remove_gaussian_full_fit(self):
s1 = self.signal.remove_background(
signal_range=(None, None),
background_type='Gaussian',
fast=False)
assert np.allclose(s1.data, np.zeros(len(s1.data)))
@lazifyTestClass
class TestRemoveBackground1DPowerLaw:
def setup_method(self, method):
pl = components1d.PowerLaw()
pl.A.value = 1e10
pl.r.value = 3
self.signal = signals.Signal1D(
pl.function(np.arange(100, 200)))
self.signal.axes_manager[0].offset = 100
self.signal.metadata.Signal.binned = False
self.signal_noisy = self.signal.deepcopy()
self.signal_noisy.add_gaussian_noise(1)
self.atol = 0.04 * abs(self.signal.data).max()
self.atol_zero_fill = 0.04 * abs(self.signal.isig[10:].data).max()
def test_background_remove_pl(self):
s1 = self.signal.remove_background(
signal_range=(None, None),
background_type='PowerLaw',
show_progressbar=None)
# since we compare to zero, rtol can't be used (see np.allclose doc)
assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)
assert s1.axes_manager.navigation_dimension == 0
def test_background_remove_pl_zero(self):
s1 = self.signal_noisy.remove_background(
signal_range=(110.0, 190.0),
background_type='PowerLaw',
zero_fill=True,
show_progressbar=None)
# since we compare to zero, rtol can't be used (see np.allclose doc)
assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
atol=self.atol_zero_fill)
assert np.allclose(s1.data[:10], np.zeros(10))
def test_background_remove_pl_int(self):
self.signal.change_dtype("int")
s1 = self.signal.remove_background(
signal_range=(None, None),
background_type='PowerLaw',
show_progressbar=None)
# since we compare to zero, rtol can't be used (see np.allclose doc)
assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)
def test_background_remove_pl_int_zero(self):
self.signal_noisy.change_dtype("int")
s1 = self.signal_noisy.remove_background(
signal_range=(110.0, 190.0),
background_type='PowerLaw',
zero_fill=True,
show_progressbar=None)
# since we compare to zero, rtol can't be used (see np.allclose doc)
assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
atol=self.atol_zero_fill)
assert np.allclose(s1.data[:10], np.zeros(10))
|
magnunor/hyperspy
|
hyperspy/tests/signal/test_remove_background.py
|
Python
|
gpl-3.0
| 4,285
|
[
"Gaussian"
] |
afe69defdfed37b845b2e17787ca9acf7a6f1d4c2c308a641e81ad2b2445387c
|
""" Generation of sine-Gaussian bursty type things
"""
import pycbc.types
import numpy
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f):
""" Generate a Fourier domain sine-Gaussian
Parameters
----------
amp: float
Amplitude of the sine-Gaussian
quality: float
The quality factor
central_frequency: float
The central frequency of the sine-Gaussian
fmin: float
        The minimum frequency to generate the sine-Gaussian.
    fmax: float
        The maximum frequency to generate the sine-Gaussian. This determines
        the length of the output vector.
delta_f: float
The size of the frequency step
Returns
-------
    sg: pycbc.types.FrequencySeries
A Fourier domain sine-Gaussian
"""
f = numpy.arange(fmin, fmax, delta_f)
kmax = int(fmax / delta_f)
kmin = int(fmin / delta_f)
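    # The lines below implement the expression directly: tau is the Gaussian
    # envelope width in time implied by the quality factor, A sets the overall
    # amplitude scale, and d is a Gaussian in frequency centred on
    # central_frequency; the (1 + exp(-Q^2 f / f0)) factor is essentially 1
    # near the peak unless the quality factor is small.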
tau = quality / 2 / numpy.pi / central_frequency
A = amp * numpy.pi ** 0.5 / 2 * tau
d = A * numpy.exp(-(numpy.pi * tau * (f - central_frequency))**2.0)
d *= (1 + numpy.exp(-quality ** 2.0 * f / central_frequency))
v = numpy.zeros(kmax, dtype=numpy.complex128)
v[kmin:kmax] = d[:]
return pycbc.types.FrequencySeries(v, delta_f=delta_f)
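# A minimal usage sketch (the parameter values below are illustrative only):
#     hf = fd_sine_gaussian(1.0, 20.0, 100.0, 30.0, 512.0, 0.25)
#     # hf is a FrequencySeries of length int(512.0 / 0.25) with delta_f 0.25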
|
tjma12/pycbc
|
pycbc/waveform/sinegauss.py
|
Python
|
gpl-3.0
| 1,270
|
[
"Gaussian"
] |
dda5b931678ceac5a9e288288f07e659331292025fc0c0fb2e62fb554b024123
|
######################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Demonstrate the linear state-space model with time-varying dynamics.
The observation is a 1-D signal with changing frequency. The frequency
oscillates so that it can be learnt too. Missing values are used to create a
few gaps in the data, so the task is to reconstruct the gaps.
For reference, see the following publication:
(TODO)
Some functions in this module are re-usable:
* ``model`` can be used to construct the LSSM with switching dynamics.
* ``infer`` can be used to apply the model to given data.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
from bayespy.nodes import (GaussianMarkovChain,
VaryingGaussianMarkovChain,
GaussianARD,
Gamma,
SumMultiply)
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
from bayespy.inference.vmp.nodes.gaussian import GaussianMoments
import bayespy.plot.plotting as bpplt
def model(M, N, D, K):
"""
Construct the linear state-space model with time-varying dynamics
For reference, see the following publication:
(TODO)
"""
#
# The model block for the latent mixing weight process
#
# Dynamics matrix with ARD
# beta : (K) x ()
beta = Gamma(1e-5,
1e-5,
plates=(K,),
name='beta')
# B : (K) x (K)
B = GaussianARD(np.identity(K),
beta,
shape=(K,),
plates=(K,),
name='B',
plotter=bpplt.GaussianHintonPlotter(rows=0,
cols=1,
scale=0),
initialize=False)
B.initialize_from_value(np.identity(K))
# Mixing weight process, that is, the weights in the linear combination of
# state dynamics matrices
# S : () x (N,K)
S = GaussianMarkovChain(np.ones(K),
1e-6*np.identity(K),
B,
np.ones(K),
n=N,
name='S',
plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
initialize=False)
s = 10*np.random.randn(N,K)
s[:,0] = 10
S.initialize_from_value(s)
#
# The model block for the latent states
#
# Projection matrix of the dynamics matrix
# alpha : (K) x ()
alpha = Gamma(1e-5,
1e-5,
plates=(D,K),
name='alpha')
alpha.initialize_from_value(1*np.ones((D,K)))
# A : (D) x (D,K)
A = GaussianARD(0,
alpha,
shape=(D,K),
plates=(D,),
name='A',
plotter=bpplt.GaussianHintonPlotter(rows=0,
cols=1,
scale=0),
initialize=False)
# Initialize S and A such that A*S is almost an identity matrix
a = np.zeros((D,D,K))
a[np.arange(D),np.arange(D),np.zeros(D,dtype=int)] = 1
a[:,:,0] = np.identity(D) / s[0,0]
a[:,:,1:] = 0.1/s[0,0]*np.random.randn(D,D,K-1)
A.initialize_from_value(a)
# Latent states with dynamics
# X : () x (N,D)
X = VaryingGaussianMarkovChain(np.zeros(D), # mean of x0
1e-3*np.identity(D), # prec of x0
A, # dynamics matrices
S._convert(GaussianMoments)[1:], # temporal weights
np.ones(D), # innovation
n=N, # time instances
name='X',
plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
initialize=False)
X.initialize_from_value(np.random.randn(N,D))
#
# The model block for observations
#
# Mixing matrix from latent space to observation space using ARD
# gamma : (D) x ()
gamma = Gamma(1e-5,
1e-5,
plates=(D,),
name='gamma')
gamma.initialize_from_value(1e-2*np.ones(D))
# C : (M,1) x (D)
C = GaussianARD(0,
gamma,
shape=(D,),
plates=(M,1),
name='C',
plotter=bpplt.GaussianHintonPlotter(rows=0,
cols=2,
scale=0))
C.initialize_from_value(np.random.randn(M,1,D))
# Noiseless process
# F : (M,N) x ()
F = SumMultiply('d,d',
C,
X,
name='F')
# Observation noise
# tau : () x ()
tau = Gamma(1e-5,
1e-5,
name='tau')
tau.initialize_from_value(1e2)
# Observations
# Y: (M,N) x ()
Y = GaussianARD(F,
tau,
name='Y')
# Construct inference machine
Q = VB(Y, F, C, gamma, X, A, alpha, tau, S, B, beta)
return Q
def infer(y, D, K,
mask=True,
maxiter=100,
rotate=False,
debug=False,
precompute=False,
update_hyper=0,
start_rotating=0,
start_rotating_weights=0,
plot_C=True,
monitor=True,
autosave=None):
"""
Run VB inference for linear state-space model with time-varying dynamics.
"""
y = utils.atleast_nd(y, 2)
(M, N) = np.shape(y)
# Construct the model
Q = model(M, N, D, K)
if not plot_C:
Q['C'].set_plotter(None)
if autosave is not None:
Q.set_autosave(autosave, iterations=10)
# Observe data
Q['Y'].observe(y, mask=mask)
# Set up rotation speed-up
if rotate:
        # Initial rotation of the D-dimensional state space (X, A, C);
        # does not update hyperparameters
rotA_init = transformations.RotateGaussianARD(Q['A'],
axis=0,
precompute=precompute)
rotX_init = transformations.RotateVaryingMarkovChain(Q['X'],
Q['A'],
Q['S']._convert(GaussianMoments)[...,1:,None],
rotA_init)
rotC_init = transformations.RotateGaussianARD(Q['C'],
axis=0,
precompute=precompute)
R_X_init = transformations.RotationOptimizer(rotX_init, rotC_init, D)
# Rotate the D-dimensional state space (X, A, C)
rotA = transformations.RotateGaussianARD(Q['A'],
Q['alpha'],
axis=0,
precompute=precompute)
rotX = transformations.RotateVaryingMarkovChain(Q['X'],
Q['A'],
Q['S']._convert(GaussianMoments)[...,1:,None],
rotA)
rotC = transformations.RotateGaussianARD(Q['C'],
Q['gamma'],
axis=0,
precompute=precompute)
R_X = transformations.RotationOptimizer(rotX, rotC, D)
# Rotate the K-dimensional latent dynamics space (S, A, C)
rotB = transformations.RotateGaussianARD(Q['B'],
Q['beta'],
precompute=precompute)
rotS = transformations.RotateGaussianMarkovChain(Q['S'], rotB)
rotA = transformations.RotateGaussianARD(Q['A'],
Q['alpha'],
axis=-1,
precompute=precompute)
R_S = transformations.RotationOptimizer(rotS, rotA, K)
if debug:
rotate_kwargs = {'maxiter': 10,
'check_bound': True,
'check_gradient': True}
else:
rotate_kwargs = {'maxiter': 10}
# Plot initial distributions
if monitor:
Q.plot()
# Run inference using rotations
for ind in range(maxiter):
if ind < update_hyper:
# It might be a good idea to learn the lower level nodes a bit
# before starting to learn the upper level nodes.
Q.update('X', 'C', 'A', 'tau', plot=monitor)
if rotate and ind >= start_rotating:
# Use the rotation which does not update alpha nor beta
R_X_init.rotate(**rotate_kwargs)
else:
Q.update(plot=monitor)
if rotate and ind >= start_rotating:
# It might be a good idea to not rotate immediately because it
# might lead to pruning out components too efficiently before
# even estimating them roughly
R_X.rotate(**rotate_kwargs)
if ind >= start_rotating_weights:
R_S.rotate(**rotate_kwargs)
# Return the posterior approximation
return Q
def simulate_data(N):
"""
Generate a signal with changing frequency
"""
t = np.arange(N)
a = 0.1 * 2*np.pi # base frequency
b = 0.01 * 2*np.pi # frequency of the frequency change
c = 8 # magnitude of the frequency change
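    # The phase is a*(t + c*sin(b*t)), so the instantaneous angular frequency
    # is a*(1 + b*c*cos(b*t)) and oscillates around the base frequency a.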
f = np.sin( a * (t + c*np.sin(b*t)) )
y = f + 0.1*np.random.randn(N)
return (y, f)
def demo(N=1000, D=5, K=4, seed=42, maxiter=200, rotate=True, debug=False,
precompute=False, plot=True):
# Seed for random number generator
if seed is not None:
np.random.seed(seed)
# Create data
(y, f) = simulate_data(N)
# Create some gaps
mask_gaps = utils.trues(N)
for m in range(100, N, 140):
start = m
end = min(m+15, N-1)
mask_gaps[start:end] = False
# Randomly missing values
mask_random = np.logical_or(random.mask(N, p=0.8),
np.logical_not(mask_gaps))
# Remove the observations
mask = np.logical_and(mask_gaps, mask_random)
y[~mask] = np.nan # BayesPy doesn't require NaNs, they're just for plotting.
# Add row axes
y = y[None,...]
f = f[None,...]
mask = mask[None,...]
mask_gaps = mask_gaps[None,...]
mask_random = mask_random[None,...]
# Run the method
Q = infer(y, D, K,
mask=mask,
maxiter=maxiter,
rotate=rotate,
debug=debug,
precompute=precompute,
update_hyper=10,
start_rotating_weights=20,
monitor=True)
if plot:
# Plot observations
plt.figure()
bpplt.timeseries_normal(Q['F'], scale=2)
bpplt.timeseries(f, 'b-')
bpplt.timeseries(y, 'r.')
plt.ylim([-2, 2])
# Plot latent space
Q.plot('X')
# Plot mixing weight space
Q.plot('S')
# Compute RMSE
rmse_random = utils.rmse(Q['Y'].get_moments()[0][~mask_random],
f[~mask_random])
rmse_gaps = utils.rmse(Q['Y'].get_moments()[0][~mask_gaps],
f[~mask_gaps])
print("RMSE for randomly missing values: %f" % rmse_random)
print("RMSE for gap values: %f" % rmse_gaps)
plt.show()
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
[
"n=",
"d=",
"k=",
"seed=",
"maxiter=",
"debug",
"precompute",
"no-plot",
"no-rotation"])
except getopt.GetoptError:
print('python lssm_tvd.py <options>')
print('--n=<INT> Number of data vectors')
print('--d=<INT> Dimensionality of the latent vectors in the model')
print('--k=<INT> Dimensionality of the latent mixing weights')
print('--no-rotation Do not apply speed-up rotations')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
print('--debug Check that the rotations are implemented correctly')
print('--no-plot Do not plot results')
print('--precompute Precompute some moments when rotating. May '
'speed up or slow down.')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--no-rotation":
kwargs["rotate"] = False
elif opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--debug":
kwargs["debug"] = True
elif opt == "--precompute":
kwargs["precompute"] = True
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt == "--n":
kwargs["N"] = int(arg)
elif opt == "--d":
kwargs["D"] = int(arg)
elif opt == "--k":
if int(arg) == 0:
kwargs["K"] = None
else:
kwargs["K"] = int(arg)
elif opt == "--no-plot":
kwargs["plot"] = False
else:
raise ValueError("Unhandled argument given")
demo(**kwargs)
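# Example invocation (a sketch; argument values are illustrative):
#   python lssm_tvd.py --n=500 --d=4 --k=3 --maxiter=100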
|
nipunbatra/bayespy
|
bayespy/demos/lssm_tvd.py
|
Python
|
gpl-3.0
| 15,450
|
[
"Gaussian"
] |
a89be6df26da0d66e54a3d4aee4e7e975e09d391e27308a00bfad840d2e3c438
|
"""Amber Electric Sensor definitions."""
# There are three types of sensor: Current, Forecast and Grid.
# Current and Forecast will create general, controlled load and feed-in sensors as required.
# At the moment renewables is the only grid sensor.
from __future__ import annotations
from collections.abc import Mapping
from typing import Any
from amberelectric.model.channel import ChannelType
from amberelectric.model.current_interval import CurrentInterval
from amberelectric.model.forecast_interval import ForecastInterval
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CURRENCY_DOLLAR, ENERGY_KILO_WATT_HOUR
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION, DOMAIN
from .coordinator import AmberUpdateCoordinator
ICONS = {
"general": "mdi:transmission-tower",
"controlled_load": "mdi:clock-outline",
"feed_in": "mdi:solar-power",
}
UNIT = f"{CURRENCY_DOLLAR}/{ENERGY_KILO_WATT_HOUR}"
def format_cents_to_dollars(cents: float) -> float:
"""Return a formatted conversion from cents to dollars."""
return round(cents / 100, 2)
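# For instance, format_cents_to_dollars(2530) returns 25.3.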
def friendly_channel_type(channel_type: str) -> str:
"""Return a human readable version of the channel type."""
if channel_type == "controlled_load":
return "Controlled Load"
if channel_type == "feed_in":
return "Feed In"
return "General"
class AmberSensor(CoordinatorEntity, SensorEntity):
"""Amber Base Sensor."""
_attr_attribution = ATTRIBUTION
def __init__(
self,
coordinator: AmberUpdateCoordinator,
description: SensorEntityDescription,
channel_type: ChannelType,
) -> None:
"""Initialize the Sensor."""
super().__init__(coordinator)
self.site_id = coordinator.site_id
self.entity_description = description
self.channel_type = channel_type
self._attr_unique_id = (
f"{self.site_id}-{self.entity_description.key}-{self.channel_type}"
)
class AmberPriceSensor(AmberSensor):
"""Amber Price Sensor."""
@property
def native_value(self) -> float | None:
"""Return the current price in $/kWh."""
interval = self.coordinator.data[self.entity_description.key][self.channel_type]
if interval.channel_type == ChannelType.FEED_IN:
return format_cents_to_dollars(interval.per_kwh) * -1
return format_cents_to_dollars(interval.per_kwh)
@property
def extra_state_attributes(self) -> Mapping[str, Any] | None:
"""Return additional pieces of information about the price."""
interval = self.coordinator.data[self.entity_description.key][self.channel_type]
data: dict[str, Any] = {}
if interval is None:
return data
data["duration"] = interval.duration
data["date"] = interval.date.isoformat()
data["per_kwh"] = format_cents_to_dollars(interval.per_kwh)
if interval.channel_type == ChannelType.FEED_IN:
data["per_kwh"] = data["per_kwh"] * -1
data["nem_date"] = interval.nem_time.isoformat()
data["spot_per_kwh"] = format_cents_to_dollars(interval.spot_per_kwh)
data["start_time"] = interval.start_time.isoformat()
data["end_time"] = interval.end_time.isoformat()
data["renewables"] = round(interval.renewables)
data["estimate"] = interval.estimate
data["spike_status"] = interval.spike_status.value
data["channel_type"] = interval.channel_type.value
if interval.range is not None:
data["range_min"] = format_cents_to_dollars(interval.range.min)
data["range_max"] = format_cents_to_dollars(interval.range.max)
return data
class AmberForecastSensor(AmberSensor):
"""Amber Forecast Sensor."""
@property
def native_value(self) -> float | None:
"""Return the first forecast price in $/kWh."""
intervals = self.coordinator.data[self.entity_description.key].get(
self.channel_type
)
if not intervals:
return None
interval = intervals[0]
if interval.channel_type == ChannelType.FEED_IN:
return format_cents_to_dollars(interval.per_kwh) * -1
return format_cents_to_dollars(interval.per_kwh)
@property
def extra_state_attributes(self) -> Mapping[str, Any] | None:
"""Return additional pieces of information about the price."""
intervals = self.coordinator.data[self.entity_description.key].get(
self.channel_type
)
if not intervals:
return None
data = {
"forecasts": [],
"channel_type": intervals[0].channel_type.value,
}
for interval in intervals:
datum = {}
datum["duration"] = interval.duration
datum["date"] = interval.date.isoformat()
datum["nem_date"] = interval.nem_time.isoformat()
datum["per_kwh"] = format_cents_to_dollars(interval.per_kwh)
if interval.channel_type == ChannelType.FEED_IN:
datum["per_kwh"] = datum["per_kwh"] * -1
datum["spot_per_kwh"] = format_cents_to_dollars(interval.spot_per_kwh)
datum["start_time"] = interval.start_time.isoformat()
datum["end_time"] = interval.end_time.isoformat()
datum["renewables"] = round(interval.renewables)
datum["spike_status"] = interval.spike_status.value
if interval.range is not None:
datum["range_min"] = format_cents_to_dollars(interval.range.min)
datum["range_max"] = format_cents_to_dollars(interval.range.max)
data["forecasts"].append(datum)
return data
class AmberGridSensor(CoordinatorEntity, SensorEntity):
"""Sensor to show single grid specific values."""
_attr_attribution = ATTRIBUTION
def __init__(
self,
coordinator: AmberUpdateCoordinator,
description: SensorEntityDescription,
) -> None:
"""Initialize the Sensor."""
super().__init__(coordinator)
self.site_id = coordinator.site_id
self.entity_description = description
self._attr_unique_id = f"{coordinator.site_id}-{description.key}"
@property
def native_value(self) -> str | None:
"""Return the value of the sensor."""
return self.coordinator.data["grid"][self.entity_description.key]
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a config entry."""
coordinator: AmberUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
current: dict[str, CurrentInterval] = coordinator.data["current"]
forecasts: dict[str, list[ForecastInterval]] = coordinator.data["forecasts"]
entities: list = []
for channel_type in current:
description = SensorEntityDescription(
key="current",
name=f"{entry.title} - {friendly_channel_type(channel_type)} Price",
native_unit_of_measurement=UNIT,
state_class=STATE_CLASS_MEASUREMENT,
icon=ICONS[channel_type],
)
entities.append(AmberPriceSensor(coordinator, description, channel_type))
for channel_type in forecasts:
description = SensorEntityDescription(
key="forecasts",
name=f"{entry.title} - {friendly_channel_type(channel_type)} Forecast",
native_unit_of_measurement=UNIT,
state_class=STATE_CLASS_MEASUREMENT,
icon=ICONS[channel_type],
)
entities.append(AmberForecastSensor(coordinator, description, channel_type))
renewables_description = SensorEntityDescription(
key="renewables",
name=f"{entry.title} - Renewables",
native_unit_of_measurement="%",
state_class=STATE_CLASS_MEASUREMENT,
icon="mdi:solar-power",
)
entities.append(AmberGridSensor(coordinator, renewables_description))
async_add_entities(entities)
|
jawilson/home-assistant
|
homeassistant/components/amberelectric/sensor.py
|
Python
|
apache-2.0
| 8,354
|
[
"Amber"
] |
dce0a7fe3dd89a36372333787b7f527fad6ab312fbd0027fa926c7490a72c808
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2021 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
class WebToons(_ParserScraper):
imageSearch = '//img[contains(@class, "_images")]/@data-url'
prevSearch = '//a[contains(@class, "_prevEpisode")]'
multipleImagesPerStrip = True
def __init__(self, name, url, titlenum):
super(WebToons, self).__init__('WebToons/' + name)
baseUrl = 'https://www.webtoons.com/en/'
self.url = baseUrl + url + '/episode/viewer?title_no=' + str(titlenum)
self.listUrl = baseUrl + url + '/list?title_no=' + str(titlenum)
self.stripUrl = self.url + '&episode_no=%s'
self.firstStripUrl = self.stripUrl % '1'
def starter(self):
# Avoid age/GDPR gate
for cookie in ('needGDPR', 'needCCPA', 'needCOPPA'):
self.session.cookies.set(cookie, 'false', domain='webtoons.com')
# Find current episode number
listPage = self.getPage(self.listUrl)
currentEpisode = listPage.xpath('//div[@class="detail_lst"]/ul/li')[0].attrib['data-episode-no']
# Check for completed tag
self.endOfLife = (listPage.xpath('//div[@id="_asideDetail"]//span[@class="txt_ico_completed2"]') != [])
return self.stripUrl % currentEpisode
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super(WebToons, self).fetchUrls(url, data, urlSearch)
# Update firstStripUrl with the correct episode title
if url.rsplit('=', 1)[-1] == '1':
self.firstStripUrl = url
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('=', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
        imageExt = imageUrl.rsplit('.', 1)[-1].split('?', 1)[0]
return "%s-%03d.%s" % (episodeNum, imageNum, imageExt)
@classmethod
def getmodules(cls): # noqa: Allowed to be long
return (
# START AUTOUPDATE
cls('1000', 'action/one-thousand', 1217),
cls('10thDimensionBoys', 'comedy/10th-dimension-boys', 71),
cls('1111Animals', 'comedy/1111-animals', 437),
cls('2015SpaceSeries', 'sf/2015-space-series', 391),
cls('3SecondStrip', 'comedy/3-second-strip', 380),
cls('ABittersweetLife', 'slice-of-life/a-bittersweet-life', 294),
cls('AboutDeath', 'drama/about-death', 82),
cls('ABudgiesLife', 'slice-of-life/its-a-budgies-life', 985),
cls('Acception', 'drama/acception', 1513),
cls('Acursian', 'supernatural/acursian', 1452),
cls('Adamsville', 'horror/adamsville', 502),
cls('AdventuresOfGod', 'comedy/adventures-of-god', 853),
cls('AerialMagic', 'fantasy/aerial-magic', 1358),
cls('AgeMatters', 'romance/age-matters', 1364),
cls('AGoodDayToBeADog', 'romance/a-good-day-tobe-a-dog', 1390),
cls('Aisopos', 'drama/aisopos', 76),
cls('AliceElise', 'fantasy/alice-elise', 1481),
cls('AllThatWeHopeToBe', 'slice-of-life/all-that-we-hope-to-be', 470),
cls('AllThatYouAre', 'drama/all-that-you-are', 403),
cls('AlwaysHuman', 'romance/always-human', 557),
cls('Annarasumanara', 'drama/annarasumanara', 77),
cls('Anthronauts', 'challenge/anthronauts', 358917),
cls('AphroditeIX', 'sf/aphroditeix', 1451),
cls('ApocalypticHorseplay', 'supernatural/apocalyptic-horseplay', 635),
cls('AprilFlowers', 'fantasy/april-flowers', 1363),
cls('Arma', 'super-hero/arma', 1640),
cls('AsPerUsual', 'slice-of-life/as-per-usual', 599),
cls('AssassinRoommate', 'romance/assassin-roommate', 1050),
cls('AthenaComplex', 'fantasy/athena-complex', 867),
cls('AuraFromAnotherPlanet', 'comedy/aura-from-another-planet', 369),
cls('AverageAdventuresOfAnAverageGirl', 'slice-of-life/average-adventures-of-an-average-girl', 401),
cls('AXED', 'comedy/axed', 1558),
cls('Backchannel', 'super-hero/backchannel', 1456),
cls('BadSigns', 'comedy/bad-signs', 1623),
cls('Bastard', 'thriller/bastard', 485),
cls('BeforeWeKnewIt', 'romance/before-we-knew-it', 1972),
cls('BehindTheGIFs', 'comedy/behind-the-gifs', 658),
cls('BigJo', 'romance/big-jo', 854),
cls('BiteMe', 'thriller/bite-me', 1019),
cls('Blackened', 'challenge/blackened', 363805),
cls('BladesOfFurry', 'romance/blades-of-furry', 2383),
cls('Blessed', 'drama/blessed', 1193),
cls('BloodInk', 'action/blood-ink', 1490),
cls('BloodlessWars', 'sf/bloodless-wars', 1622),
cls('BloopBloopRelationshipComic', 'challenge/bloop-bloop-relationship-comic', 239970),
cls('Bluechair', 'slice-of-life/bluechair', 199),
cls('BOOItsSex', 'slice-of-life/boo-its-sex', 1413),
cls('BoyfriendOfTheDead', 'comedy/boyfriend-of-the-dead', 1102),
cls('BrassAndSass', 'romance/brass-and-sass', 1652),
cls('BrimstoneAndRoses', 'romance/brimstone-and-roses', 1758),
cls('BrothersBond', 'action/brothersbond', 1458),
cls('BrutallyHonest', 'comedy/brutally-honest', 799),
cls('BuzzFeedComics', 'comedy/buzzfeed-comics', 585),
cls('CapeOfSpirits', 'action/cape-of-spirits', 1559),
cls('CARL', 'slice-of-life/carl', 1216),
cls('Caster', 'action/caster', 1461),
cls('CastleSwimmer', 'fantasy/castle-swimmer', 1499),
cls('Catharsis', 'fantasy/catharsis', 396),
cls('CatLoafAdventures', 'slice-of-life/cat-loaf-adventures', 1381),
cls('CheeseInTheTrap', 'drama/cheese-in-the-trap', 99),
cls('CherryBlossoms', 'romance/cherry-blossoms', 1005),
cls('Chiller', 'thriller/chiller', 536),
cls('ChocoLatte', 'romance/choco-latte', 1691),
cls('CChansACatgirl', 'challenge/c-chans-a-catgirl', 263430),
cls('CityOfBlank', 'sf/city-of-blank', 1895),
cls('CityOfWalls', 'drama/city-of-wall', 505),
cls('CityVamps', 'challenge/city-vamps-', 119224),
cls('ClusterFudge', 'slice-of-life/cluster-fudge', 355),
cls('CodeAdam', 'action/code-adam', 1657),
cls('CookingComically', 'tiptoon/cooking-comically', 622),
cls('CrapIDrewOnMyLunchBreak', 'challenge/crap-i-drew-on-my-lunch-break', 124756),
cls('Crumbs', 'romance/crumbs', 1648),
cls('CrystalVirus', 'challenge/crystal-virus', 347038),
cls('CupidsArrows', 'romance/cupids-arrows', 1538),
cls('CursedPrincessClub', 'comedy/cursed-princess-club', 1537),
cls('Cyberbunk', 'sf/cyberbunk', 466),
cls('Cyberforce', 'super-hero/cyberforce', 531),
cls('CykoKO', 'super-hero/cyko-ko', 560),
cls('Darbi', 'action/darbi', 1098),
cls('DatingWithATail', 'romance/dating-with-a-tail', 1263),
cls('Davinchibi', 'fantasy/davinchibi', 1190),
cls('DaYomanvilleGang', 'drama/da-yomanville-gang', 1578),
cls('DaysOfHana', 'drama/days-of-hana', 1246),
cls('DEADDAYS', 'horror/dead-days', 293),
cls('Debunkers', 'challenge/debunkers', 148475),
cls('DEEP', 'thriller/deep', 364),
cls('Defects', 'challenge/defects', 221106),
cls('Denma', 'sf/denma', 921),
cls('Dents', 'sf/dents', 671),
cls('Deor', 'fantasy/deor', 1663),
cls('DevilNumber4', 'supernatural/devil-no-4', 1695),
cls('DICE', 'fantasy/dice', 64),
cls('DistantSky', 'horror/distant-sky', 75),
cls('DONTHATE', 'comedy/dont-hate', 1574),
cls('DoodleForFood', 'slice-of-life/doodle-for-food', 487),
cls('DownToEarth', 'romance/down-to-earth', 1817),
cls('Dragnarok', 'fantasy/dragnarok', 1018),
cls('DragnarokDescendants', 'fantasy/dragnarok-descendants', 1433),
cls('DrawnToYou', 'challenge/drawn-to-you', 172022),
cls('DrFrost', 'drama/dr-frost', 371),
cls('DungeonMinis', 'challenge/dungeonminis', 64132),
cls('Dustinteractive', 'comedy/dustinteractive', 907),
cls('DutyAfterSchool', 'sf/duty-after-school', 370),
cls('EatFighter', 'sports/eat-fighter', 1460),
cls('EcstasyHearts', 'sports/ecstasy-hearts', 604),
cls('Edith', 'romance/edith', 1536),
cls('Eggnoid', 'sf/eggnoid', 1229),
cls('Eleceed', 'action/eleceed', 1571),
cls('Elena', 'horror/elena', 484),
cls('ElfAndWarrior', 'fantasy/elf-and-warrior', 908),
cls('EMPYREA', 'fantasy/empyrea', 1407),
cls('EpicV', 'comedy/epic-v', 353),
cls('EscapeRoom', 'thriller/escape-room', 1815),
cls('EverywhereAndNowhere', 'comedy/everywhere-and-nowhere', 1598),
cls('FAMILYMAN', 'drama/family-man', 85),
cls('FantasySketchTheGame', 'sf/fantasy-sketch', 1020),
cls('Faust', 'supernatural/faust', 522),
cls('FINALITY', 'mystery/finality', 1457),
cls('Firebrand', 'supernatural/firebrand', 877),
cls('FisheyePlacebo', 'challenge/fisheye-placebo', 101841),
cls('Flow', 'fantasy/flow', 101),
cls('FluffyBoyfriend', 'supernatural/fluffy-boyfriend', 1164),
cls('ForTheSakeOfSita', 'romance/for-the-sake-of-sita', 349),
cls('FourLeaf', 'fantasy/four-leaf', 1454),
cls('FreakingRomance', 'romance/freaking-romance', 1467),
cls('FridayForbiddenTales', 'thriller/friday', 388),
cls('FutureYou', 'challenge/future-you', 288439),
cls('GameMasters', 'challenge/game-masters', 237252),
cls('GenshinImpact', 'challenge/genshin-impact', 242646),
cls('Gepetto', 'sf/gepetto', 81),
cls('GhostsAmongTheWildFlowers', 'fantasy/ghosts-over-wild-flowers', 718),
cls('GhostTeller', 'horror/ghost-teller', 1307),
cls('GhostTheater', 'drama/ghost-theater', 1911),
cls('GhostWife', 'romance/ghost-wife', 1471),
cls('GirlsHaveABlog', 'slice-of-life/girls-have-a-blog', 1052),
cls('GirlsOfTheWilds', 'action/girls-of-the-wilds', 93),
cls('GodOfBath', 'comedy/god-of-bath', 91),
cls('GOSU', 'action/gosu', 1099),
cls('GourmetHound', 'drama/gourmet-hound', 1245),
cls('GremoryLand', 'horror/gremoryland', 1893),
cls('GuardiansOfTheVideoGame', 'sf/guardians-of-the-video-game', 368),
cls('HAPIBUNI', 'comedy/hapi-buni', 362),
cls('HardcoreLevelingWarrior', 'action/hardcore-leveling-warrior', 1221),
cls('HaveYouAnyFear', 'horror/have-you-any-fear', 1197),
cls('Haxor', 'sf/haxor', 1325),
cls('Heartwired', 'sf/heartwired', 1539),
cls('HeirsGame', 'drama/heirs-game', 1445),
cls('HeliosFemina', 'fantasy/helios-femina', 638),
cls('HelloWorld', 'slice-of-life/hello-world', 827),
cls('Hellper', 'fantasy/hellper', 185),
cls('HeroineChic', 'super-hero/heroine-chic', 561),
cls('HIVE', 'thriller/hive', 65),
cls('Hooky', 'fantasy/hooky', 425),
cls('HoovesOfDeath', 'fantasy/hooves-of-death', 1535),
cls('HouseOfStars', 'fantasy/house-of-stars', 1620),
cls('HowToBecomeADragon', 'fantasy/how-to-become-a-dragon', 1973),
cls('HowToLove', 'slice-of-life/how-to-love', 472),
cls('IDontWantThisKindOfHero', 'super-hero/i-dont-want-this-kind-of-hero', 98),
cls('IF', 'action/if', 1925),
cls('IllusionsOfAdulting', 'slice-of-life/illusions-of-adulting', 922),
cls('IllustratedInternet', 'comedy/illustrated-internet', 750),
cls('ILoveYoo', 'drama/i-love-yoo', 986),
cls('ImmortalNerd', 'slice-of-life/immortal-nerd', 579),
cls('ImTheGrimReaper', 'supernatural/im-the-grim-reaper', 1697),
cls('Inarime', 'super-hero/inarime', 675),
cls('InternetExplorer', 'challenge/internet-explorer', 219164),
cls('InTheBleakMidwinter', 'sf/in-the-bleak-midwinter', 1946),
cls('ItsMine', 'drama/its-mine', 2010),
cls('JackieRose', 'supernatural/jackie-rose', 613),
cls('JingleJungle', 'slice-of-life/jingle-jungle', 282),
cls('JustAskYuli', 'slice-of-life/just-ask-yuli', 402),
cls('JustForKicks', 'slice-of-life/just-for-kicks', 1152),
cls('JustFriends', 'challenge/just-friends', 190722),
cls('JustPancakes', 'comedy/just-pancakes', 1651),
cls('KidsAreAllRight', 'drama/kids-are-all-right', 283),
cls('Killstagram', 'thriller/killstagram', 1971),
cls('KindOfConfidential', 'romance/kind-of-confidential', 663),
cls('KindOfLove', 'slice-of-life/kind-of-love', 1850),
cls('KnightRun', 'sf/knight-run', 67),
cls('Kubera', 'fantasy/kubera', 83),
cls('LalinsCurse', 'supernatural/lalins-curse', 1601),
cls('Lars', 'slice-of-life/lars', 358),
cls('LateBloomer', 'romance/late-bloomer', 988),
cls('LavenderJack', 'super-hero/lavender-jack', 1410),
cls('LESSA', 'action/lessa', 89),
cls('LESSA2TheCrimsonKnight', 'action/lessa-2', 507),
cls('LetsPlay', 'romance/letsplay', 1218),
cls('LibraryGhost', 'comedy/library-ghost', 220),
cls('LifeOutsideTheCircle', 'drama/life-outside-the-circle', 1260),
cls('LittleMatchaGirl', 'fantasy/little-matcha-girl', 1665),
cls('LiveForever', 'thriller/live-forever', 1312),
cls('LiveWithYourself', 'comedy/live-with-yourself', 919),
cls('Lone', 'fantasy/lone', 1929),
cls('Lookism', 'drama/lookism', 1049),
cls('LoreOlympus', 'romance/lore-olympus', 1320),
cls('Lorna', 'slice-of-life/lorna', 1284),
cls('LostInTranslation', 'drama/lost-in-translation', 1882),
cls('LoveAdviceFromTheGreatDukeOfHell', 'comedy/love-advice', 1498),
cls('LoveMeKnot', 'romance/love-me-knot', 2224),
cls('Lozolz', 'tiptoon/lozolz', 1268),
cls('LUFF', 'romance/luff', 1489),
cls('Luggage', 'fantasy/luggage', 1642),
cls('LUMINE', 'fantasy/lumine', 1022),
cls('Lunarbaboon', 'slice-of-life/lunarbaboon', 523),
cls('MageAndDemonQueen', 'comedy/mage-and-demon-queen', 1438),
cls('Magical12thGraders', 'super-hero/magical-12th-graders', 90),
cls('Magician', 'fantasy/magician', 70),
cls('MagicSodaPop', 'fantasy/magic-soda-pop', 1947),
cls('MarryMe', 'romance/marry-me', 1951),
cls('MatchmakerHero', 'sf/matchmaker-hero', 1569),
cls('MelvinasTherapy', 'horror/melvinas-therapy', 1021),
cls('MeowMan', 'comedy/meow-man', 1677),
cls('MercWorks', 'slice-of-life/mercworks', 426),
cls('Messenger', 'fantasy/messenger', 1382),
cls('MetaphoricalHER', 'drama/metaphorical-her', 1475),
cls('MidnightPoppyLand', 'romance/midnight-poppy-land', 1798),
cls('MidnightRain', 'drama/midnight-rain', 1797),
cls('MidnightRhapsody', 'slice-of-life/midnight-rhapsody', 116),
cls('MidnightRhapsodySeason2', 'slice-of-life/midnight-rhapsody-season2', 365),
cls('MissAbbottAndTheDoctor', 'romance/miss-abbott-and-the-doctor', 707),
cls('MOONBEARD', 'comedy/moon-beard', 471),
cls('MoonYou', 'supernatural/moonyou', 1340),
cls('Murrz', 'slice-of-life/murrz', 1281),
cls('Muted', 'supernatural/muted', 1566),
cls('MyBoo', 'supernatural/my-boo', 1185),
cls('MyAssassinGirlfriend', 'challenge/my-assassin-girlfriend', 249007),
cls('MyDearColdBloodedKing', 'romance/my-dear-cold-blooded-king', 961),
cls('MyDeepestSecret', 'thriller/my-deepest-secret', 1580),
cls('MyDictatorBoyfriend', 'comedy/my-dictator-boyfriend', 1391),
cls('MyDragonGirlfriend', 'challenge/my-dragon-girlfriend', 162918),
cls('MyGiantNerdBoyfriend', 'slice-of-life/my-giant-nerd-boyfriend', 958),
cls('MyKittyAndOldDog', 'slice-of-life/my-kitty-and-old-dog', 184),
cls('MyNameIsBenny', 'slice-of-life/my-name-is-benny', 1279),
cls('MyWallflowerKiss', 'challenge/my-wallflower-kiss', 151869),
cls('NanoList', 'sf/nano-list', 700),
cls('NationalDogDay2016', 'slice-of-life/national-dog-day', 747),
cls('NewLifeProject', 'comedy/new-life-project', 279),
cls('Newman', 'fantasy/newman', 405),
cls('NewNormalClass8', 'drama/new-normal-class-8', 100),
cls('Nicholalala', 'slice-of-life/nicholalala', 418),
cls('NightmareFactory', 'thriller/nightmare-factory', 616),
cls('Noblesse', 'action/noblesse', 87),
cls('NoblesseRaisAdventure', 'action/noblesse-spin-off', 608),
cls('NoScope', 'sports/no-scope', 1572),
cls('NotEvenBones', 'thriller/not-even-bones', 1756),
cls('NothingSpecial', 'fantasy/nothing-special', 1188),
cls('OddGirlOut', 'drama/odd-girl-out', 1420),
cls('OhHoly', 'romance/oh-holy', 809),
cls('OmniscientReader', 'action/omniscient-reader', 2154),
cls('ORANGEMARMALADE', 'romance/orange-marmalade', 97),
cls('Outrage', 'super-hero/outrage', 1450),
cls('OVERPOWERED', 'challenge/overpowered', 85292),
cls('PacificRimAmara', 'sf/pacific-rim-amara', 1327),
cls('PenguinLovesMev', 'slice-of-life/penguin-loves-mev', 86),
cls('Petrichor', 'challenge/petrichor', 100835),
cls('PhantomParadise', 'fantasy/phantom-paradise', 1250),
cls('Phase', 'romance/phase', 2117),
cls('Pigminted', 'slice-of-life/pigminted', 482),
cls('PinchPoint', 'challenge/pinch-point-reborn', 334640),
cls('Plum', 'sports/plum', 1605),
cls('Polidiocy', 'comedy/polidiocy', 676),
cls('Pound', 'action/pound', 1496),
cls('PowerBallad', 'super-hero/power-ballad', 987),
cls('PurpleHyacinth', 'mystery/purple-hyacinth', 1621),
cls('Punderworld', 'challenge/punderworld', 312584),
cls('RandomChat', 'drama/random-chat', 1669),
cls('RANDOMPHILIA', 'comedy/randomphilia', 386),
cls('Rebirth', 'sf/rebirth', 1412),
cls('RefundHighSchool', 'fantasy/refundhighschool', 1360),
cls('RiseFromAshes', 'supernatural/rise-from-ashes', 959),
cls('RoarStreetJournal', 'slice-of-life/roar-street-journal', 397),
cls('RoomOfSwords', 'sf/room-of-swords', 1261),
cls('RotAndRuin', 'horror/rot-and-ruin', 1878),
cls('SafelyEndangered', 'comedy/safely-endangered', 352),
cls('SaltyStudio', 'romance/salty-studio', 74),
cls('SaphieTheOneEyedCat', 'slice-of-life/saphie-one-eyed-cat', 670),
cls('SAVEME', 'drama/bts-save-me', 1514),
cls('ScoobandShag', 'challenge/scoob-and-shag', 210827),
cls('ScorchingRomance', 'romance/scorching-romance', 1662),
cls('Seed', 'sf/seed', 1480),
cls('SHADOW', 'super-hero/shadow', 281),
cls('ShadowPirates', 'action/shadow-pirates', 1455),
cls('Shard', 'supernatural/shard', 960),
cls('Shiloh', 'thriller/shiloh', 1649),
cls('ShootAround', 'drama/shoot-around', 399),
cls('Shriek', 'thriller/shriek', 772),
cls('SID', 'supernatural/sid', 497),
cls('SIDEKICKS', 'super-hero/sidekicks', 92),
cls('SimonSues', 'supernatural/simon-sues', 1619),
cls('SirensLament', 'romance/sirens-lament', 632),
cls('Sithrah', 'fantasy/sithrah', 524),
cls('SkateFire100', 'sports/skate-fire-100', 1674),
cls('SmallWorld', 'slice-of-life/small-world', 1159),
cls('SmileBrush', 'slice-of-life/smile-brush', 94),
cls('SmileBrushMyOldPictures', 'slice-of-life/smile-brush-my-old-pictures', 302),
cls('Snailogy', 'slice-of-life/snailogy', 387),
cls('SOLEIL', 'fantasy/soleil', 1823),
cls('SOULCARTEL', 'fantasy/soul-cartel', 72),
cls('SoulOnHold', 'supernatural/soul-on-hold', 1701),
cls('SpaceBoy', 'sf/space-boy', 400),
cls('SpaceVixen', 'challenge/space-vixen-deep-space-k9', 207049),
cls('SpellsFromHell', 'fantasy/spells-from-hell', 2431),
cls('SpiritFingers', 'drama/spirit-fingers', 1577),
cls('Spirits', 'fantasy/spirits-re', 1348),
cls('StalkerXStalker', 'challenge/stalker-x-stalker', 245662),
cls('STARCROSS', 'super-hero/star-cross', 1599),
cls('StayingHealthyTogether', 'tiptoon/staying-healthy-together', 1963),
cls('StrawberrySeafoam', 'fantasy/strawberry-seafoam', 1248),
cls('SubtleDisaster', 'drama/subtle-disaster', 350),
cls('SubZero', 'romance/subzero', 1468),
cls('SuitorArmor', 'fantasy/suitor-armor', 2159),
cls('SuperSecret', 'romance/super-secret', 666),
cls('SupersonicGirl', 'super-hero/supersonic-girl', 633),
cls('SweetHome', 'thriller/sweethome', 1285),
cls('SwimmingLessonsForAMermaid', 'romance/swimming-lessons-for-a-mermaid', 1912),
cls('SwordInterval', 'supernatural/sword-interval', 486),
cls('TalesOfTheUnusual', 'horror/tales-of-the-unusual', 68),
cls('TheAdvancedPlayerOfTheTutorialTower', 'action/the-advanced-player-of-the-tutorial-tower', 2409),
cls('TheBadguys', 'super-hero/the-bad-guys', 701),
cls('TheBrooklynite', 'super-hero/the-brooklynite', 813),
cls('TheCliff', 'thriller/the-cliff', 80),
cls('TheCroaking', 'fantasy/the-croaking', 1494),
cls('TheDaneMen', 'comedy/the-danemen', 395),
cls('TheDevilIsAHandsomeMan', 'drama/the-devil-is-a-handsome-man', 1311),
cls('TheDoctorsAreOut', 'romance/the-doctors-are-out', 1910),
cls('TheFeverKing', 'super-hero/the-fever-king', 1659),
cls('TheFourOfThem', 'drama/the-four-of-them', 1524),
cls('TheGamer', 'action/the-gamer', 88),
cls('TheGentlemansArmchair', 'comedy/the-gentlemans-armchair', 469),
cls('TheGirlDownstairs', 'romance/the-girl-downstairs', 1809),
cls('THEGIRLFROMCLASS', 'drama/the-girl-from-class', 73),
cls('TheGodOfHighSchool', 'action/the-god-of-high-school', 66),
cls('TheKissBet', 'romance/the-kiss-bet', 1617),
cls('TheLifeOfTheThreeBears', 'slice-of-life/the-life-of-the-three-bears', 390),
cls('ThePurpleHeart', 'super-hero/the-purple-heart', 723),
cls('TheRedBook', 'horror/the-red-book', 467),
cls('TheRedHook', 'super-hero/the-red-hook', 643),
cls('TheRedKing', 'supernatural/the-red-king', 1687),
cls('TheShadowProphet', 'drama/the-shadow-prophet', 1881),
cls('TheSoundOfYourHeart', 'comedy/the-sound-of-your-heart', 269),
cls('TheSteamDragonExpress', 'fantasy/steam-dragon-express', 1270),
cls('TheStoriesOfThoseAroundMe', 'romance/the-stories-of-those-around-me', 96),
cls('TheStrangeTalesOfOscarZahn', 'fantasy/the-strange-tales-of-oscar-zahn', 685),
cls('TheVaultOfHorrorACollectionOfNightmares', 'horror/the-vault-of-horror-a-collection-of-nightmares', 295),
cls('TheWeeklyRoll', 'challenge/the-weekly-roll', 358889),
cls('TheWeightOfOurSky', 'historical/the-weight-of-our-sky', 1739),
cls('TheWitchAndTheBull', 'fantasy/the-witch-and-the-bull', 1892),
cls('TheWolfmanOfWulvershire', 'mystery/the-wolfman-of-wulvershire', 1784),
cls('TheWorldWhereIBelong', 'supernatural/the-world-where-i-belong', 1318),
cls('TheWrathAndTheDawn', 'fantasy/the-wrath-and-the-dawn', 1772),
cls('ThirdShiftSociety', 'supernatural/third-shift-society', 1703),
cls('Thornstone', 'fantasy/thornstone', 1612),
cls('TickleTown', 'comedy/tickle-town', 428),
cls('ToasterDude', 'comedy/toaster-dude', 1983),
cls('TokyoThreatDocumentationProject', 'challenge/tokyo-threat-documentation-project', 417973),
cls('TowerOfGod', 'fantasy/tower-of-god', 95),
cls('TrailerParkWarlock', 'comedy/trailer-park-warlock', 1512),
cls('TrashBird', 'comedy/trash-bird', 473),
cls('TrueBeauty', 'romance/truebeauty', 1436),
cls('Trump', 'fantasy/trump', 84),
cls('UndeadEd', 'supernatural/undeaded', 468),
cls('UnderPrin', 'supernatural/underprin', 78),
cls('UnderTheAegis', 'fantasy/under-the-aegis', 436),
cls('UnholyBlood', 'supernatural/unholy-blood', 1262),
cls('UnknownCaller', 'thriller/ar-toon', 775),
cls('UnlovableReplacement', 'romance/unlovable-replacement', 1762),
cls('UnluckyIsAsLuckyDoes', 'comedy/unlucky-is-as-lucky-does', 1554),
cls('UnOrdinary', 'super-hero/unordinary', 679),
cls('UnTouchable', 'romance/untouchable', 79),
cls('UpAndOut', 'slice-of-life/up-and-out', 488),
cls('UrbanAnimal', 'super-hero/urban-animal', 1483),
cls('Uriah', 'horror/uriah', 1607),
cls('VarsityNoir', 'mystery/varsity-noir', 1613),
cls('VersionDayAndNight', 'drama/version-day-and-night', 1796),
cls('WafflesAndPancakes', 'slice-of-life/waffles-and-pancakes', 1310),
cls('WarCry', 'super-hero/war-cry', 1247),
cls('WarningLabel', 'romance/warning-label', 1051),
cls('Watermelon', 'fantasy/watermelon', 1435),
cls('WeakHero', 'action/weakhero', 1726),
cls('WEBTOONGREENLiGHT', 'action/webtoon-greenlight', 1988),
cls('WestwoodVibrato', 'drama/westwood-vibrato', 537),
cls('WhereTangentsMeet', 'romance/where-tangents-meet', 421),
cls('WindBreaker', 'sports/wind-breaker', 372),
cls('WinterMoon', 'fantasy/winter-moon', 1093),
cls('WinterWoods', 'drama/winter-woods', 344),
cls('WitchCreekRoad', 'horror/witch-creek-road', 1453),
cls('WitchHunt', 'supernatural/witch-hunt', 363),
cls('Wolfsbane', 'horror/wolfsbane', 1826),
cls('XINK3R', 'super-hero/xinker', 541),
cls('YourAdventure', 'comedy/your-adventure', 506),
cls('YourLetter', 'drama/your-letter', 1540),
cls('YouveGottaBeKittenMe', 'challenge/youve-gotta-be-kitten-me', 383661),
cls('YumisCells', 'slice-of-life/yumi-cell', 478),
cls('YunaAndKawachan', 'drama/yuna-and-kawachan', 1840),
cls('ZeroGame', 'fantasy/zero-game', 1704),
cls('ZomCom', 'challenge/zomcom', 70195),
# END AUTOUPDATE
)
|
webcomics/dosage
|
dosagelib/plugins/webtoons.py
|
Python
|
mit
| 27,799
|
[
"CRYSTAL"
] |
34504c4f2ec7f6d81401fe7b176d860b62ab8279a8ea566004590f3c929cfc3e
|
from tools.load import LoadMatrix
from sg import sg
lm=LoadMatrix()
traindat=lm.load_numbers('../data/fm_train_real.dat')
testdat=lm.load_numbers('../data/fm_test_real.dat')
train_label=lm.load_labels('../data/label_train_twoclass.dat')
parameter_list=[[traindat,testdat, train_label,10,2.1,1.2,1e-5,False],
[traindat,testdat,train_label,10,2.1,1.3,1e-4,False]]
def classifier_libsvm (fm_train_real=traindat,fm_test_real=testdat,
label_train_twoclass=train_label,
size_cache=10, width=2.1,C=1.2,
epsilon=1e-5,use_bias=False):
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
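    # Shogun's 'GAUSSIAN' kernel is k(x, y) = exp(-||x - y||^2 / width); note
    # that 'width' is the kernel width parameter, not a standard deviation.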
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'LIBSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
kernel_matrix = sg('get_kernel_matrix', 'TEST')
return result, kernel_matrix
if __name__=='__main__':
print('LibSVM')
classifier_libsvm(*parameter_list[0])
|
curiousguy13/shogun
|
examples/undocumented/python_static/classifier_libsvm.py
|
Python
|
gpl-3.0
| 1,043
|
[
"Gaussian"
] |
4ad930d8097646de5826eb984ae974880b595aba650a0ef36d721bc53ebabd4b
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
class HomogeneousMagneticFieldTest(ut.TestCase):
system = espressomd.System(box_l=[3.0, 3.0, 3.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
np.random.seed(seed=42)
def tearDown(self):
self.system.constraints.clear()
def test_setter_and_getter(self):
H_field1 = np.array([0.0, 1.0, 0.0])
H_field2 = np.array([3.533, 5.842, 0.127])
H_constraint = espressomd.constraints.HomogeneousMagneticField(
H=H_field1)
np.testing.assert_almost_equal(np.copy(H_constraint.H), H_field1)
H_constraint.H = H_field2
np.testing.assert_almost_equal(np.copy(H_constraint.H), H_field2)
def test_default_value(self):
H_field_default = np.array([1.0, 0.0, 0.0])
H_constraint = espressomd.constraints.HomogeneousMagneticField()
np.testing.assert_almost_equal(
np.copy(H_constraint.H),
H_field_default)
@utx.skipIfMissingFeatures(["DIPOLES"])
def test_add_energy_and_forces(self):
H_field = [5.0, 3.0, 2.0]
dip_mom0 = [2.0, 6.0, 1.]
dip_mom1 = [-1.0, 0.5, -0.2]
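        # Reference: the energy of a point dipole mu in a homogeneous field H
        # is U = -mu . H, which the energy assertions below rely on.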
# check that the dipolar energy is zero initially, ...
self.assertEqual(self.system.analysis.energy()["dipolar"], 0.0)
H_constraint = espressomd.constraints.HomogeneousMagneticField(
H=H_field)
self.system.constraints.add(H_constraint)
# ... and also after adding the constraint
self.assertEqual(self.system.analysis.energy()["dipolar"], 0.0)
# check dipolar energy when adding dipole moments
p0 = self.system.part.add(
pos=[0, 0, 0], dip=dip_mom0, rotation=(1, 1, 1))
self.assertEqual(self.system.analysis.energy()["dipolar"],
-1.0 * np.dot(H_field, dip_mom0))
p1 = self.system.part.add(
pos=[1, 1, 1], dip=dip_mom1, rotation=(1, 1, 1))
self.assertEqual(self.system.analysis.energy()["dipolar"],
-(np.dot(H_field, dip_mom0) +
np.dot(H_field, dip_mom1)))
if espressomd.has_features(["ROTATION"]):
# check that running the integrator leads to expected torques
self.system.integrator.run(0)
torque_expected0 = np.cross(dip_mom0, H_field)
torque_expected1 = np.cross(dip_mom1, H_field)
for i in range(3):
self.assertAlmostEqual(
p0.torque_lab[i],
torque_expected0[i],
places=10)
self.assertAlmostEqual(
p1.torque_lab[i],
torque_expected1[i],
places=10)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/constraint_homogeneous_magnetic_field.py
|
Python
|
gpl-3.0
| 3,558
|
[
"ESPResSo"
] |
cedc3c7ff981c811661338ab84c7eb45e87a4221db7407646711c287be596ae7
|
#!/usr/bin/env python
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Deploys Fuchsia packages to a package repository in a Fuchsia
build output directory."""
import pkg_repo
import argparse
import os
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--package',
action='append',
required=True,
help='Paths to packages to install.')
parser.add_argument('--fuchsia-out-dir',
required=True,
help='Path to a Fuchsia build output directory. '
'Setting the GN arg '
'"default_fuchsia_build_dir_for_installation" '
'will cause it to be passed here.')
args, _ = parser.parse_known_args()
assert args.package
fuchsia_out_dir = os.path.expanduser(args.fuchsia_out_dir)
repo = pkg_repo.ExternalPkgRepo(os.path.join(fuchsia_out_dir, 'amber-files'),
os.path.join(fuchsia_out_dir, '.build-id'))
print('Installing packages and symbols in package repo %s...' %
repo.GetPath())
for package in args.package:
repo.PublishPackage(package)
print('Installation success.')
return 0
if __name__ == '__main__':
sys.exit(main())
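# Example invocation (a sketch; paths are hypothetical):
#   python deploy_to_pkg_repo.py --package out/my_package.far \
#       --fuchsia-out-dir ~/fuchsia/out/default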
|
nwjs/chromium.src
|
build/fuchsia/deploy_to_pkg_repo.py
|
Python
|
bsd-3-clause
| 1,408
|
[
"Amber"
] |
f1d7af5f5e5fa92c60d938ad6338ff3f07a5fd3452595ae5f05216fc8ee7b2c4
|
#
# Mike Dvorak
# Sail Tactics
# mike@sailtactics.com
#
# Created: 2014-12-26
# Modified:
#
#
# Description: Manages SUNTANS tide forecast netCDF data in a WindDB2.
#
#
import numpy
import scipy.io.netcdf as nc
import argparse
from windb2 import insert, util
import os
import tempfile
from datetime import datetime
import math
import psycopg2
import sys
import logging
import re
import logging
import pytz
# Set up logging for this package
logger = logging.getLogger('windb2')
"""Inserts a netCDF file with SUNTANS tidal current output into a WinDB2 database.
*
* windb2Conn - Connection to a WinDB2 database.
* ncFile - Either an open file or a string name of a file to open.
* domainKey - Existing domain key in the database. If left blank, a new domain will be created.
* replaceData - Deletes data for the same time in the database if True. Useful for freshening data.
*
* returns timesInsertedList, domainKey - A list of times inserted in ISO time format, and the
domainKey where the data was inserted.
"""
def insertNcFile(windb2_conn, ncFile, domainKey=None, tableName="current", replaceData=False, sqlWhere="true"):
# Connect to the WinDB
inserter = insert.Insert(windb2_conn)
# Open the tide netCDF file
print('netCDF file type passed to suntans.insertNcFile=', type(ncFile))
if type(ncFile) != nc.netcdf_file:
ncFile = nc.netcdf_file(ncFile, 'r')
# Get the grid dimensions and coordinates
nLong = ncFile.dimensions['west_east']
nLat = ncFile.dimensions['south_north']
nTime = ncFile.variables['Times'].shape[0] # 'Time' dim is UNLIMITED
lonArr = ncFile.variables['utm_easting']
latArr = ncFile.variables['utm_northing']
timeArr = ncFile.variables['Times']
u = ncFile.variables['u_top1m']
v = ncFile.variables['v_top1m']
# Create a new domain if necessary
if domainKey == None:
domainKey = str(inserter.create_new_domain("SF Bay currents ", "SUNTANS", 200, 'm'))
inserter.insert_horiz_geom(domainKey, lonArr, latArr, 26910) # EPSG:26910: NAD83 / UTM zone 10N
inserter.create_new_table(domainKey, tableName, ("speed", "direction"), ("real", "smallint"),
("speed_positive","direction_degree"), ("speed >= 0","direction >= 0 AND direction < 360"))
else:
# Make sure it's a string so that we don't have concatenation problems later
domainKey = str(domainKey)
# Get the geomkeys associated with the WRF coordinates
horizGeomKey = inserter.calculateHorizWindGeomKeys(domainKey, nLong, nLat)
# Create a counter to execute every so often
counter = 0
startTime = datetime.now()
    # Pass the time array through the time filter, even if no filter is set, to return Postgres-
    # compliant timestamps. See Python datetime.datetime for datetime format details.
timesToInsert = windb2_conn.filterTimes(timeArr, '%Y-%m-%d_%H:%M:%S', sqlWhere=sqlWhere)
# Info
print('Reduced the number of times to insert by ',
round((1 - float(len(timesToInsert)) / timeArr.shape[0]) * 100, 1), '%')
# Iterate through the times that we want to insert
ttiCount = 0
tncfCount = 0
timeValuesToReturn = []
while ttiCount < len(timesToInsert) and tncfCount < nTime:
# Only insert if this is a time we want to insert
tncf = datetime.strptime(timeArr[tncfCount].tostring().decode('UTF-8'), '%Y-%m-%d_%H:%M:%S').replace(tzinfo=pytz.utc)
tti = timesToInsert[ttiCount]
if tti != tncf:
tncfCount += 1
continue
# Open a temporary file to COPY from
temp_file = tempfile.NamedTemporaryFile(mode='w+b')
# Create the time in GeoServer/GeoWebCache format
timeValuesToReturn.append(tncf.strftime('%Y-%m-%dT%H:%M:%S.000Z'))
# Info
print('Processing time: ', timeValuesToReturn[-1])
        # Iterate through x, y, and time and insert the tidal current data
for x in range(horizGeomKey.shape[0]):
for y in range(horizGeomKey.shape[1]):
                # Make sure that this is actually an x,y point we want to insert.
                # To allow a mask of selective insert points, a horizGeomKey of
                # zero means we don't want to insert this one.
                if horizGeomKey[x,y]==0:
                    continue
# Write the data string to the temp file
if not numpy.isnan(u[tncfCount,y,x]):
# Calculate speed
speed = math.sqrt(math.pow(u[tncfCount,y,x],2) + math.pow(v[tncfCount,y,x],2))
# Calculate direction (using the 'flow' convention for tides)
dir = int(util.calc_dir_deg(u[tncfCount,y,x], v[tncfCount,y,x]))
# Add this row to be inserted into the database
str_to_write = '{},{},{},{},{},{}\n'.format(domainKey, horizGeomKey[x,y],
tncf.strftime('%Y-%m-%d %H:%M:%S %Z'), speed, dir, 0)
temp_file.write(bytes(str_to_write, 'utf-8'))
counter += 1
# Insert the data at height 0 for tidal current
temp_file.flush()
insertColumns = ('domainkey', 'geomkey', 't', 'speed', 'direction', 'height')
try:
windb2_conn.curs.copy_from(open(temp_file.name, 'rb'), tableName + '_' + domainKey, sep=',', columns=insertColumns)
except psycopg2.IntegrityError as e:
# Delete the duplicate data
errorTest = 'duplicate key value violates unique constraint "' + tableName + "_" + domainKey + '_domainkey_geomkey_t_height_key"'
if re.search(errorTest, str(e)):
# Delete the data and retry the insert if asked to replace data in the function call
if replaceData:
# Rollback to the last commit (necessary to reset the database connection)
windb2_conn.conn.rollback()
                    # Delete that time (assumes UTC time zone)
sql = 'DELETE FROM ' + tableName + '_' + domainKey + ' WHERE t = timestamp with time zone\'' + \
tti.strftime('%Y-%m-%d %H:%M:%S %Z') + '\' ' + \
'AND height=0'
print("Deleting conflicting times: " + sql)
windb2_conn.curs.execute(sql)
windb2_conn.conn.commit()
                    # Reinsert that time
windb2_conn.curs.copy_from(open(temp_file.name, 'r'), tableName + '_' + domainKey, sep=',', columns=insertColumns)
# Otherwise, just notify that the insert failed because of duplicate data
else:
logging.error("ERROR ON INSERT: ", e.message)
logging.error("Use 'replaceData=True' if you want the data to be reinserted.")
raise
# Commit the changes
windb2_conn.conn.commit()
        # Calculate the insert rate
elapsedTime = (datetime.now() - startTime).seconds
if elapsedTime > 0:
insertRate = counter / elapsedTime
print("Inserted ", counter, " x,y wind points at ", insertRate, " I/s")
# Close the tempfile so it is deleted
temp_file.close()
# Increment the time
ttiCount += 1
return timeValuesToReturn
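# A minimal usage sketch (hypothetical names; the WinDB2 connection setup is
# an assumption, not part of this module):
#
#   from windb2 import windb2
#   conn = windb2.WinDB2()  # assumed connection class
#   insertNcFile(conn, 'suntans_sfbay.nc', domainKey=1, replaceData=True)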
|
sailorsenergy/windb2
|
windb2/model/suntans/suntans.py
|
Python
|
gpl-3.0
| 7,472
|
[
"NetCDF"
] |
76d20036daa4f7762b3f7ed1ed55562b2b498e58baa6c67c19a169b9463523b5
|
"""Implementation of the text encoder.
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from models.layers import GatedEmbeddingModule, NetVLAD
class TextEncoder(tf.keras.Model):
"""Implementation of Text Encoder.
This model takes in contextual embeddings from a language model and maps
them to a fixed length, sharded embedding that corresponds to embeddings
produced by the corresponding video encoder. This model should be trained
in concert with a video encoder.
The contextual embeddings are first aggregated using NetVLAD to a fixed
length vector. Then, for each expert in the corresponding video encoder,
this fixed length vector is passed through a Gated Embedding Module to
produce a shard of an embedding. The fixed length vector is also passed
through a single dense layer to produce weights that specify the relative
importance of the shards of the embeddings.
Attributes:
num_of_experts: the number of experts used.
num_netvlad_clusters: the number of clusters in the NetVLAD model.
language_model_dimensionality: the length of the last dimension of
the contextual embeddings.
netvlad: a NetVLAD model, used for aggregation embeddings.
encoded_expert_dimensionality: dimensionality of each expert in the
final embedding.
gems: a list of gated embedding modules, one per expert.
dense_layers: a list of dense layers, one per expert.
"""
def __init__(self,
num_of_experts,
num_netvlad_clusters,
ghost_clusters,
language_model_dimensionality,
encoded_expert_dimensionality,
residual_cls_token=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros"):
"""Initialize this model.
Args:
num_of_experts: number of experts used in the video encoder.
num_netvlad_clusters: number of clusters in NetVLAD.
ghost_clusters: number of ghost clusters in NetVLAD.
language_model_dimensionality: last dimension of output of language
model.
encoded_expert_dimensionality: the dimensionality video experts
embeddings are computed down to.
residual_cls_token: a boolean indicating if the CLS output from the
language model should be seperated from the other embeddings. If
this is True, the first token output from the language model is
not inputted to NetVLAD. Instead, it's appended to the
aggregated outputs from NetVLAD.
kernel_initializer: the strategy used to initialize the weights in
dense layers' kernel. The default is glorot uniform, the default
strategy for keras.
            bias_initializer: the strategy used to initialize the weights in dense
layers' biases. The default is zeros, the default strategy for
keras.
"""
super(TextEncoder, self).__init__()
self.num_of_experts = num_of_experts
self.num_netvlad_clusters = num_netvlad_clusters
self.language_model_dimensionality = language_model_dimensionality
self.netvlad = NetVLAD(num_netvlad_clusters, ghost_clusters)
self.encoded_expert_dimensionality = encoded_expert_dimensionality
self.residual_cls_token = residual_cls_token
self.make_gems(
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
self.make_dense_layers(
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
def make_gems(self, kernel_initializer, bias_initializer):
"""Initialize gated embedding modules."""
self.gems = []
for _ in range(self.num_of_experts):
self.gems.append(GatedEmbeddingModule(
self.encoded_expert_dimensionality,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer))
def make_dense_layers(self, kernel_initializer, bias_initializer):
"""Make dense layer used for generating mixture of embedding weights.
Note: "moe" stands for mixture of embeddings weights.
"""
self.moe_dense = tf.keras.layers.Dense(
self.num_of_experts,
activation="softmax",
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
def call(self, contextual_embeddings):
"""Executes a forward pass on the text encoder.
First, the text is aggregated using netvlad. These aggregated
text embeddings are inputted to each gated embedding module to generate
the normalized embeddings. The aggregated text embeddings are also
inputted into a dense layer to generate the mixture weights.
Args:
contextual_embeddings: a batch of contextual embeddings.
Returns: a tuple of two elements. First, a list of embeddings for the
text captions. Each element of this list is a tensor of shape batch size
x encoded expert dimensionality. Second, a tensor of mixture weights
for the embeddings of shape batch size x number of experts.
"""
if self.residual_cls_token:
cls_token = contextual_embeddings[:, 0, :]
aggregated_embeddings = self.netvlad(
contextual_embeddings[:, 1:, :])
aggregated_embeddings = tf.concat([
cls_token, aggregated_embeddings], axis=1)
else:
aggregated_embeddings = self.netvlad(contextual_embeddings)
expert_embeddings = []
for expert_gated_embedding_module in self.gems:
expert_embeddings.append(expert_gated_embedding_module(
aggregated_embeddings))
mixture_weights = self.moe_dense(aggregated_embeddings)
return expert_embeddings, mixture_weights
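# Illustrative usage sketch (not part of the original module). The parameter
# values and shapes below are hypothetical; they only show how the encoder is
# called and what it returns. Assumes `tf`, NetVLAD, and GatedEmbeddingModule
# are already imported/defined in this file.
if __name__ == "__main__":
    encoder = TextEncoder(
        num_of_experts=4,
        num_netvlad_clusters=16,
        ghost_clusters=1,
        language_model_dimensionality=768,
        encoded_expert_dimensionality=512)
    dummy_embeddings = tf.random.normal([8, 30, 768])  # 8 captions, 30 tokens
    expert_embeddings, mixture_weights = encoder(dummy_embeddings)
    # expert_embeddings: list of 4 tensors, each of shape [8, 512]
    # mixture_weights: tensor of shape [8, 4]; rows sum to 1 (softmax output)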
|
googleinterns/via-content-understanding
|
videoretrieval/models/components/textencoder.py
|
Python
|
apache-2.0
| 6,575
|
[
"MOE"
] |
f5d47583e3f9014adec77db9a994a54bee7013bc519a28c326aa9a5004d6d720
|
import math
import core
import euclid
# NOTE:
# Dear developers,
# If a class will be used as a base class in geometries, prefix its name with LB.
# This avoids name confusion between the libraries, so all the geometry libraries
# can use the same class name (e.g. Sunpath)
class LBSunpath(object):
"""
Calculates sun path
Attributes:
latitude: The latitude of the location. Values must be between -90 and 90. Default is set to the equator.
northAngle: Angle to north (0-360). 90 is west and 270 is east (Default: 0)
longitude: The longitude of the location (Default: 0)
timeZone: A number representing the time zone of the location you are constructing. This can improve the accuracy of the resulting sun plot. The time zone should follow the epw convention and should be between -12 and +12, where 0 is at Greenwich, UK, positive values are to the East of Greenwich and negative values are to the West.
daylightSavingPeriod: An analysis period for daylight saving. (Default = None)
Usage:
import ladybug.sunpath as sunpath
# initiate sunpath
sp = sunpath.LBSunpath(50)
sun = sp.calculateSun(1, 1, 12) # calculate sun data for Jan 1 at noon
print sun.azimuth, sun.altitude
"""
def __init__(self, latitude = 0, northAngle = 0, longitude = 0, timeZone = 0,
daylightSavingPeriod = None):
self.__latitude = math.radians(float(latitude))
self.__longitude = math.radians(float(longitude))
self.northAngle = northAngle
self.timeZone = timeZone
self.daylightSavingPeriod = daylightSavingPeriod
@classmethod
def fromLocation(cls, location, northAngle = 0, daylightSavingPeriod = None):
return cls(location.latitude, northAngle, location.longitude, \
location.timeZone, daylightSavingPeriod)
@property
def latitude(self):
"""get latitude in degrees"""
return math.degrees(self.__latitude)
@latitude.setter
def latitude(self, value):
"""set latitude value in degrees"""
self.__latitude = math.radians(float(value))
@property
def longitude(self):
"""get latitude in degrees"""
return math.degrees(self.__latitude)
@longitude.setter
def longitude(self, value):
"""set latitude value in degrees"""
self.__longitude = math.radians(float(value))
def isDaylightSavingHour(self, datetime):
if not self.daylightSavingPeriod: return False
return self.daylightSavingPeriod.isTimeIncluded(datetime.HOY)
def calculateSun(self, month, day, hour, isSolarTime = False):
"""Get Sun data for an hour of the year
Args:
month: An integer between 1-12
day: An integer between 1-31
hour: A positive number <= 24
isSolarTime: A boolean to indicate if the input hour is solar time. (Default: False)
Returns:
A sun object for this particular time
"""
datetime = core.LBDateTime(month, day, hour)
return self.calculateSunFromDataTime(datetime, isSolarTime)
def calculateSunFromHOY(self, HOY, isSolarTime = False):
"""Get Sun data for an hour of the year
Args:
            HOY: hour of the year (a positive number <= 8760)
isSolarTime: A boolean to indicate if the input hour is solar time. (Default: False)
Returns:
A sun object for this particular time
"""
datetime = core.LBDateTime.fromHOY(HOY)
return self.calculateSunFromDataTime(datetime, isSolarTime)
def calculateSunFromDataTime(self, datetime, isSolarTime = False):
"""Get Sun data for an hour of the year
        This code was originally written by Trygve Wastvedt (Trygve.Wastvedt@gmail.com)
        based on NOAA equations, and modified by Chris Mackey and Mostapha Roudsari
Args:
datetime: Ladybug datetime
isSolarTime: A boolean to indicate if the input hour is solar time. (Default: False)
Returns:
A sun object for this particular time
"""
solDec, eqOfTime = self.__calculateSolarGeometry(datetime)
month, day, hour = datetime.month, datetime.day, datetime.floatHour
        isDaylightSaving = self.isDaylightSavingHour(datetime)
if isDaylightSaving: hour += 1
#hours
solTime = self.__calculateSolarTime(hour, eqOfTime, isSolarTime)
#degrees
hourAngle = (solTime*15 + 180) if (solTime*15 < 0) else (solTime*15 - 180)
#RADIANS
zenith = math.acos(math.sin(self.__latitude)*math.sin(solDec) \
+ math.cos(self.__latitude)*math.cos(solDec)*math.cos(math.radians(hourAngle)))
altitude = (math.pi/2) - zenith
if hourAngle == 0.0 or hourAngle == -180.0 or hourAngle == 180.0:
if solDec < self.__latitude: azimuth = math.pi
else: azimuth = 0.0
else:
azimuth = ((math.acos(((math.sin(self.__latitude)*math.cos(zenith)) \
- math.sin(solDec))/(math.cos(self.__latitude)*math.sin(zenith))) + math.pi) % (2*math.pi)) \
if (hourAngle > 0) else \
((3*math.pi - math.acos(((math.sin(self.__latitude)*math.cos(zenith)) \
- math.sin(solDec))/(math.cos(self.__latitude)*math.sin(zenith)))) % (2*math.pi))
# create the sun for this hour
return LBSun(datetime, altitude, azimuth, isSolarTime, isDaylightSaving, self.northAngle)
def calculateSunriseSunset(self, month, day, depression = 0.833, isSolarTime = False):
datetime = core.LBDateTime(month, day, hour = 12)
return self.calculateSunriseSunsetFromDateTime(datetime, depression, isSolarTime)
# TODO: implement solar time
def calculateSunriseSunsetFromDateTime(self, datetime, depression = 0.833, isSolarTime = False):
"""Calculate sunrise, sunset and noon for a day of year"""
solDec, eqOfTime = self.__calculateSolarGeometry(datetime)
# calculate sunrise and sunset hour
#if isSolarTime:
# noon = .5
#else:
noon = (720 - 4 * math.degrees(self.__longitude) - eqOfTime + self.timeZone * 60) / 1440
sunRiseHourAngle = self.__calculateSunriseHourAngle(solDec, depression)
sunrise = noon - sunRiseHourAngle * 4 / 1440
sunset = noon + sunRiseHourAngle * 4 / 1440
        # convert decimal hour to solar hour
# noon = self.__calculateSolarTime(24*noon, eqOfTime, isSolarTime)
# sunrise = self.__calculateSolarTime(24*sunrise, eqOfTime, isSolarTime)
# sunset = self.__calculateSolarTime(24*sunset, eqOfTime, isSolarTime)
return {
"sunrise" : core.LBDateTime(datetime.month, datetime.day, 24 * sunrise),
"noon" : core.LBDateTime(datetime.month, datetime.day, 24 * noon),
"sunset" : core.LBDateTime(datetime.month, datetime.day, 24 * sunset)}
def __calculateSolarGeometry(self, datetime, year = 2015):
"""Calculate Solar geometry for an hour of the year
        Args:
datetime: A Ladybug datetime
Returns:
Solar declination: Solar declination in radians
eqOfTime: Equation of time as minutes
"""
month, day, hour = datetime.month, datetime.day, datetime.floatHour
a = 1 if (month < 3) else 0
y = year + 4800 - a
m = month + 12*a - 3
julianDay = day + math.floor((153*m + 2)/5) + 59
julianDay += (hour - self.timeZone)/24.0 + 365*y + math.floor(y/4) \
- math.floor(y/100) + math.floor(y/400) - 32045.5 - 59
julianCentury = (julianDay - 2451545) / 36525
#degrees
geomMeanLongSun = (280.46646 + julianCentury * (36000.76983 + julianCentury*0.0003032)) % 360
#degrees
geomMeanAnomSun = 357.52911 + julianCentury*(35999.05029 - 0.0001537*julianCentury)
eccentOrbit = 0.016708634 - julianCentury*(0.000042037 + 0.0000001267*julianCentury)
sunEqOfCtr = math.sin(math.radians(geomMeanAnomSun))*(1.914602 - julianCentury*(0.004817+0.000014*julianCentury)) + \
math.sin(math.radians(2*geomMeanAnomSun))*(0.019993-0.000101*julianCentury) + \
math.sin(math.radians(3*geomMeanAnomSun))*0.000289
#degrees
sunTrueLong = geomMeanLongSun + sunEqOfCtr
        #degrees
sunTrueAnom = geomMeanAnomSun + sunEqOfCtr
#AUs
sunRadVector = (1.000001018*(1 - eccentOrbit**2))/ \
(1 + eccentOrbit*math.cos(math.radians(sunTrueLong)))
#degrees
sunAppLong = sunTrueLong - 0.00569 - 0.00478*math.sin(math.radians(125.04-1934.136*julianCentury))
#degrees
meanObliqEcliptic = 23 + (26 + ((21.448 - julianCentury*(46.815 + \
julianCentury*(0.00059 - julianCentury*0.001813))))/60)/60
#degrees
obliqueCorr = meanObliqEcliptic + 0.00256*math.cos(math.radians(125.04 - 1934.136*julianCentury))
#degrees
sunRightAscen = math.degrees(math.atan2(math.cos(math.radians(obliqueCorr))* \
math.sin(math.radians(sunAppLong)), math.cos(math.radians(sunAppLong))))
#RADIANS
solDec = math.asin(math.sin(math.radians(obliqueCorr))*math.sin(math.radians(sunAppLong)))
varY = math.tan(math.radians(obliqueCorr/2))*math.tan(math.radians(obliqueCorr/2))
#minutes
eqOfTime = 4*math.degrees(varY*math.sin(2*math.radians(geomMeanLongSun)) \
- 2*eccentOrbit*math.sin(math.radians(geomMeanAnomSun)) \
+ 4*eccentOrbit*varY*math.sin(math.radians(geomMeanAnomSun))*math.cos(2*math.radians(geomMeanLongSun)) \
- 0.5*(varY**2)*math.sin(4*math.radians(geomMeanLongSun)) \
- 1.25*(eccentOrbit**2)*math.sin(2*math.radians(geomMeanAnomSun)))
return solDec, eqOfTime
def __calculateSunriseHourAngle(self, solarDec, depression = 0.833):
"""Calculate hour angle for sunrise time in degrees"""
hourAngleArg = math.cos(math.radians(90 + depression)) \
/(math.cos(self.__latitude) * math.cos(solarDec)) \
- math.tan(self.__latitude) * math.tan(solarDec)
return math.degrees(math.acos(hourAngleArg))
def __calculateSolarTime(self, hour, eqOfTime, isSolarTime):
"""Calculate Solar time for an hour"""
if isSolarTime: return hour
return ((hour*60 + eqOfTime + 4*math.degrees(self.__longitude) - 60*self.timeZone) % 1440)/60
class LBSun(object):
"""Create a sun
Attributes:
datetime: A Ladybug datetime that represents the datetime for this sunVector
altitude: Solar Altitude in radians
azimuth: Solar Azimuth in radians
isSolarTime: A Boolean that indicates if datetime represents the solar time
isDaylightSaving: A Boolean that indicates if datetime is calculated for Daylight saving period
northAngle: North angle of the sunpath in Degrees. This will be only used to calculate the solar vector.
"""
def __init__(self, datetime, altitude, azimuth, isSolarTime, isDaylightSaving, northAngle):
self.__datetime = datetime
self.__altitude = altitude
self.__azimuth = azimuth
self.__isSolarTime = isSolarTime
self.__isDaylightSaving = isDaylightSaving
self.__northAngle = northAngle # useful to calculate sun vector - sun angle is in degrees
        self.__hourlyData = [] # Placeholder for hourly data; not sure how it will work yet
@property
def datetime(self):
"""Return datetime"""
return self.__datetime
@property
def northAngle(self):
"""Return north angle for +YAxis"""
return self.__northAngle
@property
def HOY(self):
"""Return Hour of the year"""
return self.__datetime.floatHOY
@property
def altitude(self):
"""Return solar altitude in degrees"""
return math.degrees(self.__altitude)
@property
def azimuth(self):
"""Return solar azimuth in degrees"""
return math.degrees(self.__azimuth)
@property
def altitudeInRadians(self):
"""Return solar altitude in radians"""
return self.__altitude
@property
def azimuthInRadians(self):
"""Return solar azimuth in radians"""
return self.__azimuth
@property
def isSolarTime(self):
"""Return a Boolean that indicates is datetime is solar time"""
return self.__isSolarTime
@property
def isDaylightSaving(self):
"""Return a Boolean that indicates is datetime is solar time"""
return self.__isDaylightSaving
@property
def hourlyData(self):
return self.__hourlyData
def appendHourlyData(self, data):
"""Append Ladybug hourly data to this sun"""
assert data.datetime.HOY == self.HOY
self.__hourlyData.append(data)
return True
@property
def isDuringDay(self):
# sun vector is flipped to look to the center
return self.sunVector.z <= 0
@property
def sunVector(self):
"""Return sun vector for this sun
        The vector is flipped to point from the sun toward the scene origin.
"""
zAxis = euclid.Vector3(0., 0., -1.)
xAxis = euclid.Vector3(1., 0., 0.)
northVector = euclid.Vector3(0., 1., 0.)
# rotate north vector based on azimuth, altitude, and north
sunvector = northVector \
.rotate_around(xAxis, self.__altitude) \
.rotate_around(zAxis, self.__azimuth) \
.rotate_around(zAxis, math.radians(-self.__northAngle))
sunvector.normalize().flip()
return sunvector
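# Illustrative usage sketch (not part of the original module); the location
# values below are hypothetical. Requires the ladybug `core` and `euclid`
# modules imported above.
if __name__ == "__main__":
    sp = LBSunpath(latitude = 40, longitude = -105, timeZone = -7)
    sun = sp.calculateSun(6, 21, 12)  # June 21 at noon
    print "altitude: %.2f, azimuth: %.2f" % (sun.altitude, sun.azimuth)
    times = sp.calculateSunriseSunset(6, 21)
    print "sunrise:", times["sunrise"], "sunset:", times["sunset"]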
|
antonszilasi/honeybeex
|
honeybeex/honeybee/ladybug/sunpath.py
|
Python
|
gpl-3.0
| 13,838
|
[
"EPW"
] |
b0504796839ec7f760ae30e7cae70a7cb01dd656b48604740724adffb95bd92b
|
#!/usr/bin/env python
import vtk
def main():
# Create a graph
graph = vtk.vtkMutableDirectedGraph()
v1 = graph.AddVertex()
v2 = graph.AddVertex()
v3 = graph.AddVertex()
graph.AddEdge(v1,v2)
graph.AddEdge(v2,v3)
# Manually set the position of the vertices
points = vtk.vtkPoints()
points.InsertNextPoint(0,0,0)
points.InsertNextPoint(1,0,0)
points.InsertNextPoint(2,0,0)
graph.SetPoints(points)
# Create the color array
vertexColors = vtk.vtkIntArray()
vertexColors.SetNumberOfComponents(1)
vertexColors.SetName("Color")
lookupTable = vtk.vtkLookupTable()
lookupTable.SetNumberOfTableValues(3)
lookupTable.SetTableValue(0, 1.0, 0.0, 0.0) # red
lookupTable.SetTableValue(1, 1.0, 1.0, 1.0) # white
lookupTable.SetTableValue(2, 0.0, 1.0, 0.0) # green
lookupTable.Build()
vertexColors.InsertNextValue(0)
vertexColors.InsertNextValue(1)
vertexColors.InsertNextValue(2)
# Add the color array to the graph
graph.GetVertexData().AddArray(vertexColors)
# Visualize
graphLayoutView = vtk.vtkGraphLayoutView()
graphLayoutView.AddRepresentationFromInput(graph)
graphLayoutView.SetLayoutStrategyToPassThrough()
graphLayoutView.SetVertexColorArrayName("Color")
graphLayoutView.ColorVerticesOn()
theme = vtk.vtkViewTheme()
theme.SetPointLookupTable(lookupTable)
graphLayoutView.ApplyViewTheme(theme)
graphLayoutView.ResetCamera()
graphLayoutView.GetInteractor().Initialize()
graphLayoutView.GetRenderer().GetActiveCamera().Zoom(0.8)
graphLayoutView.GetInteractor().Start()
if __name__ == '__main__':
main()
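# Variant sketch (not part of the original example): color vertices by a float
# scalar mapped through the lookup table's range instead of integer indices.
def color_by_scalar(graph, values):
    """Attach a float array named "Scalar" and build a matching lookup table."""
    scalars = vtk.vtkFloatArray()
    scalars.SetName("Scalar")
    for v in values:
        scalars.InsertNextValue(v)
    graph.GetVertexData().AddArray(scalars)
    lut = vtk.vtkLookupTable()
    lut.SetRange(min(values), max(values))  # map the data range onto the table
    lut.Build()
    return lut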
|
lorensen/VTKExamples
|
src/Python/Graphs/ColorVerticesLookupTable.py
|
Python
|
apache-2.0
| 1,712
|
[
"VTK"
] |
a52c32df53ce3ced3dd2663b03d16fc0c3939319c667f3f3acea32461cb1c41a
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-attributes
# Author : Stuart Paterson
########################################################################
"""
Retrieve attributes associated with the given DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
for job in args:
result = dirac.attributes( int(job), printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
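# Example invocation (hypothetical job IDs):
#   dirac-wms-job-attributes 722 723
# prints the attribute dictionary (e.g. Status, Site, Owner) for each job.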
|
Sbalbp/DIRAC
|
Interfaces/scripts/dirac-wms-job-attributes.py
|
Python
|
gpl-3.0
| 1,197
|
[
"DIRAC"
] |
8f9aa85cfd66ab1093caaacb24b103392854472619c76679f0f7b94a977ebbf4
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['extension'] = """
type: group
short-summary: Manage and update CLI extensions.
"""
helps['extension add'] = """
type: command
short-summary: Add an extension.
long-summary: To learn about installing extensions automatically, visit https://aka.ms/AzExtDynamicInstall.
parameters:
- name: --system
type: string
short-summary: Use a system directory for the extension.
long-summary: Default path is azure-cli-extensions folder under the CLI running python environment lib path, configurable by environment variable AZURE_EXTENSION_SYS_DIR. On Windows, you may need to open your shell as Administrator to run with the right permission.
examples:
- name: Add extension by name
text: az extension add --name anextension
- name: Add extension from URL
text: az extension add --source https://contoso.com/anextension-0.0.1-py2.py3-none-any.whl
- name: Add extension from local disk
text: az extension add --source ~/anextension-0.0.1-py2.py3-none-any.whl
- name: Add extension from local disk and use pip proxy for dependencies
text: az extension add --source ~/anextension-0.0.1-py2.py3-none-any.whl --pip-proxy https://user:pass@proxy.server:8080
- name: Add extension to system directory
text: az extension add --name anextension --system
- name: Add a specific version of extension
text: az extension add --name anextension --version 1.0.0
- name: Upgrade the extension if already installed
text: az extension add --upgrade --name anextension
"""
helps['extension list'] = """
type: command
short-summary: List the installed extensions.
"""
helps['extension list-available'] = """
type: command
short-summary: List publicly available extensions.
examples:
- name: List all publicly available extensions
text: az extension list-available
- name: List details on a particular extension
text: az extension list-available --show-details --query anextension
"""
helps['extension remove'] = """
type: command
short-summary: Remove an extension.
examples:
- name: Remove an extension. (autogenerated)
text: az extension remove --name MyExtension
crafted: true
"""
helps['extension show'] = """
type: command
short-summary: Show an extension.
examples:
- name: Show an extension. (autogenerated)
text: az extension show --name MyExtension
crafted: true
"""
helps['extension update'] = """
type: command
short-summary: Update an extension.
examples:
- name: Update an extension by name
text: az extension update --name anextension
- name: Update an extension by name and use pip proxy for dependencies
text: az extension update --name anextension --pip-proxy https://user:pass@proxy.server:8080
"""
helps['extension list-versions'] = """
type: command
short-summary: List available versions for an extension.
examples:
- name: List available versions for an extension
text: az extension list-versions --name anextension
"""
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/extension/_help.py
|
Python
|
mit
| 3,409
|
[
"VisIt"
] |
1618f2f190f8ccb5e4084e6f4f10015fdd66fa321114112f66db50acacc03517
|
# Copyright (c) 2018 Greg Pintilie - gregp@slac.stanford.edu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import chimera
import os
import os.path
import Tkinter
import tkFont
from CGLtk import Hybrid
import VolumeData
import _multiscale
import MultiScale.surface
import _surface
import numpy
import _contour
import Matrix
import Surface
import VolumeViewer
import FitMap
from sys import stderr
from time import clock
import chimera.match
from axes import prAxes
from CGLutil.AdaptiveTree import AdaptiveTree
import random
from VolumePath import Marker_Set, Marker, Link
from _contour import affine_transform_vertices as transform_vertices
from Matrix import xform_matrix, multiply_matrices, chimera_xform, identity_matrix, invert_matrix, shift_and_angle
import struct
from Rotamers import getRotamers
from chimera.resCode import protein1to3
OML = chimera.openModels.list
devMenu = True
isModelZ = True
dlgName = "mapqdlg"
dlgTitle = "MapQ (v1.2)"
dlgHelp = 'https://cryoem.slac.stanford.edu/ncmi/resources/software/mapq'
if isModelZ :
devMenu = False
dlgName = "modelzdlg"
dlgTitle = "ModelZ (v1.2)"
dlgHelp = 'https://cryoem.slac.stanford.edu/ncmi/resources/software/modelz'
atomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),
'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),
'S' : chimera.MaterialColor (1.000,1.000,0.188),
'O' : chimera.MaterialColor (1.000,0.051,0.051),
'N' : chimera.MaterialColor (0.188,0.314,0.973),
'P' : chimera.MaterialColor (1.0, 0.502, 0.0),
'H' : chimera.MaterialColor (0.9,.9,.9),
' ' : chimera.MaterialColor (0.2,1,.2)
}
atomColors2 = {'C' : (0.565,0.565,0.565,1),
'Cbb' : (0.2,0.6,0.2,1),
'S' : (1.000,1.000,0.188,1),
'O' : (1.000,0.051,0.051,1),
'N' : (0.188,0.314,0.973,1),
'P' : (1.0, 0.502, 0.0,1),
'H' : (0.9,.9,.9,1),
               ' ' : (0.7,.9,.7,1)
}
ac = { 'O' : chimera.MaterialColor( .9, .2, .2, 1.0 ),
'C' : chimera.MaterialColor( .7, .7, .7, 1.0 ),
'N' : chimera.MaterialColor( .2, .2, .9, 1.0 ),
'H' : chimera.MaterialColor( 1, 1, 1, 1.0 ),
' ' : chimera.MaterialColor( .2, .2, .2, 1.0 ),
}
def umsg ( txt ) :
print txt
status ( txt )
def status ( txt ) :
txt = txt.rstrip('\n')
msg.configure(text = txt)
msg.update_idletasks()
class MapQ_Dialog ( chimera.baseDialog.ModelessDialog ) :
name = dlgName
buttons = ( "Close" )
title = dlgTitle
help = dlgHelp
def fillInUI(self, parent):
self.group_mouse_mode = None
tw = parent.winfo_toplevel()
self.toplevel_widget = tw
tw.withdraw()
parent.columnconfigure(0, weight = 1)
row = 0
menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)
tw.config(menu = menubar)
f = Tkinter.Frame(parent)
f.grid(column=0, row=row, sticky='ew')
#l = Tkinter.Label(f, text=' ')
#l.grid(column=0, row=row, sticky='w')
# ---------------------------------------------------------------------------------
self.InitVars()
if 1 :
#row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='nsew', pady=0, padx=0)
Tkinter.Grid.columnconfigure(f, 0, weight=1)
Tkinter.Grid.columnconfigure(ff, 0, weight=1)
Tkinter.Grid.rowconfigure(f, row, weight=1)
Tkinter.Grid.rowconfigure(ff, 0, weight=1)
self.Canvas = Tkinter.Canvas(ff, height=80)
self.Canvas.grid(column=0, row=0, sticky='nsew')
self.modX = 10; self.modY = 10; self.modH = 30
self.seqX = 10; self.seqY = 45; self.seqH = 30
self.Canvas.bind("<ButtonPress-1>", lambda event : self.B1_Down ( event ) )
self.Canvas.bind("<Control-ButtonPress-1>", lambda event : self.B1_Down_Ctrl ( event ) )
self.Canvas.bind("<Shift-ButtonPress-1>", lambda event : self.B1_Down_Shift ( event ) )
self.Canvas.bind("<Option-ButtonPress-1>", lambda event : self.B1_Down_Alt ( event ) )
self.Canvas.bind("<Alt-ButtonPress-1>", lambda event : self.B1_Down_Alt ( event ) )
self.Canvas.bind("<ButtonPress-2>", lambda event : self.B2_Down (event) )
self.Canvas.bind("<ButtonPress-3>", lambda event : self.B3_Down (event) )
self.Canvas.bind("<ButtonRelease-1>", lambda event : self.B1_Up ( event ) )
self.Canvas.bind("<Control-ButtonRelease-1>", lambda event : self.B1_Up_Ctrl ( event ) )
self.Canvas.bind("<Shift-ButtonRelease-1>", lambda event : self.B1_Up_Shift ( event ) )
self.Canvas.bind("<Alt-ButtonRelease-1>", lambda event : self.B1_Up_Alt ( event ) )
self.Canvas.bind("<Option-ButtonRelease-1>", lambda event : self.B1_Up_Alt ( event ) )
self.Canvas.bind("<ButtonRelease-2>", lambda event : self.B2_Up (event) )
self.Canvas.bind("<Option-ButtonRelease-2>", lambda event : self.B2_Up_Alt (event) )
self.Canvas.bind("<Alt-ButtonRelease-2>", lambda event : self.B2_Up_Alt (event) )
self.Canvas.bind("<Control-ButtonRelease-2>", lambda event : self.B2_Up_Ctrl (event) )
self.Canvas.bind("<Command-ButtonRelease-2>", lambda event : self.B2_Up_Comm (event) )
self.Canvas.bind("<Shift-ButtonRelease-2>", lambda event : self.B2_Up_Shift (event) )
self.Canvas.bind("<ButtonRelease-3>", lambda event : self.B2_Up (event) )
self.Canvas.bind("<Option-ButtonRelease-3>", lambda event : self.B2_Up_Alt (event) )
self.Canvas.bind("<Alt-ButtonRelease-3>", lambda event : self.B2_Up_Alt (event) )
self.Canvas.bind("<Control-ButtonRelease-3>", lambda event : self.B2_Up_Ctrl (event) )
self.Canvas.bind("<Command-ButtonRelease-3>", lambda event : self.B2_Up_Comm (event) )
self.Canvas.bind("<Shift-ButtonRelease-3>", lambda event : self.B2_Up_Shift (event) )
self.Canvas.bind("<B1-Motion>", lambda event : self.B1_Drag ( event ) )
self.Canvas.bind("<B2-Motion>", lambda event : self.B2_Drag ( event ) )
self.Canvas.bind("<B3-Motion>", lambda event : self.B3_Drag ( event ) )
self.Canvas.bind("<Motion>", lambda event : self.Mouse_Move ( event ) )
self.Canvas.bind("<Configure>", lambda event : self.Canvas_Config (event) )
self.Canvas.bind("<Leave>", lambda event : self.Canvas_Leave (event) )
self.Canvas.bind("<MouseWheel>", lambda event : self.Canvas_Wheel (event) )
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w', pady=0, padx=5)
if 1 :
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w', pady=5, padx=10)
l = Tkinter.Label(ff, text='Map:', anchor=Tkinter.W)
l.grid(column=0, row=0, sticky='w')
self.dmap = Tkinter.StringVar(parent)
self.dmapMB = Tkinter.Menubutton ( ff, textvariable=self.dmap, relief=Tkinter.RAISED, width=20 )
self.dmapMB.grid (column=1, row=0, sticky='we', padx=1)
self.dmapMB.menu = Tkinter.Menu ( self.dmapMB, tearoff=0, postcommand=self.MapMenu )
self.dmapMB["menu"] = self.dmapMB.menu
self.cur_dmap = None
self.SetVisMap ()
l = Tkinter.Label(ff, text='Model:', anchor=Tkinter.W)
l.grid(column=2, row=0, sticky='w')
self.struc = Tkinter.StringVar(parent)
self.strucMB = Tkinter.Menubutton ( ff, textvariable=self.struc, relief=Tkinter.RAISED, width=20 )
self.strucMB.grid (column=3, row=0, sticky='we', padx=1)
self.strucMB.menu = Tkinter.Menu ( self.strucMB, tearoff=0, postcommand=self.StrucMenu )
self.strucMB["menu"] = self.strucMB.menu
self.cur_mol = None
self.cur_chains = []
self.SetVisMol ()
l = Tkinter.Label(ff, text=" Chain:" )
l.grid(column=4, row=0, sticky='w')
self.chain = Tkinter.StringVar(parent)
self.chainMB = Tkinter.Menubutton ( ff, textvariable=self.chain, relief=Tkinter.RAISED, width=4 )
self.chainMB.grid (column=5, row=0, sticky='we', padx=1)
self.chainMB.menu = Tkinter.Menu ( self.chainMB, tearoff=0, postcommand=self.ChainMenu )
self.chainMB["menu"] = self.chainMB.menu
if len ( self.cur_chains ) > 0 :
self.chain.set ( self.cur_chains[0] )
#self.ShowCh ( self.cur_chains[0] )
self.GetSeq ()
l = Tkinter.Label(ff, text=" Show:" )
l.grid(column=6, row=0, sticky='w')
b = Tkinter.Button(ff, text="Chain", command=self.AllChain)
b.grid (column=7, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="All", command=self.AllChains)
b.grid (column=8, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="Sel.", command=self.ShowOnlySel)
b.grid (column=9, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="At.", command=self.SetSelAtoms)
b.grid (column=10, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="Rib.", command=self.SetSelRibbon)
b.grid (column=11, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="SCs", command=self.ShowSCs)
b.grid (column=12, row=0, sticky='w', padx=1)
b = Tkinter.Button(ff, text="~SCs", command=self.HideSCs)
b.grid (column=13, row=0, sticky='w', padx=1)
if 1 :
l = Tkinter.Label(ff, text=' Zoom:', fg="#777")
l.grid(column=35, row=0, sticky='e')
b = Tkinter.Button(ff, text="-", command=self.ZoomMinus)
b.grid (column=36, row=0, sticky='w', padx=0)
b = Tkinter.Button(ff, text="+", command=self.ZoomPlus)
b.grid (column=37, row=0, sticky='w', padx=0)
b = Tkinter.Button(ff, text="<", command=self.ZoomBegin)
b.grid (column=38, row=0, sticky='w', padx=0)
b = Tkinter.Button(ff, text=">", command=self.ZoomEnd)
b.grid (column=39, row=0, sticky='w', padx=0)
if 1 :
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w', pady=0, padx=5)
fff = Tkinter.Frame(ff, borderwidth=1, padx=2, pady=2, relief=Tkinter.GROOVE)
fff.grid(column=10, row=0, sticky='e', pady=0, padx=5)
l = Tkinter.Label(fff, text='Calculate:', anchor=Tkinter.W)
l.grid(column=1, row=0, sticky='w')
#b = Tkinter.Button(fff, text="Sigma", command=self.CalcAllSigma )
#b.grid (column=2, row=0, sticky='w', padx=5)
#b = Tkinter.Button(fff, text="RadZ", command=self.CalcAllRadZ )
#b.grid (column=4, row=0, sticky='w', padx=5)
if isModelZ :
b = Tkinter.Button(fff, text="Z-scores", command=self.CalcZScores )
b.grid (column=5, row=0, sticky='w', padx=5)
else :
b = Tkinter.Button(fff, text="Q", command=self.CalcAllQ )
b.grid (column=2, row=0, sticky='w', padx=5)
b = Tkinter.Button(fff, text="Qp", command=self.CalcAllQp )
b.grid (column=3, row=0, sticky='w', padx=5)
b = Tkinter.Button(fff, text="Qf", command=self.GetQsFromFile )
b.grid (column=4, row=0, sticky='w', padx=5)
#b = Tkinter.Button(fff, text="R", command=self.CalcAllR )
#b.grid (column=5, row=0, sticky='w', padx=5)
#b = Tkinter.Button(fff, text="R", command=self.CalcAllR )
#b.grid (column=5, row=0, sticky='w', padx=5)
if 0 :
self.colorMod = Tkinter.StringVar()
self.colorMod.set ( 'sc' )
b = Tkinter.Button(ff, text="Color:", command=self.DoColor)
b.grid (column=20, row=0, sticky='w', padx=5)
c = Tkinter.Radiobutton(ff, text="BB", variable=self.colorMod, value = 'bb')
c.grid (column=21, row=0, sticky='w')
c = Tkinter.Radiobutton(ff, text="SC", variable=self.colorMod, value = 'sc')
c.grid (column=22, row=0, sticky='w')
c = Tkinter.Radiobutton(ff, text="Rand", variable=self.colorMod, value = 'rand')
c.grid (column=23, row=0, sticky='w')
else :
l = Tkinter.Label(ff, text=' Color:', fg="#777")
l.grid(column=20, row=0, sticky='e')
b = Tkinter.Button(ff, text="Backbone", command=self.DoColorBB)
b.grid (column=21, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Side Chain", command=self.DoColorSC)
b.grid (column=22, row=0, sticky='w', padx=5)
if not isModelZ :
b = Tkinter.Button(ff, text="Atoms", command=self.DoColorAtoms)
b.grid (column=23, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Random", command=self.DoColorRandom)
b.grid (column=24, row=0, sticky='w', padx=5)
l = Tkinter.Label(ff, text='', fg="#000")
l.grid(column=25, row=0, sticky='ens')
ff = Tkinter.Frame(ff, borderwidth=1, padx=2, pady=2, relief=Tkinter.GROOVE)
ff.grid(column=30, row=0, sticky='e', pady=0, padx=5)
l = Tkinter.Label(ff, text='For sequence select: ', fg="#000")
l.grid(column=35, row=0, sticky='ens')
#oft = Hybrid.Checkbutton(ff, 'Ribbon', True)
#oft.button.grid(column = 36, row = 0, sticky = 'w')
#self.showRibbon = oft.variable
#self.showRibbon.set ( 1 )
#oft = Hybrid.Checkbutton(ff, 'Side Chains', True)
#oft.button.grid(column = 37, row = 0, sticky = 'w')
#self.showAtoms = oft.variable
#self.showRibbon.set ( 1 )
oft = Hybrid.Checkbutton(ff, 'Extract', False)
oft.button.grid(column = 37, row = 0, sticky = 'w')
self.selExtract = oft.variable
self.selExtract.set ( 1 )
oft = Hybrid.Checkbutton(ff, 'Mesh', False)
oft.button.grid(column = 38, row = 0, sticky = 'w')
self.showMesh = oft.variable
#self.showRibbon.set ( 1 )
#oft = Hybrid.Checkbutton(ff, 'Preserve', False, command=self.cb)
#oft.button.grid(column = 39, row = 0, sticky = 'w')
#self.preserveSel = oft.variable
self.preserveSel = Tkinter.IntVar()
oft = Tkinter.Checkbutton( ff, text="Keep", variable=self.preserveSel, command=self.preserveSelCb)
oft.grid(column = 39, row = 0, sticky = 'w')
#self.showRibbon.set ( 1 )
self.showLigands = Tkinter.IntVar()
oft = Tkinter.Checkbutton( ff, text="Ligands", variable=self.showLigands )
oft.grid(column = 40, row = 0, sticky = 'w')
#b = Tkinter.Button(ff, text="Clear", command=self.ClearSel)
#b.grid (column=40, row=0, sticky='w', padx=5)
#self.keepExMap = Tkinter.IntVar()
#self.keepExMap.set(0)
#oft = Tkinter.Checkbutton( ff, text="Keep Extracted Maps", variable=self.keepExMap, command=self.keepExMapCb)
#oft.grid(column = 40, row = 0, sticky = 'w')
if devMenu :
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w', pady=0, padx=5)
if 0 :
b = Tkinter.Button(ff, text="Asp", command=self.asp )
b.grid (column=1, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Extract Res", command=self.Extract )
b.grid (column=2, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Align 1", command=self.AlignRes1 )
b.grid (column=3, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Align 2", command=self.AlignRes2 )
b.grid (column=4, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Avg", command=self.Avg )
b.grid (column=5, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Close", command=self.CloseExtracted )
b.grid (column=6, row=0, sticky='w', padx=5)
#b = Tkinter.Button(ff, text="Sbb", command=self.BB_Sigma )
#b.grid (column=8, row=0, sticky='w', padx=5)
#b = Tkinter.Button(ff, text="Z", command=self.ZScoreSel )
#b.grid (column=9, row=0, sticky='w', padx=5)
#b = Tkinter.Button(ff, text="Zr", command=self.RotaZ1 )
#b.grid (column=10, row=0, sticky='w', padx=5)
#b = Tkinter.Button(ff, text="R1", command=self.R1 )
#b.grid (column=11, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="ExA", command=self.ExCustA )
b.grid (column=12, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="ExB", command=self.ExCustB )
b.grid (column=13, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="ExC", command=self.ExCustC )
b.grid (column=14, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="S-sel", command=self.S_sel )
b.grid (column=20, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="Q-sel", command=self.Q_sel )
b.grid (column=21, row=0, sticky='w', padx=5)
b = Tkinter.Button(ff, text="SA-Q", command=self.SA_Q )
b.grid (column=22, row=0, sticky='w', padx=5)
#b = Tkinter.Button(ff, text="Ats", command=self.ShowAts)
#b.grid (column=25, row=0, sticky='w', padx=10)
b = Tkinter.Button(ff, text="Alts", command=self.FindAlts)
b.grid (column=28, row=0, sticky='w', padx=10)
b = Tkinter.Button(ff, text="X-Alts", command=self.DelAlts)
b.grid (column=29, row=0, sticky='w', padx=10)
b = Tkinter.Button(ff, text="Nr", command=self.ShowNear)
b.grid (column=40, row=0, sticky='w', padx=10)
b = Tkinter.Button(ff, text="Ds", command=self.ShowDists)
b.grid (column=41, row=0, sticky='w', padx=10)
b = Tkinter.Button(ff, text="AProfs", command=self.AProfs)
b.grid (column=42, row=0, sticky='w', padx=10)
dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)
Tkinter.Frame(dummyFrame).pack()
dummyFrame.grid(row=row,column=0,columnspan=7, pady=3, sticky='we')
row += 1
global msg
msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg="red", pady=5, padx=10)
msg.grid(column=0, row=row, sticky='ew')
self.msg = msg
self.showingAtoms = False
#umsg ( 'Select one or more segmented regions then press "Place Points" to start' )
def InitVars ( self ) :
self.mag = 13
self.seqt = []
self.boldSeqT = None
self.drag = ''
#self.sheetBaseClr = numpy.array ( [50.0,205.0,50.0] )
#self.sheetClr = numpy.array ( [204.0,255.0,204.0] )
self.sheetBaseClr = numpy.array ( [55.0,55.0,150.0] )
self.sheetClr = numpy.array ( [150.0,150.0,250.0] )
self.sheetClrD = self.sheetClr - self.sheetBaseClr
self.helixBaseClr = numpy.array ( [150.0,50.0,50.0] )
self.helixClr = numpy.array ( [255.0,150.0,150.0] )
self.helixClrD = self.helixClr - self.helixBaseClr
c = self.helixBaseClr; self.helix1 = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
c = self.helixClr; self.helix2 = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
self.switch = "#522"
c = self.sheetBaseClr; self.strand1 = "#77F"
c = self.sheetClr; self.strand2 = "#77F"
c = self.sheetBaseClr; self.sheet1 = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
c = self.sheetClr; self.sheet2 = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
self.loop1 = "#999"
self.selColor = "#7e7"
self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')
#self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')
self.tw = self.font.measure ( "a" )
self.seq = ""
#self.OrderMods ()
def SetVisMap ( self ) :
dmap = None
mlist = OML(modelTypes = [VolumeViewer.volume.Volume])
for m in mlist :
if m.display and not "sel_masked" in m.name :
dmap = m
break
if dmap == None :
if len(mlist) > 0 :
dmap = mlist[0]
if dmap != None :
self.dmap.set ( dmap.name + " (%d)" % dmap.id )
self.cur_dmap = dmap
def MapMenu ( self ) :
self.dmapMB.menu.delete ( 0, 'end' ) # Clear menu
mlist = OML(modelTypes = [VolumeViewer.volume.Volume])
for m in mlist :
self.dmapMB.menu.add_radiobutton ( label=m.name+" (%d)"%m.id, variable=self.dmap,
command=lambda m=m: self.MapSelected(m) )
def MapSelected ( self, dmap ) :
self.cur_dmap = dmap
print "Selected " + dmap.name
self.GetSeq ()
self.ZoomBegin ()
def GetChains ( self, mol ) :
ct = {}
for r in mol.residues:
ct[r.id.chainId] = 1
clist = ct.keys()
clist.sort()
return clist
def SetVisMol ( self ) :
mol = None
mlist = OML(modelTypes = [chimera.Molecule])
for m in mlist :
if m.display :
mol = m
break
if mol == None :
if len(mlist) > 0 :
mol = mlist[0]
if mol != None :
self.struc.set ( mol.name + " (%d)" % mol.id )
self.cur_mol = mol
self.cur_chains = self.GetChains ( mol )
SetBBAts ( mol )
def StrucSelected ( self, mol ) :
self.cur_mol = mol
print "Selected ", mol.name, " - ", mol.id
if mol :
mlist = OML(modelTypes = [chimera.Molecule])
for m in mlist :
m.display = False
mol.display = True
self.cur_chains = self.GetChains ( mol )
if len(self.cur_chains) == 0 :
self.chain.set ( "" )
elif self.chain.get() in self.cur_chains :
print " - ch " + self.chain.get() + " already sel"
self.ShowCh ( self.chain.get() )
else :
self.chain.set ( self.cur_chains[0] )
self.ShowCh ( self.chain.get() )
self.GetSeq ()
self.ZoomBegin ()
SetBBAts ( mol )
def ChainSelected ( self, ch ) :
print " - sel chain: ", ch, self.chain.get()
self.ShowCh ( ch )
self.GetSeq ()
self.ZoomBegin ()
def StrucMenu ( self ) :
self.strucMB.menu.delete ( 0, 'end' ) # Clear menu
mlist = OML(modelTypes = [chimera.Molecule])
for m in mlist :
self.strucMB.menu.add_radiobutton ( label=m.name+" (%d)"%m.id, variable=self.struc,
command=lambda m=m: self.StrucSelected(m) )
def ChainMenu ( self ) :
self.chainMB.menu.delete ( 0, 'end' ) # Clear menu
print " - chain menu"
print self.cur_chains
for ch in self.cur_chains :
self.chainMB.menu.add_radiobutton ( label=ch, variable=self.chain,
command=lambda ch=ch: self.ChainSelected(ch) )
def DoColor ( self ) :
print "color...", self.colorMod.get()
#colSC = self.colorSC.get()
#colRand = self.colorRand.get()
if self.colorMod.get() == "rand" :
self.RandColorChains()
else :
self.UpdateModColor ()
#if self.colorMap.get() :
# self.UpdateSurfColor ()
def DoColorBB ( self ) :
self.UpdateModColor ( "bb" )
# self.RandColorChains()
def DoColorSC ( self ) :
self.UpdateModColor ( "sc" )
# self.RandColorChains()
def DoColorAtoms ( self ) :
self.UpdateModColor ( "ats" )
# self.RandColorChains()
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0)
doRess = self.GetCurRess()
for r in doRess :
for at in r.atoms :
at.label = ""
doRess = chimera.selection.currentResidues()
#if len(doRess) == 0 :
# doRess = self.GetCurRess()
if len(doRess) > 0 :
for r in doRess :
for at in r.atoms :
if at.display == True :
if 1 and hasattr (at, 'Q1') and hasattr (at, 'Q2') :
at.label = "(%.2f)" % ( (at.Q1+at.Q2)/2.0 )
else :
at.label = "%.2f" % at.Q
at.labelColor = chimera.MaterialColor (0,0,0,1)
#at.labelOffset = chimera.Vector(0,0,0)
nats = self.AtsWithin ( r.atoms, 3.0, allAtTree )
for at in nats :
if at.display == True :
if 1 and hasattr (at, 'Q1') and hasattr (at, 'Q2') :
at.label = "(%.2f)" % ( (at.Q1+at.Q2)/2.0 )
else :
at.label = "%.2f" % at.Q
at.labelColor = chimera.MaterialColor (0,0,0,1)
#at.labelOffset = chimera.Vector(0,0,0)
# at.label, labelColor, labelCoord, labelOffset
# at.label = "HI"
# at.labelColor = chimera.MaterialColor (0,0,0,1)
umsg ( "Labeled atoms" )
def DoColorRandom ( self ) :
self.RandColorChains ()
def UpdateSurfColor ( self ) :
print " - surf of %s, by %s" % ( self.cur_dmap.name, self.cur_mol.name )
numAt = 0
for r in self.cur_mol.residues :
for at in r.atoms :
if at.element.name == "H" :
pass
else :
numAt += 1
allAtPos = numpy.zeros ( (numAt, 3) )
allAts = [None] * numAt
numAt = 0
for r in self.cur_mol.residues :
for at in r.atoms :
if at.element.name == "H" :
pass
else :
allAtPos[numAt] = at.coord().data()
allAts[numAt] = at
at.allPtI = numAt
numAt += 1
print " - tree with %d ats" % numAt
allAtTree = AdaptiveTree ( allAtPos.tolist(), allAts, 4.0)
print " - done"
def UpdateModColor ( self, colorMod ) :
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
if not hasattr (self, 'scores') :
umsg ( "No scores... press Q, Qp, or Qf button first" )
return
foundScore = False
for sc in self.scores :
if sc != None :
foundScore = True
if not foundScore :
umsg ( "No scores... press Q, Qp, or Qf button first" )
return
minScore, maxScore = 0,0
if colorMod == "sc" :
minScore, maxScore = self.minSCscore, self.maxSCscore
else :
minScore, maxScore = self.minBBscore, self.maxBBscore
cH = numpy.array( [0.0,1.0,0.0] )
cL = numpy.array( [1.0,0.0,0.0] )
for ri, r in enumerate ( self.seqRes ) :
sc = None
#sc = self.scores[ri] if colorSC else self.scores2[ri]
sc = r.scQ if colorMod == "sc" else r.bbQ
if sc == None :
r.ribbonColor = chimera.MaterialColor ( .7, .7, .7, 1.0 )
for at in r.atoms :
#at.color = r.ribbonColor
try :
at.color = atomColors[at.name[0]]
except :
at.color = atomColors[' ']
else :
h = (sc - minScore) / (maxScore - minScore)
if h > 1 : h = 1
if h < 0 : h = 0
c = h * cH + (1-h) * cL
r.ribbonColor = chimera.MaterialColor ( c[0], c[1], c[2], 1.0 )
for at in r.atoms :
#at.color = r.ribbonColor
try :
at.color = atomColors[at.name[0]]
except :
at.color = atomColors[' ']
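    # Illustrative helper (not part of the original MapQ code): the red-to-green
    # interpolation that UpdateModColor applies per residue, factored into one call.
    def ScoreColor ( self, sc, minScore, maxScore ) :
        h = (sc - minScore) / float ( maxScore - minScore )
        h = max ( 0.0, min ( 1.0, h ) )  # clamp to [0,1]
        c = h * numpy.array ( [0.0,1.0,0.0] ) + (1.0-h) * numpy.array ( [1.0,0.0,0.0] )
        return chimera.MaterialColor ( float(c[0]), float(c[1]), float(c[2]), 1.0 )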
def RandColorChains ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
m = self.cur_mol
from random import random as rand
ct = {}
for r in m.residues: ct[r.id.chainId] = 1
clist = ct.keys()
clist.sort()
chains_clrs = {}
cnames = ""
for ci, cid in enumerate ( clist ) :
clr = ( rand()*.8+.1, rand()*.8+.1, rand()*.8+.1 )
chains_clrs[cid] = chimera.MaterialColor ( clr[0], clr[1], clr[2], 1.0 )
cnames = cnames + cid
print "%s - color ribbon for %d chains -" % ( m.name, len(cnames) ), cnames
# color atoms
for r in m.residues :
clr = chains_clrs[r.id.chainId]
r.ribbonColor = clr
for at in r.atoms :
at.color = clr
def AllChain ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return
umsg ( "Showing mol %s chain %s" % (self.cur_mol.name, chainId) )
#ct = {}
#for r in self.cur_mol.residues: ct[r.id.chainId] = 1
#clist = ct.keys()
#clist.sort()
for r in self.cur_mol.residues :
if r.id.chainId == chainId :
if ("CA" in r.atomsMap and "N" in r.atomsMap and "C" in r.atomsMap) or ("O3'" in r.atomsMap and "O5'" in r.atomsMap) :
r.ribbonDisplay = True
r.ribbonDrawMode = 2
else :
r.ribbonDisplay = False
for at in r.atoms :
at.drawMode = at.Ball
at.display = True
else :
if ("CA" in r.atomsMap and "N" in r.atomsMap and "C" in r.atomsMap) or ("O3'" in r.atomsMap and "O5'" in r.atomsMap) :
r.ribbonDisplay = False
r.ribbonDrawMode = 2
else :
r.ribbonDisplay = False
for at in r.atoms :
at.drawMode = at.Ball
at.display = False
def ShowOnlySel ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
m = self.cur_mol
rsel = chimera.selection.currentResidues ()
if len(rsel) == 0 :
umsg ("Show only selected residues - no residue found to be selected")
return
risel = {}
for r in rsel :
risel["%d.%s" % (r.id.position, r.id.chainId)] = 1
for r in m.residues :
rid = "%d.%s" % (r.id.position, r.id.chainId)
if rid in risel :
r.ribbonDisplay = not self.showingAtoms
for at in r.atoms :
if at.element.name == "H" :
at.display = False
else :
at.display = True
else :
r.ribbonDisplay = False
for at in r.atoms :
at.display = False
def FindAlts ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
m = self.cur_mol
atMap = {}
for r in m.residues :
hasAlt = False
for at in r.atoms :
if len(at.altLoc) > 0 :
hasAlt = True
break
if hasAlt :
r.ribbonDisplay = True
for at in r.atoms :
if at.element.name == "H" :
at.display = False
else :
at.display = True
atMap[at] = 1
at.drawMode = at.EndCap
if at.element.name in atomColors :
at.color = atomColors[at.element.name]
else :
at.color = atomColors[" "]
else :
r.ribbonDisplay = True
for at in r.atoms :
at.display = False
for bond in m.bonds :
#if bond.atoms[0] in atMap or bond.atoms[1] in atMap :
bond.display = bond.Smart
bond.drawMode = bond.Stick
def DelAlts ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
m = self.cur_mol
atMap = {}
for r in m.residues :
altScores = {}
for at in r.atoms :
if at.isSC :
alt = "_" if at.altLoc == '' else at.altLoc
if alt in altScores :
altScores[alt].append ( at.Q )
else :
altScores[alt] = [at.Q]
if len ( altScores.keys() ) > 1 :
#print " - res %s %d.%s" % (r.type, r.id.position, r.id.chainId)
keepAlt = ''
maxScore = 0
for alt, scores in altScores.iteritems() :
avg = numpy.mean(scores)
#print " %s: %.2f - %d" % (alt, avg, len(scores))
if avg > maxScore :
keepAlt = alt
maxScore = avg
print " - %s %d.%s, keeping %s score %.2f" % (r.type, r.id.position, r.id.chainId, keepAlt, maxScore)
for at in r.atoms :
if at.isSC :
if at.altLoc == keepAlt :
at.altLoc = ''
else :
m.deleteAtom ( at )
def AllChains ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
m = self.cur_mol
#ct = {}
#for r in m.residues: ct[r.id.chainId] = 1
#clist = ct.keys()
#clist.sort()
for r in m.residues :
if ("CA" in r.atomsMap and "N" in r.atomsMap and "C" in r.atomsMap) or ("O3'" in r.atomsMap and "O5'" in r.atomsMap) :
r.ribbonDisplay = True
r.ribbonDrawMode = 2
else :
r.ribbonDisplay = False
for at in r.atoms :
#at.drawMode = at.Ball
at.display = True
def GetCurRess ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return []
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return []
ress = []
for r in self.cur_mol.residues :
if r.id.chainId == chainId :
ress.append ( r )
return ress
def SetSelRibbon ( self ) :
selRess = chimera.selection.currentResidues()
if len(selRess) > 0 :
self.SetDrawMode ( chimera.selection.currentResidues(), showRibbon = True )
else :
self.SetDrawMode ( self.GetCurRess(), showRibbon = True )
self.showingAtoms = False
def SetSelAtoms ( self ) :
selRess = chimera.selection.currentResidues()
if len(selRess) > 0 :
self.SetDrawMode ( chimera.selection.currentResidues(), showRibbon = False )
else :
self.SetDrawMode ( self.GetCurRess(), showRibbon = False )
self.showingAtoms = True
def SetDrawMode ( self, ress, showRibbon = None ) :
#if showRibbon == None :
# showRibbon = segmod_dialog().showRibbon.get()
#showRibbon = True
atMap = {}
#atI = 0
#c1 = (1.0,0.0,0.0,1)
#c1 = (1.0,0.0,0.0,1)
for res in ress :
for at in res.atoms :
if res.isProt or res.isNA :
at.drawMode = at.EndCap
at.display = True # not showRibbon
if at.element.name in atomColors :
at.color = atomColors[at.element.name]
else :
at.color = atomColors[" "]
atMap[at] = 1
res.ribbonDisplay, res.ribbonDrawMode = showRibbon, res.Ribbon_Round
#f = float(atI) / float(len(ress)-1)
#res.ribbonColor = chimera.MaterialColor( f*0.8+0.2, 0.02, (1-f)*0.8+0.2, 1.0 );
#atI+=1
for bond in ress[0].molecule.bonds :
if bond.atoms[0] in atMap or bond.atoms[1] in atMap :
bond.display = bond.Smart
bond.drawMode = bond.Stick
def ShowAts ( self ) :
for mod in chimera.openModels.list() :
if type(mod) == chimera.Molecule and mod.display == True :
#cid = "1"
#rs = [520, 521, 635, 575, 298, 550, 525, 639, 551, 303, 547, 305, 519]
cid = "4"
rs = [38, 42, 242, 244, 246, 181, 182, 135, 251, 94, 98, 91, 95, 284]
#cid = "E"
#rs = [128, 33, 136]
for res in mod.residues :
#if res.id.position in rs and res.id.chainId == cid :
if res.id.position in rs :
for at in res.atoms :
at.drawMode = at.EndCap
at.display = True
try :
at.color = atomColors[at.name[0]]
except :
at.color = atomColors[" "]
def HideSCs ( self ) :
for mol in chimera.selection.currentMolecules() :
if not hasattr ( mol, 'bbats' ) :
SetBBAts(mol)
mol.bbats = True
ress = chimera.selection.currentResidues()
if len(ress) == 0 :
ress = self.GetCurRess()
for res in ress :
#if res.id.position in rs and res.id.chainId == cid :
for at in res.atoms :
#at.drawMode = at.EndCap
at.display = not at.isSC
#try :
# at.color = atomColors[at.name[0]]
#except :
# at.color = atomColors[" "]
def ShowSCs ( self ) :
for mol in chimera.selection.currentMolecules() :
if not hasattr ( mol, 'bbats' ) :
SetBBAts(mol)
mol.bbats = True
ress = chimera.selection.currentResidues()
if len(ress) == 0 :
ress = self.GetCurRess()
for res in ress :
for at in res.atoms :
#at.drawMode = at.EndCap
at.display = True
try :
at.color = atomColors[at.name[0]]
except :
at.color = atomColors[" "]
def ShowNear ( self ) :
for mol in chimera.selection.currentMolecules() :
if not hasattr ( mol, 'bbats' ) :
SetBBAts(mol)
mol.bbats = True
ress = chimera.selection.currentResidues()
if len(ress) == 0 :
ress = self.GetCurRess()
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0)
nearRes = {}
for r in ress :
nats = self.AtsWithin ( r.atoms, 3.0, allAtTree )
for at in nats :
nearRes[at.residue] = 1
for res in nearRes.keys() :
for at in res.atoms :
#at.drawMode = at.EndCap
at.display = True
                if at.name[0] in atomColors :
                    at.color = atomColors[at.name[0]]
def ShowDists ( self ) :
m1, m2 = [m for m in chimera.openModels.list() if m.display==True and type(m) == chimera.Molecule]
print " - m1: %s" % m1.name
print " - m2: %s" % m2.name
amap = {}
for at in m2.atoms :
atId = "%d.%s.%s.%s" % (at.residue.id.position,at.residue.id.chainId,at.name,at.altLoc)
amap[atId] = at
from chimera.resCode import protein3to1
tt, tt2, nt = {}, {}, {}
for at in m1.atoms :
atId = "%d.%s.%s.%s" % (at.residue.id.position,at.residue.id.chainId,at.name,at.altLoc)
if atId in amap :
at2 = amap[atId]
d = (at.coord()-at2.coord()).length
else :
print " - not found:", atId
continue
if at.display and not at.residue.type in protein3to1 :
if not at.name in tt :
tt[at.name] = d; tt2[at.name] = d*d; nt[at.name] = 1.0
else :
tt[at.name] += d; tt2[at.name] += d*d; nt[at.name] += 1.0
if at.residue.type in protein3to1 :
if at.isBB :
if not "BB" in tt :
tt["BB"] = d; tt2["BB"] = d*d; nt["BB"] = 1.0
else :
tt["BB"] += d; tt2["BB"] += d*d; nt["BB"] += 1.0
else :
if not "SC" in tt :
tt["SC"] = d; tt2["SC"] = d*d; nt["SC"] = 1.0
else :
tt["SC"] += d; tt2["SC"] += d*d; nt["SC"] += 1.0
for tp, D in tt.iteritems () :
N, D2 = nt[tp], tt2[tp]
rmsd = numpy.sqrt ( D2/N )
avgd = D/N
print "%s - %.0f atoms, avgd %.5f, rmsd: %.5f" % ( tp, N, avgd, rmsd )
def ShowCh ( self, ch ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
print " - showing chain:", ch
m = self.cur_mol
print " - cur mol:", m.name
ct = {}
for r in m.residues: ct[r.id.chainId] = 1
clist = ct.keys()
clist.sort()
for r in m.residues :
show = True if r.id.chainId == ch else False
if ("CA" in r.atomsMap and "N" in r.atomsMap and "C" in r.atomsMap) or ("O3'" in r.atomsMap and "O5'" in r.atomsMap) :
r.ribbonDisplay = show
#r.ribbonDrawMode = 2
for at in r.atoms :
at.display = False
else :
r.ribbonDisplay = False
for at in r.atoms :
#at.drawMode = at.Ball
at.display = show
def GetMod ( self, name ) :
for m in chimera.openModels.list() :
if name != None and len(name) > 0 :
if m.name == name :
return m
else :
if m.display == True :
return m
return None
def GetSeq ( self ) :
if self.cur_mol == None :
umsg ( "No selected molecule" )
return
if len ( self.chain.get() ) == 0 :
umsg ( "No selected chain" )
return
self.RemoveSeq ()
try :
print self.cur_mol.name
except :
print " - mol may have been closed"
return
self.GetSeqFromStruc ( self.cur_mol, self.chain.get() )
if len(self.seq) > 0 :
print "-- seq from open mol -- %d res" % len(self.seq)
print self.seq
self.seqt = []
self.seqSheetR = [None] * len(self.seq)
self.seqHelixR = [None] * len(self.seq)
self.seqScoreR = [None] * len(self.seq)
self.seqScoreR2 = [None] * len(self.seq)
self.scores2 = [None] * len(self.seq)
self.scores = [None] * len(self.seq)
self.UpdateSeqFont ()
return True
return False
def RemoveSeq (self) :
if self.seq == "" :
return
for si in range ( len(self.seq) ) :
res = self.seq[si]
pred = self.pred[si]
conf = float ( self.conf[si] ) / 10.0
if pred == 'E' :
if self.seqSheetR[si] != None :
self.Canvas.delete ( self.seqSheetR[si] )
elif pred == 'H' :
if self.seqHelixR[si] != None :
self.Canvas.delete ( self.seqHelixR[si] )
if self.seqScoreR[si] != None :
self.Canvas.delete ( self.seqScoreR[si] )
if self.seqScoreR2[si] != None :
self.Canvas.delete ( self.seqScoreR2[si] )
# box showing selected Residue
if hasattr ( self, 'seqMouseR' ) :
self.Canvas.delete ( self.seqMouseR )
del self.seqMouseR
if hasattr ( self, 'seqText' ) :
self.Canvas.delete ( self.seqText )
del self.seqText
self.seqSel = None
self.seq = ""
self.UpdateSeqSel ()
def GetSeqFromStruc ( self, mol, chainId ) :
print "Getting seq from %s, %s" % (mol.name, chainId)
self.conf = ""
self.pred = ""
self.seq = ""
self.seqRes = []
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
protein3to1['HSD'] = protein3to1['HIS']
rids = {}
for r in mol.residues :
if r.id.chainId == chainId :
if r.type in protein3to1 or r.type in nucleic3to1 :
rids[r.id.position] = r
ris = rids.keys()
ris.sort()
for ri in ris :
r = rids[ri]
if r.type in protein3to1 :
self.seq = self.seq + protein3to1[r.type]
self.conf = self.conf + "9"
self.predi = "C"
if r.isSheet :
self.predi = "E"
if r.isHelix :
self.predi = "H"
self.pred = self.pred + self.predi
self.seqRes.append ( r )
elif r.type in nucleic3to1 :
self.seq = self.seq + nucleic3to1[r.type]
self.conf = self.conf + "9"
self.predi = "C"
self.pred = self.pred + self.predi
self.seqRes.append ( r )
def SSE ( self ) :
print "sse"
#self.GetFromMol ( mod, chainId )
def CurRes ( self ) :
#self.GetFromMol ( mod, chainId )
if self.cur_mol == None :
umsg ( "No selected molecule" )
return []
if self.cur_dmap == None :
umsg ( "No selected map" )
return []
if len ( self.chain.get() ) == 0 :
umsg ( "No selected chain" )
return []
from chimera.resCode import protein3to1
protein3to1['HSD'] = protein3to1['HIS']
rids = {}
for r in self.cur_mol.residues :
if r.id.chainId == self.chain.get() :
if r.type in protein3to1 :
rids[r.id.position] = r
print " - %d residues" % len(rids.values())
#return [ rids[6] ]
return rids.values ()
def CalcZScores ( self ) :
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
self.scores2 = [None] * len(self.seqRes)
scoreI = 0
status ( "Getting secondary structure elements..." )
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
resolution = 3.0 * self.cur_dmap.data.step[0]
#resolution = 3.0
umsg ( "Calculating backbone Z-scores..." )
zscores2 = []
if 0 : # old
sses = SSEs ( self.seqRes )
#print " - ",len(sses),"sse for ", len(ress), "res"
atI = 1
for el in sses :
si, ei, ss, elRess = el
if atI % 10 == 0 :
status ( "BB scores: %d/%d" % (atI,len(sses) ) )
atI += 1
#if 1 or (startRes < 129 and endRes > 129) :
startResI, endResI, sseType, ress = el
#print " : %d-%d, %s, %d res" % (startResI, endResI, sseType, len(ress))
zscore, ccs = zBB ( self.cur_mol, ress, resolution, self.cur_dmap )
#print ss, si, "-", ei, zscore
if zscore != None :
zscores2.append ( zscore )
for r in elRess :
r.bbZ = zscore
self.scores2[scoreI] = zscore
scoreI += 1
else :
bbs = BBsegs ( self.seqRes )
W = 5
atRes = 0
for bb in bbs :
print "%d res, %d-%d" % (len(bb),bb[0].id.position,bb[-1].id.position)
for ri, r in enumerate ( bb ) :
firstRi = max ( 0, ri-(W-1)/2 )
lastRi = min ( len(bb)-1, ri+(W-1)/2 )
ress = bb[firstRi:lastRi+1]
zscore, ccs = zBB ( self.cur_mol, ress, resolution, self.cur_dmap )
#print " %d : %d - %d, %.3f" % (ri, firstRi, lastRi, zscore)
if atRes % 50 == 0 :
status ( "Backbone - residue %d/%d" % (atRes,len(self.seqRes) ) )
#print "%d/%d" % (atRes,len(self.seqRes))
print "."
atRes += 1
if zscore != None :
zscores2.append ( zscore )
r.bbZ = zscore
r.CCS = ccs
r.bbQ = zscore
self.scores2[scoreI] = zscore
scoreI += 1
#print zscores2
print " - %d res, min %.2f max %.2f, avg %.2f" % (len(ress), min(zscores2), max(zscores2), numpy.average(zscores2) )
self.avgScore2 = numpy.average ( zscores2 )
doRes = []
doAllResInMol = False
if doAllResInMol :
for res in self.cur_mol.residues :
if "CA" in res.atomsMap and "N" in res.atomsMap and "C" in res.atomsMap :
doRes.append ( res )
print "++++ added all %d res from %s ++++" % (len(doRes), self.cur_mol.name)
else :
for r in self.seqRes :
try :
blah # deliberately undefined - the NameError sends every residue to doRes so all scores are recomputed
ra = r.scZ
except :
doRes.append ( r )
#doRes = self.seqRes
#doRes = self.CurRes()
print " - need score for %d res" % len(doRes)
umsg ( "Calculating Side Chains / Bases Z-scores..." )
sczScores = []
if len(doRes) > 0 :
sczScores = CalcRotaZ ( self.cur_dmap, self.cur_mol, doRes )
#avgA, stdA = numpy.average ( A ), numpy.std ( A )
#umsg ( "Avg side chain Z-score: %.3f" % ( avgA ) )
if not doAllResInMol :
doRes = self.seqRes
self.scores = [None] * len(doRes)
for ri, r in enumerate ( doRes ) :
self.scores[ri] = r.scZ
scores = [x for x in self.scores if x is not None]
self.minScore = min ( scores )
self.maxScore = max ( scores )
self.avgScore = numpy.average ( scores )
print " - %d res, min %.2f max %.2f, avg %.2f" % (len(doRes),self.minScore,self.maxScore, self.avgScore)
self.minSCscore, self.maxSCscore = 0,2
self.minBBscore, self.maxBBscore = 0,4
bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
umsg ( "Average BB Z-score: %.2f (%.1fA), Average Side Chain Z-score: %.2f (%.1fA)" % (self.avgScore2, bbRes, self.avgScore, scRes) )
self.UpdateSeq ()
sByType = {}
rByType = {}
for r in doRes :
if r.scZ != None :
if not r.type in sByType :
rByType[r.type] = []
sByType[r.type] = []
rByType[r.type].append ( [r.scZ, r] )
sByType[r.type].append ( [r.scZ] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
avgs.sort ( reverse=True, key=lambda x: x[0] )
#mpath, mname = os.path.split ( dmap.data.path )
dname, dext = os.path.splitext ( self.cur_dmap.data.path )
#mfname = os.path.split ( self.cur_mol.openedAs[0] )[-1]
#mname, mext = os.path.splitext ( mfname )
avgm, numt = {}, {}
for avgScore, rtype in avgs :
rscores = rByType[rtype]
rscores.sort ( reverse=True, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
if R.isProt :
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, protein3to1[rtype], numRes, avgScore, R.id.position, R.id.chainId, highestScore)
else :
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, nucleic3to1[rtype], numRes, avgScore, R.id.position, R.id.chainId, highestScore)
avgm[rtype] = avgScore
numt[rtype] = numRes
ofname = "%s__%s__scz_rtype.txt" % (dname, self.cur_mol.name)
print " -> ", ofname
fp = open ( ofname, "w" )
for rt in ["PHE", "PRO", "ILE", "LEU", "VAL"] : # , "GLY", , "ALA"
fp.write ( "%s\t%d\t%f\n" % (rt, numt[rt], avgm[rt]) )
for rt in ["MET"] :
fp.write ( "%s\t%d\t%f\n" % (rt, numt[rt], avgm[rt]) )
for rt in ["HIS", "ARG", "LYS", "TRP", "CYS"] : #
try :
fp.write ( "%s\t%d\t%f\n" % (rt, numt[rt], avgm[rt]) )
except :
print " - no %s" % rt
for rt in ["GLN", "ASN", "THR"] :
fp.write ( "%s\t%d\t%f\n" % (rt, numt[rt], avgm[rt]) )
for rt in ["TYR", "GLU", "ASP", "SER"] :
fp.write ( "%s\t%d\t%f\n" % (rt, numt[rt], avgm[rt]) )
fp.close()
def CalcAllR (self) :
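# compute the SCBBr score (CalcSCBBr) for every residue in the selected chain
# and show the same value in both the side chain and backbone tracks of the plot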
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
CalcSCBBr ( self.cur_mol, cid, self.cur_dmap )
self.scores, self.scores2 = [], []
scBB, scSC = [], []
for r in self.cur_mol.residues :
if cid == None or r.id.chainId == cid :
self.scores2.append ( r.SCBBr )
self.scores.append ( r.SCBBr )
r.scZ = r.SCBBr
r.bbZ = r.SCBBr
if r.SCBBr != None :
scBB.append ( r.SCBBr )
if r.SCBBr != None :
scSC.append ( r.SCBBr )
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average R sc : %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg)
print "Average R bb : %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg)
self.minSCscore, self.maxSCscore = 0.0,1
self.minBBscore, self.maxBBscore = 0.0,1
self.UpdateSeq ()
def CalcAllSigma (self) :
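# show previously computed sigma-based scores (r.bbZ, r.scZ) for the selected chain
# in the sequence plot and print min/max/avg as sigma and 1/sigma;
# note: the atom search tree built below is not used further in this method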
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
self.scores, self.scores2 = [], []
scBB, scSC = [], []
for r in self.cur_mol.residues :
if cid == None or r.id.chainId == cid :
self.scores2.append ( r.bbZ )
self.scores.append ( r.scZ )
if r.bbZ != None :
scBB.append ( r.bbZ )
if r.scZ != None :
scSC.append ( r.scZ )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average Sigma sc : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg, 1.0/scMin, 1.0/scMax, 1.0/scAvg)
print "Average Sigma bb : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg, 1.0/bbMin, 1.0/bbMax, 1.0/bbAvg)
self.minSCscore, self.maxSCscore = 0.0,0.5
self.minBBscore, self.maxBBscore = 0.0,0.2
self.UpdateSeq ()
def CalcAllQ (self) :
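# compute Q-scores for the selected chain with CalcQ (using a tree of non-H atoms)
# and show per-residue backbone (r.bbQ) and side chain (r.scQ) values in the plot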
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
if 0 :
for r in self.cur_mol.residues :
if hasattr ( r, 'Q' ) : del r.Q
if hasattr ( r, 'scQ' ) : del r.scQ
if hasattr ( r, 'bbQ' ) : del r.bbQ
CalcQ (self.cur_mol, self.chain.get(), self.cur_dmap, allAtTree=allAtTree )
self.scores, self.scores2 = [], []
scBB, scSC = [], []
for r in self.cur_mol.residues :
if cid == None or r.id.chainId == cid :
self.scores2.append ( r.bbQ )
self.scores.append ( r.scQ )
if r.bbQ != None :
scBB.append ( r.bbQ )
if r.scQ != None :
scSC.append ( r.scQ )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average Q sc : %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg )
print "Average Q bb : %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg )
self.minSCscore, self.maxSCscore = 0.0,1
self.minBBscore, self.maxBBscore = 0.0,1
self.UpdateSeq ()
def CalcAllQp (self) :
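# compute Q-scores with CalcQp for the selected chain, save the model with Q-scores
# in the occupancy field as <model>__Q__<map>.pdb, then show per-residue r.bbQ/r.scQ
# (or r.Q for non-protein/non-nucleic residues) in the plot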
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
#ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
#points = _multiscale.get_atom_coordinates ( ats, transformed = False )
#print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
#allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
if 0 :
for r in self.cur_mol.residues :
if hasattr ( r, 'Q' ) : del r.Q
if hasattr ( r, 'scQ' ) : del r.scQ
if hasattr ( r, 'bbQ' ) : del r.bbQ
CalcQp (self.cur_mol, self.chain.get(), self.cur_dmap, allAtTree=None )
molPath = os.path.splitext(self.cur_mol.openedAs[0])[0]
mapName = os.path.splitext(self.cur_dmap.name)[0]
nname = molPath + "__Q__" + mapName + ".pdb"
print "Saving pdb with Q-scores:", nname
chimera.PDBio().writePDBfile ( [self.cur_mol], nname )
self.scores, self.scores2 = [], []
scBB, scSC = [], []
for r in self.cur_mol.residues :
if cid == None or r.id.chainId == cid :
if r.isProt or r.isNA :
self.scores2.append ( r.bbQ )
self.scores.append ( r.scQ )
if r.bbQ != None :
scBB.append ( r.bbQ )
if r.scQ != None :
scSC.append ( r.scQ )
else :
self.scores2.append ( r.Q )
self.scores.append ( r.Q )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average Q sc : %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg )
print "Average Q bb : %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg )
self.minSCscore, self.maxSCscore = 0.0,1
self.minBBscore, self.maxBBscore = 0.0,1
self.UpdateSeq ()
def GetQsFromFile (self) :
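# read Q-scores back from <model>__Q__<map>.pdb (stored in the occupancy column),
# assign them to matching atoms as at.Q (and at.Q1/at.Q2 for _h1/_h2 half maps),
# recompute per-residue Q with CalcResQ, report overall and per-type averages,
# update the plot and run QStats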
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
chainId = self.chain.get()
if 0 :
for r in self.cur_mol.residues :
if hasattr ( r, 'Q' ) : del r.Q
if hasattr ( r, 'scQ' ) : del r.scQ
if hasattr ( r, 'bbQ' ) : del r.bbQ
molPath = os.path.splitext(self.cur_mol.openedAs[0])[0]
mapName = os.path.splitext(self.cur_dmap.name)[0]
nname = molPath + "__Q__" + mapName + ".pdb"
halfMap1, halfMap2 = "_h1" in mapName, "_h2" in mapName
if halfMap1 : print " - half map 1"
if halfMap2 : print " - half map 2"
rids = {}
for r in self.cur_mol.residues :
rids["%d.%s" % (r.id.position,r.id.chainId)] = r
# http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM
fin = open ( nname, "r" )
for line in fin :
if line[0:4] == "ATOM" or line[0:6] == "HETATM" :
aname, aloc, cid, resi, occ, bfac = line[12:16].strip(), line[16:17].strip(), line[21], int(line[22:26]), float ( line[54:60] ), float ( line[60:66] )
if occ < 1.0 :
rid = "%s.%s" % (resi,cid)
if rid in rids :
r = rids[rid]
if aname in r.atomsMap :
ats = r.atomsMap[aname]
found = False
for at in ats :
if at.altLoc == aloc :
at.occupancy = at.Q = occ
if not halfMap1 and not halfMap2 and hasattr(at,'Q1') :
del at.Q1
if not halfMap1 and not halfMap2 and hasattr(at,'Q2') :
del at.Q2
if halfMap1 : at.Q1 = at.Q
if halfMap2 : at.Q2 = at.Q
found = True
if not found :
#print " -xx- %s.%s - atom %s - loc %s" % (resi, cid, aname, aloc)
continue
else :
#print " -xx- %s.%s - atom %s" % (resi,cid, aname)
continue
fin.close ()
self.scores, self.scores2 = [], []
scBB, scSC = [], []
totQ, totN = 0.0, 0.0
QT, QN = { "Protein":0.0, "Nucleic":0.0, "Other":0.0 }, { "Protein":0.0, "Nucleic":0.0, "Other":0.0}
doRess = []
for r in self.cur_mol.residues :
if r.id.chainId == chainId :
doRess.append ( r )
print "Calc for %d res..." % ( len(doRess) )
for r in doRess :
CalcResQ (r, None, None, useOld=True )
for at in r.atoms :
totQ += at.Q; totN += 1.0
tp = "Other"
if at.residue.isProt : tp = "Protein"
if at.residue.isNA : tp = "Nucleic"
QT[tp] += at.Q; QN[tp] += 1.0
#try :
# CalcResQ (r, None, None, useOld=True )
#except :
# print " - x - res %d.%s/%s.%s" % (r.id.position, r.id.chainId, chainId, r.type)
# pass
if r.isProt or r.isNA :
self.scores2.append ( r.bbQ )
self.scores.append ( r.scQ )
if r.bbQ != None :
scBB.append ( r.bbQ )
if r.scQ != None :
scSC.append ( r.scQ )
else :
self.scores2.append ( r.Q )
self.scores.append ( r.Q )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average Q sc : %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg )
print "Average Q bb : %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg )
umsg ( "Model Q-score: %.2f" % (totQ/totN) )
for tp in ["Other", "Protein", "Nucleic"] :
if QN[tp] > 0 :
print " %s: %.2f" % (tp, QT[tp]/QN[tp])
self.minSCscore, self.maxSCscore = 0.0,1
self.minBBscore, self.maxBBscore = 0.0,1
self.UpdateSeq ()
self.QStats ()
def QStats ( self ) :
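# group Q-scores for the selected chain by residue type, including per-atom averages
# for a few side chain tip atoms (LEU CD, VAL CG, ARG NH, LYS NZ, ASP OD, GLU OE) and
# some hard-coded residue positions used in a specific analysis; write count, average,
# std-dev and the best-scoring residue per type to <map>__<model>.txt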
mol, dmap, chainId = self.cur_mol, self.cur_dmap, self.chain.get()
ress = []
for r in mol.residues :
if r.id.chainId == chainId :
ress.append ( r )
sByType = {}
rByType = {}
def addType (tp, r, score) :
if not tp in sByType :
rByType[tp] = []
sByType[tp] = []
rByType[tp].append ( [score, r] )
sByType[tp].append ( [score] )
for r in ress :
if r.isProt and r.type == "LEU" :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU(CD)", r, avg )
if r.isProt and r.type == "LEU" and r.id.position==114 :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU_114(CD)", r, avg )
if r.isProt and r.type == "VAL" :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL(CG)", r, avg )
if r.isProt and r.type == "VAL" and r.id.position==33 :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL_33(CG)", r, avg )
if r.isProt and r.type == "ARG" :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==76 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_76(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==9 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_9(NH)", r, avg )
if r.isProt and r.type == "LYS" :
avg = r.atomsMap["NZ"][0].Q
addType ( "LYS(NZ)", r, avg )
if r.isProt and r.type == "ASP" :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==42 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_42(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==131 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_131(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==171 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_171(OD)", r, avg )
if r.isProt and r.type == "GLU" :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==17 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_17(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==27 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_27(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==67 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_67(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==134 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_134(OE)", r, avg )
if r.isProt or r.isNA :
if r.scQ :
addType ( r.type, r, r.scQ )
else :
addType ( r.type, r, r.Q )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype, numpy.std (ra)] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
# sort by avg score
#avgs.sort ( reverse=True, key=lambda x: x[0] )
# sort by residue type
avgs.sort ( reverse=False, key=lambda x: x[1] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype, sdev in avgs :
rscores = rByType[rtype]
if len(rscores) > 0 :
rscores.sort ( reverse=True, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[R.type]
elif R.isNA : rts = nucleic3to1[R.type]
print "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore) )
fp.close()
def SA_Q (self) :
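# estimate solvent-accessible area per residue (subject to the debug toggles in the
# loop below): sample ~300 points on a sphere of radius vdW+1.4A around each side
# chain atom and keep points with no molecular-surface point within the 1.4A water
# probe radius; write type, position, Q and area to <model>__SA-Q__<map>.txt and
# print per-residue-type totals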
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
chainId = self.chain.get()
#ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
#points = _multiscale.get_atom_coordinates ( ats, transformed = False )
#print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
#allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
umsg ( "Solvent Accessibility vs. Q... making surface for %d atoms..." % len(self.cur_mol.atoms) )
print ".",
# https://en.m.wikipedia.org/wiki/Van_der_Waals_radius
vdwRadii = { 'H' : 1.2, # (1.09)[1]
'C' : 1.7,
'N' : 1.55,
'O' : 1.52,
'F' : 1.47,
'P' : 1.8,
'S' : 1.8 }
#vdwRadii = { 'H' : 1.5, 'C' : 1.5, 'N' : 1.5, 'O' : 1.5, 'F' : 1.5, 'P' : 1.5, 'S' : 1.5 }
if GetMod ( "Surface Pts" ) : chimera.openModels.close ( [GetMod ( "Surface Pts" )] )
if GetMod ( "SA pts" ) : chimera.openModels.close ( [GetMod ( "SA pts" )] )
if GetMod ( "SA- pts" ) : chimera.openModels.close ( [GetMod ( "SA pts" )] )
if GetMod ( "ASP pts" ) : chimera.openModels.close ( [GetMod ( "SA pts" )] )
surfPts = []
for at in self.cur_mol.atoms :
VWR = vdwRadii[at.element.name] if at.element.name in vdwRadii else 1.55
apts = SpherePts ( at.coord(), VWR, 100 )
#apts.extend ( SpherePts ( at.coord(), VWR/2.0, 50 ) )
apts.append ( at.coord().data() )
surfPts.extend ( apts )
#AddSpherePts ( apts, atomColors2[at.element.name], 0.1, "Surface Pts" )
#AddSpherePts ( apts, (.7,.7,.7,1), 0.1, "Surface Pts" )
umsg ( "Solvent Accessibility vs. Q... making tree for %d atoms, %d points..." % (len(self.cur_mol.atoms), len(surfPts) ) )
print ".",
surfPtsTree = AdaptiveTree ( surfPts, surfPts, 2.0 )
#AddSpherePts ( surfPts, atomColors2[at.element.name], 0.1, "Surface Pts" )
molPath = os.path.splitext(self.cur_mol.openedAs[0])[0]
mapName = os.path.splitext(self.cur_dmap.name)[0]
nname = molPath + "__SA-Q__" + mapName + ".txt"
fp = open ( nname, "w" )
umsg ( "Solvent Accessibility vs. Q ... saving to file %s" % nname )
print ".",
doRess = []
for r in self.cur_mol.residues :
if r.id.chainId == chainId :
doRess.append ( r )
print " - calc for %d res..." % len (doRess)
waterRad = 1.4
waterRad2 = 1.4*1.4
rt_sa = {}
for ri, r in enumerate ( doRess ) :
if 0 or r.type == "ASP" :
showPts = r.type == "ASP"
if 1 or not hasattr ( r, 'SAArea' ) :
numPtsOnSAS, tryPts = 0.0, 300.0
for at in r.scAtoms :
VWR = vdwRadii[at.element.name] if at.element.name in vdwRadii else 1.55
outPts = SpherePts ( at.coord(), VWR + waterRad, int(tryPts) )
#AddSpherePts ( outPts, (.9,.9,.2,1.0), 0.1, "ASP pts" )
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]; apt = numpy.array ( vPt )
opointsNear = surfPtsTree.searchTree ( vPt, waterRad )
onSurf = True
for npt in opointsNear :
v = apt - npt; r2 = numpy.sum ( v * v )
if r2 < waterRad2 :
onSurf = False; break
if onSurf :
numPtsOnSAS += 1.0
if showPts :
v = chimera.Point(pt[0], pt[1], pt[2]) - at.coord(); v.normalize()
pt = at.coord() + v * vdwRadii[at.element.name]
AddSpherePts ( [pt.data()], (.9,.2,.9,1.0), 0.1, "SA pts" )
#AddSpherePts ( [vPt], (.2,.9,.9,0.8), 0.11, "SA- pts" )
# note: numPtsOnSAS is accumulated over all side chain atoms but only the last atom's radius is used here, so this area is approximate
r.SAArea = 4 * numpy.pi * numpy.power ( vdwRadii.get(at.element.name, 1.55), 2.0 ) * numPtsOnSAS / tryPts
if hasattr (r, 'scQ') and r.scQ != None :
fp.write ( "%s\t%d\t%f\t%f\n" % (r.type, r.id.position, r.scQ, r.SAArea) )
elif hasattr (r, 'Q') and r.Q != None :
fp.write ( "%s\t%d\t%f\t%f\n" % (r.type, r.id.position, r.Q, r.SAArea) )
if r.type in rt_sa :
rt_sa[r.type] += r.SAArea
else :
rt_sa[r.type] = r.SAArea
if ri % 10 == 0 :
umsg ( "SA - res %d/%d" % (ri, len(doRess)) )
fp.close()
umsg ( "Solvent Accessibility vs. Q ... saved to file %s ... done" % nname )
#nname = molPath + "__SAa-Q__" + mapName + ".txt"
#fp = open ( nname, "w" )
print "SA area by rtype:"
totalSA = 0.0
for rt, saa in rt_sa.iteritems () :
print "%s\t%f" % (rt, saa)
totalSA += saa
print " - total SA:%f" % totalSA
print "SA area / total area by rtype:"
for rt, saa in rt_sa.iteritems () :
print "%s\t%f" % (rt, saa/totalSA)
def CalcAllRadZ (self) :
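# compute radial Z-scores with CalcRadZ for the selected chain (using a tree of
# non-H atoms) and show per-residue r.bbZ/r.scZ in the plot (0-4 range)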
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(self.cur_mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
CalcRadZ ( self.cur_mol, cid, self.cur_dmap, allAtTree, useOld=False, log=True )
self.scores, self.scores2 = [], []
scBB, scSC = [], []
for r in self.cur_mol.residues :
if cid == None or r.id.chainId == cid :
self.scores2.append ( r.bbZ )
self.scores.append ( r.scZ )
if r.bbZ != None :
scBB.append ( r.bbZ )
if r.scZ != None :
scSC.append ( r.scZ )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
print "Average RadZ sc : %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg)
print "Average RadZ bb : %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg)
umsg ( "Average Side Chain: %.2f, Backbone: %.2f" % (scAvg, bbAvg) )
self.minSCscore, self.maxSCscore = 0.0,4
self.minBBscore, self.maxBBscore = 0.0,4
self.UpdateSeq ()
def CalcAllRotaZ (self) :
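# show existing per-residue scores (r.bbZ, r.scZ) for the selected chain in the plot;
# the CalcRotaZ call itself is commented out below, and atoms missing the isBB flag
# are reported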
ress = []
try :
ress = self.seqRes
except :
pass
if len ( ress ) == 0 :
umsg ( "No molecule/chain selected?" )
return
ok = True
try :
print self.cur_dmap.name
except :
status ( "Selected map not found; please choose another map" )
self.dmap.set ("")
ok = False
try :
print self.cur_mol.name
except :
status ( "Selected model not found; please choose another model" )
self.struc.set ("")
self.chain.set ("")
self.RemoveSeq ()
ok = False
if not ok :
return
cid = self.chain.get()
self.scores, self.scores2 = [], []
scBB, scSC = [], []
print "..."
#CalcRotaZ ( self.cur_dmap, self.cur_mol, self.cur_mol.residues )
for r in self.cur_mol.residues :
for at in r.atoms :
if not hasattr ( at, 'isBB' ) :
print " - noBB - atom %s, res %d.%s, chain %s" % (at.name, at.residue.id.position, at.residue.type, at.residue.id.chainId)
if cid == None or r.id.chainId == cid :
self.scores2.append ( r.bbZ )
self.scores.append ( r.scZ )
if r.bbZ != None :
scBB.append ( r.bbZ )
if r.scZ != None :
scSC.append ( r.scZ )
#bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334
#scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261
#scRes = (self.avgScore2 - 3.507) / -0.721
#bbRes = (self.avgScore - 6.1234) / -0.9191
scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)
bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)
umsg ( "Average Sigma sc : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f" % (scMin, scMax, scAvg, 1.0/scMin, 1.0/scMax, 1.0/scAvg) )
umsg ( "Average Sigma bb : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f" % (bbMin, bbMax, bbAvg, 1.0/bbMin, 1.0/bbMax, 1.0/bbAvg) )
self.minSCscore, self.maxSCscore = 0.0,2.0
self.minBBscore, self.maxBBscore = 0.0,2.0
self.UpdateSeq ()
def RtypeOut ( self, avgScore, rtype, rByType, fout ) :
pass
def UpdateSeqFont ( self ) :
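# after a zoom / font size change: recompute positions of the sheet (E) and helix (H)
# background rectangles (colors scaled by per-residue confidence), and reposition or
# recreate the sequence text and the mouse-highlight rectangle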
# http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
if not hasattr ( self, 'seq' ) :
print " - update seq font - no seq"
return
print "seq len %d, text w %d" % ( len(self.seq), self.tw )
# boxes for BBs
x_at = self.seqX
y_at = self.seqY + self.seqH/2
y0 = self.seqY+5
y1 = self.seqY+self.seqH-5
for si in range ( len(self.seq) ) :
res = self.seq[si]
pred = self.pred[si]
conf = float ( self.conf[si] ) / 10.0
if pred == 'E' :
x0 = self.seqX + si * self.tw
x1 = x0 + self.tw
#self.Canvas.coords ( self.seqMouseR, x0, y0, x1, y1 )
#self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.NORMAL )
if self.seqSheetR[si] == None :
c = self.sheetBaseClr + self.sheetClrD * conf
clr = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
self.seqSheetR[si] = self.Canvas.create_rectangle(x0, y0, x1, y1, outline=clr, fill=clr)
else :
self.Canvas.coords ( self.seqSheetR[si], x0, y0, x1, y1 )
elif pred == 'H' :
x0 = self.seqX + si * self.tw
x1 = x0 + self.tw
if self.seqHelixR[si] == None :
c = self.helixBaseClr + self.helixClrD * conf
clr = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
self.seqHelixR[si] = self.Canvas.create_rectangle(x0, y0, x1, y1, outline=clr, fill=clr)
else :
self.Canvas.coords ( self.seqHelixR[si], x0, y0, x1, y1 )
# box showing selected Residue
if hasattr ( self, 'seqMouseR' ) :
self.Canvas.coords ( self.seqMouseR, 0, 0, 0, 0 )
else :
self.seqMouseR = self.Canvas.create_rectangle(0, 0, 0, 0, outline="#aab", fill="#bbc", state=Tkinter.HIDDEN)
x_at = self.seqX
y_at = self.seqY + self.seqH/2
if hasattr ( self, 'seqText' ) :
self.Canvas.coords ( self.seqText, x_at, y_at )
self.Canvas.itemconfigure ( self.seqText, font=self.font )
else :
self.seqText = self.Canvas.create_text( x_at, y_at, text=self.seq, font=self.font, anchor='w')
#self.UpdateSeqSel ()
def UpdateSeq ( self ) :
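# reposition the sequence text and the E/H rectangles for the current seqX and zoom,
# and redraw the two per-residue score bars: self.scores (side chain, top half) and
# self.scores2 (backbone, bottom half), normalized to the current min/max ranges and
# colored red (low) to green (high)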
if not hasattr ( self, 'seq' ) :
print " - update seq - no seq"
return
x_at = self.seqX
y_at = self.seqY + self.seqH/2
if hasattr ( self, 'seqText' ) :
self.Canvas.coords ( self.seqText, x_at, y_at )
else :
self.seqText = self.Canvas.create_text( x_at, y_at, text=self.seq, font=self.font, anchor='w')
if 1 :
y0 = self.seqY+5
y1 = self.seqY+self.seqH-5
cH = numpy.array( [50,250,50] )
cL = numpy.array( [250,50,50] )
for si in range ( len(self.seq) ) :
#if i >= len ( self.seqt ) :
# t = self.Canvas.create_text( x_at, y_at, text=self.seq[i], font=self.font)
# self.seqt.append ( t )
#else :
# t = self.seqt [ i ]
# self.Canvas.coords ( t, x_at, y_at )
# x_at += self.tw
pred = self.pred[si]
if pred == 'E' :
if self.seqSheetR[si] != None :
x0 = self.seqX + si * self.tw
x1 = x0 + self.tw
self.Canvas.coords ( self.seqSheetR[si], x0, y0, x1, y1 )
elif pred == 'H' :
if self.seqHelixR[si] != None :
x0 = self.seqX + si * self.tw
x1 = x0 + self.tw
self.Canvas.coords ( self.seqHelixR[si], x0, y0, x1, y1 )
sc = None
try :
sc = self.scores[si]
except :
#continue
pass
if sc == None :
if self.seqScoreR[si] != None :
self.Canvas.delete ( self.seqScoreR[si] )
self.seqScoreR[si] = None
else :
xx0 = self.seqX + si * self.tw + 2
xx1 = xx0 + self.tw - 2
h = (sc - self.minSCscore) / (self.maxSCscore - self.minSCscore)
if h > 1 : h = 1
if h < 0 : h = 0
Y, H = self.modY, (self.modH/2 - 2)
yy0, yy1 = numpy.ceil(Y+H - H*h), numpy.floor(Y+H)
#c = self.helixBaseClr + self.helixClrD * conf
c = h * cH + (1-h) * cL
clr = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
if self.seqScoreR[si] != None :
self.Canvas.coords ( self.seqScoreR[si], xx0, yy0, xx1, yy1 )
self.Canvas.itemconfigure ( self.seqScoreR[si], outline=clr, fill=clr )
else :
self.seqScoreR[si] = self.Canvas.create_rectangle(xx0, yy0, xx1, yy1, outline=clr, fill=clr)
bb = None
try :
bb = self.scores2[si]
except :
#continue
pass
if bb == None :
if self.seqScoreR2[si] != None :
self.Canvas.delete ( self.seqScoreR2[si] )
self.seqScoreR2[si] = None
else :
xx0 = self.seqX + si * self.tw + 2
xx1 = xx0 + self.tw - 2
h = (bb - self.minBBscore) / (self.maxBBscore - self.minBBscore)
if h > 1 : h = 1
if h < 0 : h = 0
Y, H = self.modY, self.modH/2
#yy0, yy1 = Y+H, Y+H+H*h #upside down chart
yy0, yy1 = numpy.ceil(Y+H+H-H*h), numpy.floor(Y+H+H)
#c = self.helixBaseClr + self.helixClrD * conf
c = h * cH + (1-h) * cL
clr = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
if self.seqScoreR2[si] != None :
self.Canvas.coords ( self.seqScoreR2[si], xx0, yy0, xx1, yy1 )
self.Canvas.itemconfigure ( self.seqScoreR2[si], outline=clr, fill=clr )
else :
self.seqScoreR2[si] = self.Canvas.create_rectangle(xx0, yy0, xx1, yy1, outline=clr, fill=clr)
self.UpdateSeqSel ()
def SeqRec ( self, sel ) :
y0 = self.seqY+5
y1 = self.seqY+self.seqH-5
x0 = self.seqX + sel[0] * self.tw
x1 = self.seqX + (sel[1]+1) * self.tw
return x0, y0, x1, y1
def UpdateSeqSel ( self ) :
if not hasattr ( self, 'seqSel' ) :
return
if self.seqSel == None :
if hasattr(self, 'seqSelRect') :
self.Canvas.delete ( self.seqSelRect )
self.seqSelRect = None
return
x0, y0, x1, y1 = self.SeqRec ( self.seqSel )
if hasattr(self, 'seqSelRect') and self.seqSelRect != None :
self.Canvas.coords ( self.seqSelRect, x0, y0, x1, y1 )
else :
#c = self.helixBaseClr + self.helixClrD * conf
#clr = "#" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')
self.seqSelRect = self.Canvas.create_rectangle(x0, y0, x1, y1, outline=self.selColor, width=3)
def B1_Down (self, event):
self.drag = ''
#print "b1 _", event.x, event.y
if self.isInSeq ( event.x, event.y ) :
self.drag = 'seq'
self.last_x = event.x
self.last_y = event.y
def B1_Down_Ctrl ( self, event ) :
#print "b1 _ <ctrl>", event.x, event.y
self.drag = ''
if self.isInSeq ( event.x, event.y ) :
self.drag = 'seqSel'
if hasattr ( self, 'seqSel' ) and self.seqSel != None :
self.prevSeqSel = self.seqSel
else :
self.prevSeqSel = None
#print "sel seq..."
seqI = ( event.x - self.seqX ) / self.tw
status ( "Start sequence sel at %d" % (seqI+1) )
self.seqSel = [seqI, seqI]
self.UpdateSeqSel ()
self.last_x = event.x
self.last_y = event.y
def B1_Down_Shift ( self, event ) :
print "B1 down - shift"
self.drag = ''
if self.isInSeq ( event.x, event.y ) :
if hasattr ( self, 'seqSel' ) and self.seqSel != None :
seqI = ( event.x - self.seqX ) / self.tw
if seqI >= self.seqSel[0] and seqI <= self.seqSel[1] :
self.drag = "con"
if not hasattr ( self, 'conLine' ) or self.conLine == None :
self.conLine = self.Canvas.create_line( event.x, event.y, event.x, event.y, fill="red", dash=(1, 1), width=2)
status ( "In selected sequence at %d" % seqI )
def B1_Down_Alt ( self, event ) :
print "B1 down - alt"
self.drag = ''
if self.isInMod ( event.x, event.y ) :
self.dragMod = self.SelectedMod ( event.x, event.y )
if self.dragMod != None :
if self.dragMod.type == "Helix" :
self.drag = 'modRot'
self.dragStartX = event.x
def B1_Up_Ctrl ( self, event ) :
print "b1 up - ctrl - ", event.x, event.y
self.B1_Up ( event )
def B1_Up_Shift ( self, event ) :
print "b1 up - shift - "
self.B1_Up ( event )
def B1_Up_Alt ( self, event ) :
print "b1 up - alt - "
self.B1_Up ( event )
def B1_Up (self, event):
print "b1 up - ", event.x, event.y
if self.drag == 'seqSel' and hasattr ( self, 'seqSel' ) :
status ( "Selected: %d-%d" % (self.seqSel[0], self.seqSel[1]) )
if hasattr ( self, 'prevSeqSel' ) and self.prevSeqSel != None :
if self.seqSel[0] == self.seqSel[1] :
self.seqSel = None
self.prevSeqSel = None
self.UpdateSeqSel ()
status ( "Cleared sequence selection" )
chimera.selection.clearCurrent ()
if self.seqSel != None :
m, cid = self.cur_mol, self.chain.get()
if m != None :
startI = self.seqRes [ self.seqSel[0] ].id.position
endI = self.seqRes [ self.seqSel[1] ].id.position
selStr = "#%d:%d-%d.%s" % (m.id,startI,endI,cid)
self.lastSelStr = "%d-%d.%s" % (startI,endI,cid)
if hasattr ( self, 'prevSel' ) and self.preserveSel.get () :
for s in self.prevSel :
print " -s- adding to sel:", s
selStr = selStr + "," + s
else :
self.prevSel = []
if self.preserveSel.get () :
self.prevSel.append ( "%d-%d.%s" % (startI,endI,cid) )
print " - added to selection list..."
umsg ( "Selected: " + selStr )
sel = chimera.selection.OSLSelection ( selStr )
chimera.selection.setCurrent ( sel )
#chimera.selection.addCurrent ( sel )
if self.selExtract.get () :
self.ShowSel ()
else :
status ( "no model visible" )
#else :
# print "cleared past sel"
# self.prevSel = []
elif self.drag == 'modSel' :
status ( 'Selected %d mods' % len(self.selMods) )
elif self.drag == 'con' :
selMod = None
if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :
selMod = self.selModPiece
self.selModPiece = None
else :
return
if hasattr ( self, 'conLine' ) and self.conLine != None :
self.Canvas.delete ( self.conLine )
self.conLine = None
status ( "connected to %s" % selMod.type )
selMod.seq = self.seqSel
selMod.numRes = (self.seqSel[1] - self.seqSel[0] + 1)
selMod.MakeMod ()
self.UpdateMod ()
self.drag = ''
print "mod: ", self.modX, " seq:", self.seqX
def preserveSelCb (self) :
print "Preserve set to ", self.preserveSel.get()
if self.preserveSel.get() :
print " - setting current selection to preserve..."
if hasattr ( self, 'lastSelStr' ) :
self.prevSel = [ self.lastSelStr ]
else :
print " - clearing current"
self.prevSel = []
#def keepExMapCb (self) :
# print "Kep ex map set to ", self.keepExMap.get()
def ClearSel ( self ) :
self.prevSel = []
self.seqSel = None
self.prevSeqSel = None
self.UpdateSeqSel ()
status ( "Cleared sequence selection" )
chimera.selection.clearCurrent ()
def ExCustA ( self ) :
if self.cur_dmap == None :
umsg ("Select a map first")
return
if self.cur_mol == None :
umsg ("Select a molecule first")
return
#selStr = "#%d:80-87.I,171-184.I,227-239.I,80-87.A,171-184.A,227-239.A,80-87.B,171-184.B,227-239.B,80-87.J,171-184.J,227-239.J,80-87.H,171-184.H,227-239.H" % self.cur_mol.id
selStr = "#%d:80-87.I,171-184.I,227-239.I,80-87.A,171-184.A,227-239.A,80-87.J,171-184.J,227-239.J" % self.cur_mol.id
umsg ( "Selected: " + selStr )
sel = chimera.selection.OSLSelection ( selStr )
chimera.selection.setCurrent ( sel )
self.ShowSel()
def ExCustB ( self ) :
if self.cur_dmap == None :
umsg ("Select a map first")
return
if self.cur_mol == None :
umsg ("Select a molecule first")
return
selStr = "#%d:428-435.F,365-376.F,396-402.F,428-435.I,365-376.I,396-402.I" % self.cur_mol.id
#selStr = "#%d:428-435.A,365-376.A,396-402.A,428-435.H,365-376.H,396-402.H" % self.cur_mol.id
umsg ( "Selected: " + selStr )
sel = chimera.selection.OSLSelection ( selStr )
chimera.selection.setCurrent ( sel )
self.ShowSel()
def ExCustC ( self ) :
if self.cur_dmap == None :
umsg ("Select a map first")
return
if self.cur_mol == None :
umsg ("Select a molecule first")
return
#selStr = "#%d:10:548-558.I,520-530.I,548-558.F,520-530.F" % self.cur_mol.id
selStr = "#%d:428-435.F,365-376.F,396-402.F,428-435.I,365-376.I,396-402.I,548-558.I,520-530.I,548-558.F,520-530.F" % self.cur_mol.id
umsg ( "Selected: " + selStr )
sel = chimera.selection.OSLSelection ( selStr )
chimera.selection.setCurrent ( sel )
self.ShowSel()
def ShowSel ( self ) :
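# show the currently selected residues: display their atoms (hiding H) and ribbon per
# the toggles, color by element, hide everything else; optionally show non-protein /
# non-nucleic residues with atoms within 3A of the shown atoms; then mask the current
# map within 2.5A of the shown atoms into a "<map> sel_masked" volume (plus a mesh
# copy if enabled) and report the average score of the selection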
#showRibbon = self.showRibbon.get()
showRibbon = not self.showingAtoms # self.showRibbon.get()
showLigands = self.showLigands.get()
showSC = True # self.showAtoms.get()
atoms = []
scores = []
selResM = {}
for r in chimera.selection.currentResidues () :
rid = "%d.%s" % (r.id.position, r.id.chainId)
selResM [rid] = 1
if self.cur_mol == None :
return
if 1 or not hasattr ( self.cur_mol, 'bbats' ) :
SetBBAts(self.cur_mol)
self.cur_mol.bbats = True
for r in self.cur_mol.residues :
rid = "%d.%s" % (r.id.position, r.id.chainId)
if rid in selResM :
if hasattr (r, 'scZ') and r.scZ != None :
scores.append(r.scZ)
r.ribbonDisplay = showRibbon
for at in r.atoms :
if at.element.name == "H" :
at.display = False
elif at.isSC :
if showSC :
at.display = True
atoms.append ( at )
else :
at.display = False
else :
at.display = True
atoms.append ( at )
if at.element.name in atomColors :
if at.isBB :
at.color = atomColors[at.element.name]
#if at.element.name == "C" :
# at.color = atomColors['Cbb']
else :
at.color = atomColors[at.element.name]
else :
r.ribbonDisplay = False
for at in r.atoms :
at.display = False
atTree = None
if showLigands :
points = _multiscale.get_atom_coordinates ( atoms, transformed = False )
print " - search tree: %d/%d ats" % ( len(atoms), len(r.molecule.atoms) )
atTree = AdaptiveTree ( points.tolist(), atoms, 2.0)
from chimera.resCode import protein3to1
ligAts = []
for r in self.cur_mol.residues :
rid = "%d.%s" % (r.id.position, r.id.chainId)
#if r.type == "MG" or r.type == "HOH" :
if not r.isProt and not r.isNA :
if len ( self.AtsWithin (r.atoms, 3.0, atTree) ) > 0 :
for at in r.atoms :
at.display = True
if at.element.name in atomColors :
at.color = atomColors[at.element.name]
atoms.append ( at )
ligAts.append ( at )
else :
for at in r.atoms :
at.display = False
chimera.selection.clearCurrent ()
chimera.selection.addCurrent ( ligAts )
#for bond in self.seqRes[0].molecule.bonds :
# bond.display = bond.Smart
#if bond.atoms[0] in atMap and bond.atoms[1] in atMap :
# #bond.display = bond.Smart
# bond.display = bond.Smart
#else :
# #bond.display = bond.Never
# bond.display = bond.Smart
if len(atoms) > 0 :
from _multiscale import get_atom_coordinates
points = get_atom_coordinates ( atoms, transformed = True )
COM, U, S, V = prAxes ( points )
moveCam = 0
if moveCam :
p0 = numpy.array ( chimera.viewer.camera.center )
p1 = numpy.array ( [ COM[0], COM[1], COM[2] ] )
for i in range (10) :
f = float(i) / 9.0
f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f
P = p0 * f1 + p1 * f2
chimera.viewer.camera.center = (P[0],P[1],P[2])
print ".",
print ""
atomRad = 2.5 # float ( self.maskWithSelDist.get() )
print " - %d selected atoms, mask at %.2f" % ( len(atoms), atomRad )
dmap = self.cur_dmap
mlist = OML(modelTypes = [VolumeViewer.volume.Volume])
for m in mlist :
if "sel_masked" in m.name :
chimera.openModels.close ( [m] )
if len ( atoms ) > 0 and dmap != None :
#points = get_atom_coordinates ( atoms, transformed = False )
self.PtsToMap ( points, dmap, atomRad, dmap.name + " sel_masked", False )
if self.showMesh.get () :
self.PtsToMap ( points, dmap, atomRad, dmap.name + " sel_masked_mesh", True )
if len(scores) > 0 :
umsg ( "%d residue scores, avg score %.1f" % ( len(scores), numpy.average(scores) ) )
else :
umsg ( "no atoms selected, try reselecting the model and chain..." )
def AtsWithin (self, ats, R, atTree) :
nearAts = []
R2 = R * R
for at in ats :
pt = at.coord()
vPt = numpy.array ( [pt[0], pt[1], pt[2]] )
opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )
if len(opointsNear) > 0 :
for p in opointsNear :
v = vPt - p.coord().data()
sqSum = numpy.sum ( v * v )
if sqSum < R2 :
nearAts.append (p)
return nearAts
def PtsToMap0 ( self, points, dmap, atomRad, nname, neg = 1.0 ) :
import _contour
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )
gdata = VolumeData.Array_Grid_Data ( mdata.full_matrix()*neg, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = nname )
nv = VolumeViewer.volume.volume_from_grid_data ( gdata )
nv.name = nname
dmap.display = False
nv.region = ( nv.region[0], nv.region[1], [1,1,1] )
nv.surface_levels[0] = dmap.surface_levels[0]
ro = VolumeViewer.volume.Rendering_Options()
#ro.smoothing_factor = .5
#ro.smoothing_iterations = 20
#ro.surface_smoothing = True
nv.update_surface ( False, ro )
for sp in nv.surfacePieces :
v, t = sp.geometry
if len(v) == 8 and len(t) == 12 :
sp.display = False
else :
sp.color = (0.7, 0.7, 0.7, 0.2)
def PtsToMap ( self, points, dmap, atomRad, nname, showMesh = False ) :
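# interpolate the map onto a new grid that covers the given points with a ~5 grid
# point margin (same step size), mask it within atomRad of the points, and open the
# result as a new volume at the original contour level, shown as a transparent
# surface or as a mesh if showMesh is set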
#_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )
import _contour
points1 = numpy.copy ( points )
_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
points0 = numpy.copy ( points1 )
_contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )
bound = 5
li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li,lj,lk, hi,hj,hk, n1,n2,n3 )
#nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )
#dmat = dmap.full_matrix()
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
#nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
#print " - %s origin:" % dmap.name, O
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
#print " - new map origin:", nO
nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
#print " - fmap grid dim: ", numpy.shape ( fmap.full_matrix() )
#print " - new map grid dim: ", numpy.shape ( nmat )
npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
#dvals = dmap.interpolated_values ( npoints, chimera.Xform.identity() )
#dvals = dmap.interpolated_values ( npoints, dmap.openState.xform.inverse() )
#dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )
#nze = numpy.nonzero ( dvals )
nmat = dvals.reshape( (nn3,nn2,nn1) )
#f_mat = fmap.data.full_matrix()
#f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )
#df_mat = df_mat * f_mask
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
#try : nv = VolumeViewer.volume.add_data_set ( ndata, None )
#except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
#nv.openState.xform = dmap.openState.xform
mdata = VolumeData.zone_masked_grid_data ( ndata, points0, atomRad )
gdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), nO, nstep, dmap.data.cell_angles, name = "atom masked" )
nv = VolumeViewer.volume.volume_from_grid_data ( gdata )
nv.openState.xform = dmap.openState.xform
nv.name = nname
dmap.display = False
nv.region = ( nv.region[0], nv.region[1], [1,1,1] )
nv.surface_levels[0] = dmap.surface_levels[0]
ro = VolumeViewer.volume.Rendering_Options()
ro.smoothing_factor = .3
ro.smoothing_iterations = 2
ro.surface_smoothing = False
ro.square_mesh = True
ro.line_thickness = 3
nv.update_surface ( False, ro )
setro (ro)
for sp in nv.surfacePieces :
v, t = sp.geometry
if len(v) == 8 and len(t) == 12 :
sp.display = False
else :
if showMesh :
sp.color = (.5, .5, .5, 1.0)
sp.displayStyle = sp.Mesh
else :
sp.color = (0.7, 0.7, 0.7, 0.1)
def B1_Drag (self, event):
#print "b1m ", event.x, event.y
if self.drag == 'seq' :
d = event.x - self.last_x
self.seqX += d
#GetSegMod().seqX = self.seqX
self.UpdateSeq ()
elif self.drag == 'mod' :
d = event.x - self.last_x
self.modX += d
#GetSegMod().modX = self.modX
self.UpdateMod ()
elif self.drag == 'seqSel' :
if hasattr ( self, 'seqSel' ):
seqI = ( event.x - self.seqX ) / self.tw
if seqI > self.seqSel[0] :
self.seqSel[1] = seqI
elif seqI < self.seqSel[1] :
self.seqSel[0] = seqI
status ( "Sequence selected %d - %d" % (self.seqSel[0]+1, self.seqSel[1]+1) )
self.UpdateSeqSel ()
elif self.drag == 'con' :
x1, y1, x2, y2 = self.Canvas.coords ( self.conLine )
self.Canvas.coords ( self.conLine, x1, y1, event.x, event.y )
self.SelectedModClr ( event.x, event.y )
elif self.drag == "modRot" :
dx = event.x - self.dragStartX
self.dragStartX = event.x
self.dragMod.Rotate ( dx )
self.last_x = event.x
self.last_y = event.y
def B2_Down (self, event):
print "b2 - down"
def B2_Up (self, event):
print "b2 - up", event.x, event.y
if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :
if self.selModPiece.type == "Loop" :
self.selModPiece.MakeMod ()
else :
self.selModPiece.switch = not self.selModPiece.switch
self.selModPiece.MakeMod ()
self.UpdateMod ()
def B2_Up_Ctrl (self, event):
print "b2 - up - control", event.x, event.y
if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :
if self.selModPiece.type == "Loop" :
MakeLoopMod1 ( self.selModPiece )
#MakeLoopMod ( self.selModPiece )
def B2_Up_Alt (self, event):
print "b2 - up - alt", event.x, event.y
if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :
if self.selModPiece.type == "Loop" :
LoopPathOpt ( self.selModPiece, self.refUseMap.get() )
def B2_Up_Shift (self, event):
print "b2 - up - alt", event.x, event.y
if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :
if self.selModPiece.type == "Loop" :
LoopPathOpt ( self.selModPiece, self.refUseMap.get() )
def B2_Up_Comm (self, event):
print "b2 - up - command", event.x, event.y
def B2_Drag (self, event):
#print "b2m ", event.x, event.y
pass
def B3_Down (self, event):
print "b3 _", event.x, event.y
def B3_Up (self, event):
print "b3 ^", event.x, event.y
self.B2_Up ( event )
def B3_Drag (self, event):
#print "b3m ", event.x, event.y
pass
def isInSeq ( self, x, y ) :
if y >= self.seqY and y <= self.seqY + self.seqH :
return True
else :
return False
def isInMod ( self, x, y ) :
if y >= self.modY and y <= self.modY + self.modH :
return True
else :
return False
def Mouse_Move (self, event):
#print "mod m ", event.x, event.y
#self.Canvas.coords ( self.seqMouseLine, event.x,self.seqY,event.x,self.seqY+self.seqH )
if self.isInSeq ( event.x, event.y ) and hasattr ( self, 'seq') and len(self.seq) > 0 :
if hasattr ( self, 'seqRec' ) and hasattr ( self, 'tw' ) and hasattr ( self, 'seqMouseR' ) :
self.Canvas.itemconfigure ( self.seqRec, state=Tkinter.NORMAL )
si = ( event.x - self.seqX ) / self.tw
if si < 0 :
si = 0
if si < len ( self.seq ) :
res = self.seqRes [ si ]
resEnd = self.seqRes [ len(self.seqRes) - 1 ]
try :
status ( "Sequence: %s/%s %d/%d" % ( self.seq[si], res.type, res.id.position, resEnd.id.position ) )
except :
status ( "model not found" )
self.chain.set("")
self.struc.set("")
self.RemoveSeq ()
return
y0 = self.seqY+5
y1 = self.seqY+self.seqH-5
if event.y >= y0 and event.y <= y1 and hasattr ( self, 'seqMouseR' ) :
x0 = self.seqX + si * self.tw
x1 = x0 + self.tw
self.Canvas.coords ( self.seqMouseR, x0, y0, x1, y1 )
self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.NORMAL )
else :
self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.HIDDEN )
else :
self.Canvas.itemconfigure ( self.seqRec, state=Tkinter.HIDDEN )
if hasattr ( self, 'seqMouseR' ) :
self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.HIDDEN )
self.last_x = event.x
self.last_y = event.y
def Canvas_Leave ( self, event ) :
#self.Canvas.coords ( self.seqMouseLine, 0,0,0,0 )
pass
def Canvas_Config (self, event) :
#print "mod cfg ", event.width, event.height
self.W = event.width
self.H = event.height
#self.Canvas.delete("all")
if 1 :
if hasattr(self, 'backRec') :
self.Canvas.coords (self.backRec, 0, 0, self.W, self.H)
else :
self.backRec = self.Canvas.create_rectangle(0, 0, self.W, self.H, outline="#eee", fill="#eee")
#self.seqMouseLine = self.Canvas.create_line(0, 0, 0, 0, fill="#66a")
if hasattr ( self, 'seqRec' ) :
self.Canvas.coords ( self.seqRec, 0, self.seqY, self.W, self.seqY+self.seqH )
else :
self.seqRec = self.Canvas.create_rectangle(0, self.seqY, self.W, self.seqY+self.seqH, outline="#ddd", fill="#ddd" )
self.Canvas.tag_lower(self.seqRec)
self.Canvas.tag_lower(self.backRec)
def Canvas_Wheel ( self, event ) :
if self.isInSeq (self.last_x, self.last_y) :
#self.seqX += event.delta * 10
self.mag = self.mag + event.delta
if self.mag > 15 : self.mag = 15
if self.mag < 2 : self.mag = 2
self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')
#self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')
self.tw = self.font.measure ( "a" )
#GetSegMod().seqX = self.seqX
self.UpdateSeqFont ()
self.UpdateSeq ()
# ['__doc__', '__module__', 'char', 'delta', 'height', 'keycode', 'keysym', 'keysym_num', 'num', 'send_event', 'serial', 'state', 'time', 'type', 'widget', 'width', 'x', 'x_root', 'y', 'y_root']
#print dir(event)
#print event.delta
status ( "Mag: %d" % self.mag )
def ZoomMinus ( self ) :
self.mag = self.mag - 1
if self.mag > 15 : self.mag = 15
if self.mag < 2 : self.mag = 2
#print "w ", event.delta, " mag: ", self.mag
self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')
#self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')
self.tw = self.font.measure ( "a" )
self.UpdateSeqFont ()
self.UpdateSeq ()
status ( "Magnification: %d" % self.mag )
def ZoomPlus ( self ) :
self.mag = self.mag + 1
if self.mag > 15 : self.mag = 15
if self.mag < 2 : self.mag = 2
#print "w ", event.delta, " mag: ", self.mag
self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')
#self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')
self.tw = self.font.measure ( "a" )
self.UpdateSeqFont ()
self.UpdateSeq ()
status ( "Magnification: %d" % self.mag )
def ZoomBegin ( self ) :
self.seqX = 10
self.UpdateSeq ()
def ZoomEnd ( self ) :
self.seqX = - ( len(self.seq) - 50 ) * self.tw
self.UpdateSeq ()
def isSelected ( self, fmap ) :
for sp in fmap.surfacePieces :
if sp in Surface.selected_surface_pieces() :
return True
return False
def S_sel (self) :
# show sigma for a side chain
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
selAtom = selAts[0]
r = selAtom.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)
if 1 or not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(r.molecule.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
#print "-"
import time
start = time.time()
#sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )
if 1 :
print "_sigma____________________________"
sigma = RadAts ( [selAtom], dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )
res = sigma * numpy.pi * numpy.sqrt(2.0)
end = time.time()
print "%s - sigma: %.3f, res: %.3f, time: %f" % ( selAtom.name, sigma, res, (end - start) )
def Q_sel (self) :
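# compute the Q-score of the first selected atom in the current map using RadCC with
# the reference sigma of 0.5, printing CC/CCm and the fit error, then print the
# per-atom Q (CCm) for every atom in the residue and the side chain average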
# show sigma for a side chain
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
selAtom = selAts[0]
r = selAtom.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)
print " - in map: %s" % dmap.name
if 1 or not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(r.molecule.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
#print "-"
import time
start = time.time()
#sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )
if 0 :
print "_sigma____________________________"
sigma = RadAts ( [selAtom], dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )
res = sigma * numpy.pi * numpy.sqrt(2.0)
end = time.time()
print "%s - sigma: %.3f, res: %.3f, time: %f" % ( selAtom.name, sigma, res, (end - start) )
if 1 :
print "_Q_score____________________________"
minD, maxD = MinMaxD ( dmap )
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
if 0 :
minD = numpy.min(M)
print " - min before masking: %.4f" % minD
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, 0.5 )
M = mdata.full_matrix ()
minD = numpy.min(M)
print " - min after masking: %.4f" % minD
M = numpy.where ( M == 0.0, numpy.ones_like(M)*(minD-0.2), M )
import _volume
points = _volume.high_indices(M, minD-0.1)
fpoints = points.astype(numpy.single)
fpoint_weights = M[points[:,2],points[:,1],points[:,0]]
minD = numpy.min(fpoint_weights)
print " - min of mask pts: %.4f" % minD
#sigma = 2.0 / (numpy.pi * numpy.sqrt(2.0))
sigma = 0.5
rr = RadCC ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=1, numPts=8, toRAD=2.0, dRAD=0.2, minD=minD, maxD=maxD, fitg=1 )
CC, CCm, yds, err = rr
#CC, CCm = rr
#CC, CCm = RadCC ( selAtom.residue.scAtoms, dmap, sigma, allAtTree=allAtTree, show=0, log=1, numPts=5, toRAD=3, dRAD=0.5 )
end = time.time()
print " - sigma: %.3f, cc: %.3f, ccm: %.3f, time: %f" % ( sigma, CC, CCm, (end - start) )
print "Atoms in %d.%s %s" % (selAtom.residue.id.position, selAtom.residue.id.chainId, selAtom.residue.type)
avg, N = 0.0, 0.0
for at in selAtom.residue.atoms :
CC, CCm = RadCC ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.2, minD=minD, maxD=maxD )
print " - %s : %.2f" % (at.name, CCm)
if at.isSC :
avg += CCm
N += 1.0
if N > 0 :
print " - avg sc Q: %.2f" % (avg/N)
def AProfs (self) :
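# atom profiles: for atoms with Q above the cutoffs used below (ASP/GLU oxygens,
# VAL/LEU carbons, ARG nitrogens), collect the per-radius values (yds) returned by
# RadCC (fitg=1) and print per-type mean and std-dev profiles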
mol = self.cur_mol
if self.cur_mol == None :
umsg ("Select a molecule first")
return []
chainId = self.chain.get()
dmap = self.cur_dmap
print " - in map: %s" % dmap.name
if 1 or not hasattr ( mol, 'bbats' ) :
SetBBAts(mol)
mol.bbats = True
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
sigma = 0.5
minD, maxD = MinMaxD ( dmap )
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
asp_o, glu_o, arg_n, leu_c, val_c = [], [], [], [], []
def doAt (at, arr) :
rr = RadCC ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=50, toRAD=3.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=1 )
CC, CCm, yds, err = rr
#print len(yds)
#if len(yds) == 31 :
arr.append ( yds )
print "%.2f\t%.5f\t%s.%d.%s" % (at.Q,err,r.type,r.id.position,at.name),
for y in yds : print "\t%f" % y,
print ""
for r in mol.residues :
if 1 :
if r.type == "ASP" :
for at in [r.atomsMap["OD1"][0], r.atomsMap["OD2"][0]] :
if at.Q > 0.8 : doAt (at, asp_o)
if r.type == "GLU" :
for at in [r.atomsMap["OE1"][0], r.atomsMap["OE2"][0]] :
if at.Q > 0.8 : doAt (at, glu_o)
if 1 :
if r.type == "VAL" :
for at in [r.atomsMap["CG1"][0], r.atomsMap["CG2"][0]] :
if at.Q > 0.9 : doAt (at, val_c)
if r.type == "LEU" :
for at in [r.atomsMap["CD1"][0], r.atomsMap["CD2"][0]] :
if at.Q > 0.9 : doAt (at, leu_c)
if 1 :
if r.type == "ARG" :
for at in [r.atomsMap["NH1"][0], r.atomsMap["NH2"][0]] :
if at.Q > 0.8 : doAt (at, arg_n)
#if r.type == "LEU" :
# for at in [r.atomsMap["CD1"][0], r.atomsMap["CD2"][0]] :
# if at.Q > 0.8 : doAt (at, leu_c)
def outAt (arr, label) :
aa = numpy.array ( arr )
s = numpy.std(aa,axis=0)
m = numpy.mean(aa,axis=0)
print label, "\t", aa.shape
print label,
for i in range(len(s)) :
print "\t%f" % m[i],
print ""
print label,
for i in range(len(s)) :
print "\t%f" % s[i],
print ""
outAt ( val_c, "VAL(CG)" )
outAt ( leu_c, "LEU(CD)" )
outAt ( arg_n, "ARG(NH)" )
outAt ( asp_o, "ASP(OD)" )
outAt ( glu_o, "GLU(OE1)" )
def BB_Sigma (self) :
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
a = selAts[0]
r = a.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)
if 1 or not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(r.molecule.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
#print "-"
import time
start = time.time()
sigma = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=10, toRAD=2, dRAD=0.25 )
end = time.time()
print "%s - rad: %.3f, time: %f" % ( a.name, sigma, (end - start) )
def ZScoreSel (self) :
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
a = selAts[0]
r = a.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)
if not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
if "SC " in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
scZ, cc = zRotSideChain ( r.molecule, r, 3.0, dmap, show=True )
print "- scZ %.3f, cc %.3f" % (scZ, cc)
#print "%f\t%f\t%f" % (r.sigma,scZ,cc)
def RotaZ1 (self) :
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
a = selAts[0]
r = a.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)
if not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
if "SC " in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ats = [at for at in self.cur_mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(r.molecule.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
rZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=10, toRAD=2 )
#scZ, cc = zRotSideChain ( r.molecule, r, 3.0, dmap, show=True )
print "- radZ %.3f " % (rZ)
#print "%f\t%f\t%f" % (r.sigma,scZ,cc)
def R1 (self) :
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = self.cur_dmap
a = selAts[0]
r = a.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)
if not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
if "SC " in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ress = []
bbAtoms = []
allAtoms = []
for r in a.molecule.residues :
if r.id.chainId == a.residue.id.chainId :
ress.append ( r )
bbAtoms.extend ( r.bbAtoms )
allAtoms.extend ( r.atoms )
avgD = avgdAts ( allAtoms, dmap )
bbAvgD = avgdAts ( bbAtoms, dmap )
print " - avgd - all: %f, bb: %f" % (avgD, bbAvgD)
r = a.residue
if len(r.scAtoms) > 0 :
scAvgD = avgdAts ( r.scAtoms, dmap )
r.SCBBr = scAvgD / bbAvgD
print " - residue %s.%d, %d side chain atoms, avgd: %.5f, r: %.5f" % ( r.type, r.id.position, len(r.scAtoms), scAvgD, r.SCBBr/bbAvgD )
else :
r.SCBBr = None
print " - residue %s.%d - no side chain atoms" % ( r.type, r.id.position )
def AlignRes1 ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return
if self.cur_dmap == None :
umsg ("Select a map first")
return
#SetBBAts ( self.cur_mol )
last_x = 0.0
last_y = 0.0
r0, exR0, xtR0 = None, None, None
alAts = []
if self.exType == "ASP" : alAts = ["CG","OD1","OD2"]
if self.exType == "LEU" : alAts = ["CG","CD1","CD2"]
if self.exType == "GLU" : alAts = ["CD","OE1","OE2"]
if self.exType == "TYR" : alAts = ["OH","CE1","CE2","CD1","CD2","CG","CB"]
for r in self.cur_mol.residues :
if r.id.chainId == chainId and r.type == self.exType :
print " - res %s %d" % (r.type, r.id.position)
if r0 == None :
r0 = r
r.exMaps[0].display = True
r.exMaps[1].display = False
#r.xtMaps[0].display = False
#r.xtMaps[1].display = False
for at in r.atoms :
at.display = at.name in alAts
else :
exR0 = r0.exMol.residues[0]
exR = r.exMol.residues[0]
ats0, ats = [], []
for atName in alAts :
ats0.append ( exR0.atomsMap[atName][0] )
ats.append ( exR.atomsMap[atName][0] )
for at in r.atoms :
at.display = at.name in alAts
#aCG0, aOD10, aOD20 = exR0.atomsMap['CG'][0], exR0.atomsMap['OD1'][0], exR0.atomsMap['OD2'][0],
#aCG, aOD1, aOD2 = exR.atomsMap['CG'][0], exR.atomsMap['OD1'][0], exR.atomsMap['OD2'][0],
#xf, rmsd = chimera.match.matchPositions ( pts_o, pts_c )
#xf, rmsd = chimera.match.matchAtoms ( [aCG0, aOD10, aOD20], [aCG, aOD1, aOD2] )
xf, rmsd = chimera.match.matchAtoms ( ats0, ats )
print " - rmsd: ", rmsd
#from _multiscale import get_atom_coordinates
#points = get_atom_coordinates ( atoms, transformed = True )
#exR.xf0 = r.exMol.openState.xform
mxf = r0.exMol.openState.xform
mxf.multiply ( xf )
r.exMol.openState.xform = mxf
r.exMaps[0].openState.xform = mxf
r.exMaps[1].openState.xform = mxf
r.exMaps[0].display = True
r.exMaps[1].display = False
#r.xtMaps[0].display = False
#r.xtMaps[1].display = False
#break
def AlignRes2 ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return
if self.cur_dmap == None :
umsg ("Select a map first")
return
#SetBBAts ( self.cur_mol )
last_x = 0.0
last_y = 0.0
r0, exR0, xtR0 = None, None, None
for r in self.cur_mol.residues :
if r.id.chainId == chainId and r.type == "ASP" :
print " - res %s %d" % (r.type, r.id.position)
if r0 == None :
r0 = r
r.exMaps[0].display = False
r.exMaps[1].display = False
r.xtMaps[0].display = True
r.xtMaps[1].display = False
else :
r.exMaps[0].display = False
r.exMaps[1].display = False
exR0 = r0.xtMol.residues[0]
aCB0, aCG0, aOD10, aOD20 = exR0.atomsMap['CB'][0], exR0.atomsMap['CG'][0], exR0.atomsMap['OD1'][0], exR0.atomsMap['OD2'][0],
exR = r.xtMol.residues[0]
aCB, aCG, aOD1, aOD2 = exR.atomsMap['CB'][0], exR.atomsMap['CG'][0], exR.atomsMap['OD1'][0], exR.atomsMap['OD2'][0],
#xf, rmsd = chimera.match.matchPositions ( pts_o, pts_c )
xf, rmsd = chimera.match.matchAtoms ( [aCB0, aCG0, aOD10, aOD20], [aCB, aCG, aOD1, aOD2] )
print " - rmsd: ", rmsd
#from _multiscale import get_atom_coordinates
#points = get_atom_coordinates ( atoms, transformed = True )
#exR.xf0 = r.exMol.openState.xform
mxf = r0.xtMol.openState.xform
mxf.multiply ( xf )
r.xtMol.openState.xform = mxf
r.xtMaps[0].openState.xform = mxf
r.xtMaps[1].openState.xform = mxf
r.xtMaps[0].display = True
r.xtMaps[1].display = False
#break
def Avg ( self ) :
print " -- finding base map --- "
largestMap = None
maxD = 0
for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :
if m.display == True :
d = numpy.sum ( m.data.size )
if d > maxD :
maxD = d
largestMap = m
print " - largest map: ", largestMap.name
dmap = largestMap
dmap.display = False
fmap = None
avgMat = dmap.data.full_matrix()
N = 0.0
print " ----------- Averaging... ---------------------"
for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :
if m.display == True and m != dmap :
print m.name
df_mat = self.Map2Map ( m, dmap )
m.display = False
N = N + 1.0
avgMat = avgMat + df_mat
print " ----------- n=%f ---------------------" % N
avgMat = avgMat / N
df_data = VolumeData.Array_Grid_Data ( avgMat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name="avg" )
MapFromData ( df_data, "Avg", dmap, False )
MapFromData ( df_data, "Avg", dmap, True )
#df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )
#df_v.name = "Avg"
#df_v.openState.xform = dmap.openState.xform
#nv = self.ShrinkMap ( df_v, 1e-3 )
def Map2Map ( self, densitiesFromMap, toGridOfMap, mask = False ) :
fmap = toGridOfMap
dmap = densitiesFromMap
import _contour
n1, n2, n3 = fmap.data.size[0], fmap.data.size[1], fmap.data.size[2]
f_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices
_contour.affine_transform_vertices( f_points, fmap.data.ijk_to_xyz_transform )
d_vals = dmap.interpolated_values ( f_points, fmap.openState.xform )
df_mat = d_vals.reshape( (n3,n2,n1) )
if mask :
f_mat = fmap.data.full_matrix()
f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )
df_mat = df_mat * f_mask
return df_mat
def CloseExtracted ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return
if self.cur_dmap == None :
umsg ("Select a map first")
return
for r in self.cur_mol.residues :
if hasattr ( r, "exMaps" ) :
chimera.openModels.close ( r.exMaps ); del r.exMaps
if hasattr ( r, "xtMaps" ) :
chimera.openModels.close ( r.xtMaps ); del r.xtMaps
if hasattr ( r, "exMol" ) :
chimera.openModels.close ( [r.exMol] ); del r.exMol
if hasattr ( r, "xtMol" ) :
chimera.openModels.close ( [r.xtMol] ); del r.xtMol
for m in chimera.openModels.list() :
if m.name == "Avg" or m.name == "Avg_mesh" :
chimera.openModels.close ( [m] )
def Extract ( self ) :
if self.cur_mol == None :
umsg ("Select a molecule first")
return
chainId = self.chain.get()
if len(chainId) == 0 :
umsg ("Select a chain first")
return
if self.cur_dmap == None :
umsg ("Select a map first")
return
#SetBBAts ( self.cur_mol )
last_x = 0.0
last_y = 0.0
print "Extracting - %s - %s - %s" % (self.cur_dmap.name, self.cur_mol.name, chainId)
#self.exType = "TYR"
#self.exType = "GLU"
self.exType = "ASP"
#self.exType = "LEU"
yzAts = { "ASP" : ["CB","CG","OD1"],
"GLU" : ["CG","CD","OE1"],
"TYR" : ["CB","CZ","CD1"],
"LEU" : ["CB","CG","CD1"]
}
for r in self.cur_mol.residues :
if r.id.chainId == chainId and r.type == self.exType :
print " - res %s %d" % (r.type, r.id.position)
self.ExtractRes ( r, self.cur_mol, self.cur_dmap, last_x, last_y, yzAts[self.exType] )
#self.ExtendRes ( r, self.cur_mol, self.cur_dmap, last_x, -8.0, thrF=0.8 )
last_x += 7.0
#break
def ExtractRes ( self, r, mol, dmap, atX, atY, xyAts ) :
nmol, nres = CopyRess ( [r] )
nmol.name = mol.name + "_%s_%d" % (r.type, r.id.position)
chimera.openModels.add ( [nmol] )
nmol.openState.xform = mol.openState.xform
for at in nmol.atoms :
#at.drawMode = 3
if at.element.name in atomColors : at.color = atomColors[at.element.name]
#at.radius = at.radius * 0.8
mname = dmap.name + "_%s_%d" % (r.type, r.id.position)
#aCB, aCG, aOD1 = r.atomsMap['CB'][0], r.atomsMap['CG'][0], r.atomsMap['OD1'][0]
aCB, aCG, aOD1 = r.atomsMap[xyAts[0]][0], r.atomsMap[xyAts[1]][0], r.atomsMap[xyAts[2]][0]
dmap, mmap = ExtractDen ( r.atoms, dmap, mname, boundRad=2.0, showMesh=True )
r.exMol = nmol
r.exMaps = [dmap, mmap]
X = aOD1.coord() - aCB.coord(); X.normalize()
Y = aCG.coord() - aCB.coord(); Y.normalize()
Z = chimera.cross ( X, Y ); Z.normalize()
X = chimera.cross ( Y, Z ); X.normalize()
xf = chimera.Xform.coordFrame ( X, Y, Z, aCB.coord(), True ).inverse()
xf.premultiply ( chimera.Xform.translation(atX, atY, 0) )
nmol.openState.xform = xf
dmap.openState.xform = xf
if mmap : mmap.openState.xform = xf
def ExtendRes ( self, r, mol, dmap, atX, atY, thrF=0.75 ) :
nmol, nres = CopyRess ( [r] )
nmol.name = mol.name + "_%s_%d_ext" % (r.type, r.id.position)
chimera.openModels.add ( [nmol] )
nmol.openState.xform = mol.openState.xform
for at in nmol.atoms :
at.drawMode = 3
if at.element.name in atomColors : at.color = atomColors[at.element.name]
at.radius = at.radius * 0.8
mname = dmap.name + "_%s_%d_ext" % (r.type, r.id.position)
R = nres[0]
R.O, R.N, R.C, R.CA = R.atomsMap["O"][0], R.atomsMap["N"][0], R.atomsMap["C"][0], R.atomsMap["CA"][0]
R.CB, R.CG, R.OD1, R.OD2 = R.atomsMap["CB"][0], R.atomsMap["CG"][0], R.atomsMap["OD1"][0], R.atomsMap["OD2"][0]
bones = []
bones.append ( Bone(R.CA, R.N, R.CB) )
bones.append ( Bone(R.CA, R.C, R.CB) )
bones.append ( Bone(R.C, R.O, R.CA) )
bones.append ( Bone(R.CA, R.CB, R.N) )
bones.append ( Bone(R.CG, R.CB, R.OD1) )
bones.append ( Bone(R.CG, R.OD1, R.OD2) )
bones.append ( Bone(R.CG, R.OD2, R.OD1) )
for bi, bo in enumerate ( bones ) :
if GetMod ( "bone_%d.mrc" % bi ) != None : chimera.openModels.close ( "bone_%d.mrc" % bi )
if GetMod ( "bone_%d.mrc_mesh" % bi ) != None : chimera.openModels.close ( "bone_%d.mrc_mesh" % bi )
bo.dmap = BoneMap ( bo, dmap, 1.0, "bone_%d.mrc" % bi, show = False, showMesh=True )
v1 = R.CB.coord() - R.CA.coord(); v1.normalize()
v2 = R.CB.coord() - R.CG.coord(); v2.normalize()
ang = numpy.arccos ( v1*v2 ) * 180.0/numpy.pi
ax = chimera.cross ( v1, v2 ); ax.normalize()
print "CB-CG: %.2f" % (-ang + 180)
T = chimera.Xform.translation ( R.CB.coord().toVector() )
T.multiply ( chimera.Xform.rotation ( ax, -ang + 180 ) )
T.multiply ( chimera.Xform.translation ( R.CB.coord().toVector()*-1.0 ) )
for an in ["CG", "OD1", "OD2"] :
at = R.atomsMap[an][0]
at.setCoord ( T.apply (at.coord()) )
#MoldMap2 ( bones, rmaps[0], rmaps[1] )
d1 = diha ( R.N, R.CB, R.CG, R.OD1 )
d2 = diha ( R.N, R.CB, R.CG, R.OD2 )
ang = d1 if numpy.abs(d1) < numpy.abs(d2) else d2
print "CG dihedral - ", d1, d2, " -> ", ang
ax = R.CG.coord() - R.CB.coord(); ax.normalize()
T = chimera.Xform.translation ( R.CG.coord().toVector() )
T.multiply ( chimera.Xform.rotation ( ax, -ang ) )
T.multiply ( chimera.Xform.translation ( R.CG.coord().toVector()*-1.0 ) )
for an in ["OD1", "OD2"] :
at = R.atomsMap[an][0]
at.setCoord ( T.apply (at.coord()) )
dmap, dmesh = MapForAtoms ( R.atoms, dmap, mname, showMesh=True, thrF=thrF )
MoldMap2 ( bones, dmap, dmesh )
r.xtMol = nmol
r.xtMaps = [dmap, dmesh]
X = R.OD1.coord() - R.CB.coord(); X.normalize()
Y = R.CG.coord() - R.CB.coord(); Y.normalize()
Z = chimera.cross ( X, Y ); Z.normalize()
X = chimera.cross ( Y, Z ); X.normalize()
xf = chimera.Xform.coordFrame ( X, Y, Z, R.CB.coord(), True ).inverse()
xf.premultiply ( chimera.Xform.translation(atX, atY, 0) )
nmol.openState.xform = xf
dmap.openState.xform = xf
if dmesh : dmesh.openState.xform = xf
def asp ( self ) :
N = 1
framei = 0
mpath = "/Users/greg/Desktop/frames"
for f in os.listdir ( mpath ) :
if f.endswith(".png") :
os.remove( mpath + "/" + f )
dmap, mol = VisMapMod()
resolution = 3.0 * dmap.data.step[0]
print "Map: %s, mol: %s" % (dmap.name, mol.name)
res = chimera.selection.currentResidues()[0]
print " - res: %s %d.%s" % (res.type, res.id.position, res.id.chainId)
z = None
nname = "%s_%d" % ( res.type, res.id.position )
#for na in ["ASP","molded.mrc","skinned.mrc"] :
# m = GetMod ( na )
# if m != None :
# chimera.openModels.close ( [m] )
nmol = GetMod ( nname + ".pdb" )
if nmol == None :
nmol, nres = CopyRess ( [res] )
nmol.name = nname + ".pdb"
chimera.openModels.add ( [nmol] )
nmol.openState.xform = mol.openState.xform
xf = nmol.openState.xform
#xf.multiply ( chimera.Xform.translation ( 0,0,5 ) )
nmol.openState.xform = xf
for at in nmol.atoms:
at.drawMode = 3
if at.element.name in atomColors :
at.color = atomColors[at.element.name]
at.radius = at.radius * 0.8
nres = nmol.residues
R = nres[0]
R.O = R.atomsMap["O"][0]
R.N = R.atomsMap["N"][0]
R.C = R.atomsMap["C"][0]
R.CA = R.atomsMap["CA"][0]
R.CB = R.atomsMap["CB"][0]
R.CG = R.atomsMap["CG"][0]
R.OD1 = R.atomsMap["OD1"][0]
R.OD2 = R.atomsMap["OD2"][0]
bones = []
bones.append ( Bone(R.CA, R.N, R.CB) )
bones.append ( Bone(R.CA, R.C, R.CB) )
bones.append ( Bone(R.C, R.O, R.CA) )
bones.append ( Bone(R.CA, R.CB, R.N) )
bones.append ( Bone(R.CG, R.CB, R.OD1) )
bones.append ( Bone(R.CG, R.OD1, R.OD2) )
bones.append ( Bone(R.CG, R.OD2, R.OD1) )
for bi, bo in enumerate ( bones ) :
if GetMod ( "bone_%d.mrc" % bi ) != None : chimera.openModels.close ( "bone_%d.mrc" % bi )
if GetMod ( "bone_%d.mrc_mesh" % bi ) != None : chimera.openModels.close ( "bone_%d.mrc_mesh" % bi )
bo.dmap = BoneMap ( bo, dmap, 1.0, "bone_%d.mrc" % bi, show = False, showMesh=True )
v1 = R.CB.coord() - R.CA.coord(); v1.normalize()
v2 = R.CB.coord() - R.CG.coord(); v2.normalize()
ang = numpy.arccos ( v1*v2 ) * 180.0/numpy.pi
print ang
ax = chimera.cross ( v1, v2 ); ax.normalize()
dmap.display = False
mol.display = False
NB = 2
#N = 90
toAng = -ang + 180
dAng = toAng / float(N)
print "CB-CG: %.2f/%.2f deg" % (toAng, dAng)
rmaps = None
for i in range ( N ) :
print i,
T = chimera.Xform.translation ( R.CB.coord().toVector() )
#T.multiply ( chimera.Xform.rotation ( ax, -ang + 180 ) )
T.multiply ( chimera.Xform.rotation ( ax, dAng ) )
T.multiply ( chimera.Xform.translation ( R.CB.coord().toVector()*-1.0 ) )
for an in ["CG", "OD1", "OD2"] :
at = R.atomsMap[an][0]
at.setCoord ( T.apply (at.coord()) )
#SkinMap ( R.atoms, bones, NB, dmap, 2.0, "skinned.mrc", True)
#MoldMap ( R.atoms, bones, dmap, "molded.mrc", showMesh=True )
if rmaps == None :
rmaps = MapForAtoms ( R.atoms, dmap, nname+".mrc", showMesh=True )
# for m in rmaps :
# if m != None :
# m.openState.xform = nmol.openState.xform
MoldMap2 ( bones, rmaps[0], rmaps[1] )
if N > 1 :
chimera.viewer.postRedisplay()
self.toplevel_widget.update_idletasks ()
chimera.printer.saveImage ( mpath + "/%06d.png" % framei )
framei += 1
print ""
if 1 :
d1 = diha ( R.N, R.CB, R.CG, R.OD1 )
d2 = diha ( R.N, R.CB, R.CG, R.OD2 )
ang = d1 if numpy.abs(d1) < numpy.abs(d2) else d2
print "CG dihedral - ", d1, d2, " -> ", ang
ax = R.CG.coord() - R.CB.coord(); ax.normalize()
toAng = -ang
dAng = toAng / float( max(N/2,1) )
print "CG dihedral -- %.2f/%.2f deg" % (toAng, dAng)
for i in range ( max(N/2,1) ) :
print i,
T = chimera.Xform.translation ( R.CG.coord().toVector() )
T.multiply ( chimera.Xform.rotation ( ax, dAng ) )
T.multiply ( chimera.Xform.translation ( R.CG.coord().toVector()*-1.0 ) )
for an in ["OD1", "OD2"] :
at = R.atomsMap[an][0]
at.setCoord ( T.apply (at.coord()) )
#print "%d bones" % len(bones)
#PtsToMapSkinD ( R.atoms, bones, NB, dmap, 2.0, "skinned.mrc", True)
#MoldMap ( R.atoms, bones, dmap, "molded.mrc", showMesh=True )
MoldMap2 ( bones, rmaps[0], rmaps[1] )
if N > 1 :
chimera.viewer.postRedisplay()
self.toplevel_widget.update_idletasks ()
chimera.printer.saveImage ( mpath + "/%06d.png" % framei )
framei += 1
if N > 1 :
args = [ "/Users/greg/_mol/Chimera.app/Contents/Resources/bin/ffmpeg", "-r", "30",
"-i", mpath + "/%06d.png", "-y", "-qscale", "1", "-b", "9000", "-vcodec", "mpeg4", # mpeg4 libx264
"-f", "mov", mpath+"/__ares.mov" ]
print "- running: "
for a in args : print a,
print ""
import subprocess
subprocess.call ( args )
print "done!\n"
def AddSpherePts ( pts, clr, rad, mname = "RAD points" ) :
from chimera import elements, Coord, Atom, MolResId
ptsMol = GetMod ( mname )
res = None
if ptsMol == None:
from chimera import Molecule, openModels
ptsMol = Molecule()
ptsMol.name = mname
ptsMol.isRealMolecule = False
openModels.add ( [ptsMol], noprefs = True )
res = ptsMol.newResidue('marker', chimera.MolResId('1', 1) )
else :
res = ptsMol.residues[0]
for pt in pts :
a = ptsMol.newAtom('', elements.H)
res.addAtom(a)
a.setCoord ( chimera.Point(*pt) ) # ( chimera.Point(*xyz) )
a.radius = rad
a.drawMode = Atom.Sphere
a.color = chimera.MaterialColor ( *clr )
a.surfaceCategory = 'markers'
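# SpherePts: N points approximately uniformly distributed on a sphere of
# radius rad around ctr, via a spiral scheme (this appears to be the
# Saff-Kuijlaars construction: latitudes h_k = -1 + 2(k-1)/(N-1), longitude
# advanced by 3.6/sqrt(N(1-h^2))). Note the formula divides by N-1, so N
# must be at least 2.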
def SpherePts ( ctr, rad, N ) :
thetas, phis = [], []
from math import acos, sin, cos, sqrt, pi
for k in range ( 1, N+1 ) :
h = -1.0 + ( 2.0*float(k-1)/float(N-1) )
phis.append ( acos(h) )
thetas.append ( 0 if k == 1 or k == N else
(thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )
pts = [None] * N
for i, theta, phi in zip ( range(N), thetas, phis ):
v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))
#if numpy.abs ( v.length - 1.0 ) > 1e-3 :
# print "x"
pt = ctr + v * rad
pts[i] = pt
return pts
import threading
def Calc_ ( label="" ) :
print "Calc all scores -", label
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
#allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
allAtTree = None
cc, ccm, dr, bbRadZ, scRadZ, scRotaZ, q, qcc, emr = 0,0,0,0,0,0,0,0,0
if 0 :
#cc, ccm, dr, ccr, ccmr = CalcSCBBr ( mol, mol.residues[0].id.chainId, dmap )
cc, ccm, dr, ccr, ccmr = CalcSCBBr ( mol, None, dmap )
if 0 :
#bbSig, scSig = CalcSigma ( mol, mol.residues[0].id.chainId, dmap, allAtTree, useOld=False, log=False )
#bbRadZ, scRadZ = CalcRadZ ( mol, mol.residues[0].id.chainId, dmap, allAtTree, useOld=False, log=False )
#q, qcc = CalcQ ( mol, mol.residues[0].id.chainId, dmap, allAtTree=allAtTree )
#q, qcc = CalcQ ( mol, None, dmap, allAtTree=allAtTree )
q, qcc = CalcQp ( mol, None, dmap, allAtTree=allAtTree )
if 0 :
bbRadZ, scRadZ = CalcRadZ ( mol, None, dmap, allAtTree, useOld=False, log=False )
if 0 :
print 'Side Chain Rota-Z for %d ress' % len(mol.residues)
Zs = CalcRotaZ ( dmap, mol, mol.residues )
scRotaZ = numpy.average ( Zs )
if 1 :
emr = emringer (dmap, mol)
if 0 :
#fp = open ( "/Users/greg/Dropbox/_mapsq/scores6_Q_allc_%s.txt" % label, "a" )
fp = open ( "/home/greg/Dropbox/_mapsq/scores6_Q_allc_%s.txt" % label, "a" )
fp.write ( "%s\t%f\t%f\t%f\t%f\t%f\t%f\n" % (dmap.name.split("_")[0], cc, ccm, dr, q, qcc,emr) )
#fp.write ( "%s\t%f\n" % (dmap.name.split("_")[0], scRotaZ) )
fp.close ()
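# emringer: score a model/map pair with phenix.emringer. A copy of the PDB
# stripped to ATOM/HETATM records is written first (symmetry/CONECT records
# can trip up phenix), the tool is run as a subprocess, and the score is
# parsed from the "EMRinger Score:" line of its output; temporary files are
# removed afterwards. The phenix path below is machine-specific.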
def emringer ( dmap, mol ) :
print "----- %s ____________ EMRINGER ____________ %s -----" % (dmap.name, mol.name)
cdir = os.getcwd()
print " - now in: ", cdir
#print " - splitting " + mol.openedAs[0]
mpath, mname = os.path.split ( mol.openedAs[0] )
dpath, dname = os.path.split ( dmap.data.path )
bs = os.path.splitext ( mol.openedAs[0] )[0]
print " - copying mol file... removes symmetry/connect stuff"
fin = open ( mol.openedAs[0], "r" )
fout = open ( bs + "_.pdb", "w" )
for line in fin :
if "ATOM" in line or "HETATM" in line :
fout.write ( line )
fin.close ()
fout.close ()
phPath = "/Users/greg/_mol/phenix-1.14-3260/build/bin/"
args = [phPath+'phenix.emringer', dmap.data.path, bs+"_.pdb" ]
print "running: ",
for arg in args : print arg,
print ""
outf = mpath + '/' + '_out.txt'
errf = mpath + '/' + '_err.txt'
fout = open ( outf, "w" )
ferr = open ( errf, "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)
p.wait()
fout.close()
ferr.close()
print " - getting score from " + outf
score = -1
fin = open ( outf )
for l in fin :
if "EMRinger Score:" in l :
s = l [ len("EMRinger Score:")+1 : ]
print "Score: ", s
score = float( s )
print " - found score: %.3f" % score
print " - removing ", bs + "_.pdb"
os.remove ( bs + "_.pdb" )
os.remove ( bs + "__emringer.pkl" )
os.remove ( bs + "__emringer.csv" )
import shutil
shutil.rmtree ( bs + "__emringer_plots" )
return score
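# CalcR_: map-model correlations computed two ways for comparison: (1) against
# a Gaussian model map built with MyMolMapX, and (2) against a simulated map
# made by phenix.fmodel + phenix.mtz2map at a fixed 3.0 A resolution (cached
# in a *_fmodel.ccp4 file). Both CC and mean-subtracted CCm are reported, and
# appended to a hard-coded scores file.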
def CalcR_ ( label = "" ) :
print "Calc all scores -", label
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
ddir, dfile = os.path.split(dmap.data.path)
molFile = mol.openedAs[0]
mdir, mfile = os.path.split(molFile)
print "PhFmap -- " + molFile
RES = 3.0
print " -- res %.1f -- " % RES
outFile = molFile + "_r%.0f" % RES + "_fmodel.ccp4"
if not os.path.isfile ( outFile ) :
phPath = "/usr/local/phenix-1.14-3260/build/bin/"
args = [phPath+'phenix.fmodel', "high_resolution=%.1f"%RES, "scattering_table=electron", "generate_fake_p1_symmetry=True", molFile ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_0_fmodel.log', "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print ""
args = [phPath+'phenix.mtz2map', "high_resolution=%.1f"%RES, "include_fmodel=true", "scattering_table=electron", molFile, molFile + ".mtz" ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_1_mtz2map.log', "w" )
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print " - renaming to:", outFile
os.rename( molFile + "_fmodel.ccp4", outFile )
os.remove( molFile + ".mtz" )
print " - loading map:", outFile
dm = chimera.openModels.open ( outFile )[0]
import FitMap
molg = MyMolMapX ( mol, mol.atoms, RES, dmap.data.step[0], chimera.Xform.identity() )
fpoints, fpoint_weights = fit_points_g ( molg, 0.1 )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
import FitMap
mmolap, mmcorr1, mmcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Molmap - olap: %f, CC: %f, CCm: %f" % (mmolap, mmcorr1, mmcorr2)
fpoints, fpoint_weights = fit_points_g ( dm.data, 5.0 )
map_values = dmap.interpolated_values ( fpoints, dm.openState.xform )
olap, phcorr1, phcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, phcorr1, phcorr2)
#fpoints, fpoint_weights = fit_points_g ( dmap.data, -1e6 )
#map_values = dm.interpolated_values ( fpoints, dmap.openState.xform )
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, corr1, corr2)
print "%f\t%f\t%f\t%f" % (mmcorr1, mmcorr2, phcorr1, phcorr2)
fp = open ( "/Users/greg/Dropbox/_mapsq/scores3_R_%s.txt" % label, "a" )
fp.write ( "%s\t%f\t%f\t%f\t%f\n" % (dmap.name.split("_")[0], mmcorr1, mmcorr2, phcorr1, phcorr2) )
fp.close ()
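# CalcSCBBr: per-residue side-chain / backbone ratios. For each residue with
# side-chain atoms it reports the ratio of average map values (avgdAts) and
# the ratios of CC/CCm (ccAts) relative to the whole-chain backbone values.
# Note r.SCBBr ends up holding the side-chain CCm, overwriting the density
# ratio assigned a few lines earlier.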
def CalcSCBBr ( mol, cid, dmap ) :
print "Calculating sc-bb ratios..."
ress = []
bbAtoms = []
allAtoms = []
scAtoms = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
bbAtoms.extend ( r.bbAtoms )
allAtoms.extend ( r.atoms )
scAtoms.extend ( r.scAtoms )
bbAvgD, scAvgD = avgdAts ( bbAtoms, dmap ), avgdAts ( scAtoms, dmap )
print " - avgd - bb: %.3f, sc: %.3f" % (bbAvgD, scAvgD)
bbCC, bbCCm = ccAts ( bbAtoms, dmap, 2.0)
print " - all bb cc: %.3f, ccm: %.3f" % (bbCC, bbCCm)
cc, ccm = ccAts ( allAtoms, dmap, 2.0)
print " - all cc: %.3f, ccm: %.3f" % (cc, ccm)
dr, ccr, ccmr = [], [], []
for r in ress :
if len(r.scAtoms) > 0 :
scAvgD = avgdAts ( r.scAtoms, dmap )
#rbbAvgD = avgdAts ( r.bbAtoms, dmap )
r.SCBBr = scAvgD / bbAvgD
dr.append ( scAvgD / bbAvgD )
scCC, scCCm = ccAts ( r.scAtoms, dmap, 2.0)
ccr.append ( scCC/bbCC )
ccmr.append ( scCCm/bbCCm )
r.SCBBr = scCCm
else :
r.SCBBr = None
print " - avg-r d:%.3f, cc:%.3f, ccm: %.3f" % ( numpy.average ( dr ), numpy.average ( ccr ), numpy.average ( ccmr ) )
return cc, ccm, numpy.average ( dr ), numpy.average ( ccr ), numpy.average ( ccmr )
def ccAts ( atoms, dmap, resolution=3.0 ) :
mol = atoms[0].molecule
molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], chimera.Xform.identity() )
fpoints, fpoint_weights = fit_points_g ( molg, 1e-3 )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
olap, bbCC, bbCCm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
return bbCC, bbCCm
def avgdAts ( atoms, dmap ) :
if len(atoms) < 1 :
#print " - no atoms" % len(atoms)
return 0
from _multiscale import get_atom_coordinates
apos = get_atom_coordinates(atoms, transformed = False)
dvals = dmap.interpolated_values ( apos, atoms[0].molecule.openState.xform )
#print dvals
return numpy.average(dvals)
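# CalcRadZ: per-residue z-scores (RadZ) for side-chain and backbone atoms:
# how many standard deviations the density at the atom centers sits above
# the density sampled on a shell at toRAD, i.e. contrast against the local
# background rather than a correlation.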
def CalcRadZ ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Rad-Z Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scZ' ) : # the score stored below is scZ
ress.append ( r )
print " - residues to do: %d" % len(ress)
for ri, r in enumerate ( ress ) :
r.scZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
r.bbZ = RadZ ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if r.bbZ != None :
scoresBB.append ( r.bbZ )
if r.scZ != None :
scoresSC.append ( r.scZ )
print " - avg radz - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
return numpy.average(scoresBB), numpy.average(scoresSC)
def qwork (num, ress, dmap, allAtTree, log):
print 'qwork %d - %d res, %d - %d' % (num, len(ress), ress[0].id.position, ress[-1].id.position)
for ri, r in enumerate ( ress ) :
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if num == 0 and log :
status ( "Calculating Q scores - %d/%d" % (ri, len(ress)) )
print ".",
def CalcSigma ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Sigma Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scZ' ) : # the score stored below is scZ
ress.append ( r )
print " - residues to do: %d" % len(ress)
if 0 :
import multiprocessing, threading
N = 4 # multiprocessing.cpu_count()
print " - cores: %d" % N
dn = len(ress) / N
threads = []
for i in range(N):
l = i * dn
h = (i+1)*dn if i != N-1 else len(ress)
#print "t %d, %d-%d" % (i, l, h)
#t = threading.Thread(target=qwork, args=(i,ress[l:h], dmap, allAtTree))
#threads.append(t)
#t.start()
#t = threading.Thread(name='d%d'%i, target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))
#t.setDaemon(True)
#t.start()
#threads.append(t)
#print __name__
if 1 or __name__ == '__main__':
p = multiprocessing.Process(target=qwork, args=(i,ress[l:h], dmap, allAtTree, log)) # ctx was never defined; plain multiprocessing.Process is the direct equivalent (branch is disabled by the if 0 above)
p.start()
threads.append(p)
for i, t in enumerate(threads) :
print "j %d" % (i)
t.join()
else :
for ri, r in enumerate ( ress ) :
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
if r.bbZ != None : scoresBB.append ( r.bbZ )
if r.scZ != None : scoresSC.append ( r.scZ )
#sc = [x for x in scores if x is not None]
#scSC = [1.0/x for x in scoresSC if x is not None]
#scBB = [1.0/x for x in scoresBB if x is not None]
#print " - %d res, SC min %.2f max %.2f, avg %.2f" % (len(ress), min(scSC), max(scSC), numpy.average(scSC))
print " - avg sigma - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
if 0 :
sByType = {}
rByType = {}
for r in ress :
if r.scZ != None :
if not r.type in sByType :
rByType[r.type] = []
sByType[r.type] = []
rByType[r.type].append ( [r.scZ, r] )
sByType[r.type].append ( [r.scZ] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
avgs.sort ( reverse=True, key=lambda x: x[0] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype in avgs :
rscores = rByType[rtype]
rscores.sort ( reverse=False, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[rtype]
else : rts = nucleic3to1[rtype]
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore) )
fp.close()
return numpy.average(scoresBB), numpy.average(scoresSC)
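# CalcResQ: aggregate per-atom Q into residue scores. Hydrogens are skipped;
# for protein/nucleic residues the average is also split into backbone (bbQ)
# and side-chain (scQ) components using the isBB flags (set by SetBBAts).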
def CalcResQ (r, dmap, sigma, allAtTree=None, numPts=8, toRAD=2.0, dRAD=0.1, minD=0.0, maxD=1.0, useOld=False ) :
scQ, bbQ, Q, numSC, numBB, numAts = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for at in r.atoms :
if at.element.name == "H" :
continue
if not hasattr ( at, 'Q' ) or not useOld :
cc, ccm = RadCC ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=numPts, toRAD=toRAD, dRAD=dRAD, minD=minD, maxD=maxD )
at.Q = ccm
at.CC = cc
Q += at.Q
numAts += 1.0
if r.isProt or r.isNA :
if at.isBB :
bbQ += at.Q
numBB += 1.0
else :
scQ += at.Q
numSC += 1.0
if r.isProt or r.isNA :
if int(numSC) != len(r.scAtoms) :
print " - res %d.%s.%s - %.0f/%d sc atoms" % (r.id.position,r.type,r.id.chainId, numSC, len(r.scAtoms))
if numSC > 0 :
r.scQ = scQ / numSC
else :
r.scQ = None
if numBB > 0 :
r.bbQ = bbQ / numBB
else :
r.bbQ = None
r.Q = Q / numAts if numAts > 0 else 0.0 # average over heavy atoms only; len(r.atoms) would miscount when H atoms are present
def CalcQ_ ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=False, log=False ) :
print "Q Scores - in parallel"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
print " - residues to do: %d" % len(ress)
import multiprocessing
threads = max ( 1, multiprocessing.cpu_count() / 2 )
print 'calc q using %d threads' % threads
# Avoid periodic Python context switching.
import sys
original_check_interval = sys.getcheckinterval()
sys.setcheckinterval(1000000000)
# Define thread class for fitting.
from threading import Thread
class Q_Thread(Thread):
def __init__(self, ress, ti):
Thread.__init__(self)
self.ress = ress
self.ti = ti
def run(self):
print "run - %d - %d" % (self.ti, len(ress))
for ri, r in enumerate ( self.ress ) :
#CalcResQ (r, dmap, sigma, allAtTree=allAtTree, numPts=2, toRAD=2.0, dRAD=0.2 )
#print "%d-%d/%d" % (ti,ri/len(self.ress)),
for at in r.atoms :
if at.element.name != "H" :
cc, ccm = RadCC ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5 )
# Starts threads with each calculating an equal number of fits.
n = len(ress)
g = [ress[(n*c)/threads:(n*(c+1))/threads] for c in range(threads)]
threads = []
for mi, ml in enumerate(g) :
#print "%d - %d, %d-%d" % (mi,len(ml),ml[0].id.position,ml[-1].id.position)
t = Q_Thread(ml,mi)
threads.append(t)
for t in threads:
t.start()
print ""
# Wait for all threads to finish
for t in threads:
t.join()
# Restore periodic context switching.
sys.setcheckinterval(original_check_interval)
# Collect fit results from all threads.
#for t in threads:
# print "",
def CalcQ ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=False, log=False ) :
print "Q Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
import time
start = time.time()
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
print " - residues to do: %d" % len(ress)
for ri, r in enumerate ( ress ) :
CalcResQ (r, dmap, sigma, allAtTree=allAtTree, numPts=2, toRAD=2.0, dRAD=0.5, useOld=useOld)
if (ri+1) % 10 == 0 :
if log : status ( "Calculating Q scores - res %d/%d" % (ri+1, len(ress)) )
print ".",
scores, scoresBB, scoresSC, scoresQ, scoresCC = [], [], [], [], []
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
scores.append ( r.Q )
if r.isProt or r.isNA : # CalcResQ only sets bbQ/scQ for protein/NA residues
if r.bbQ != None : scoresBB.append ( r.bbQ )
if r.scQ != None : scoresSC.append ( r.scQ )
for at in r.atoms :
scoresQ.append ( at.Q )
scoresCC.append ( at.CC )
#sc = [x for x in scores if x is not None]
#scSC = [1.0/x for x in scoresSC if x is not None]
#scBB = [1.0/x for x in scoresBB if x is not None]
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
print " - residue Q min %.3f max %.3f, avg %.3f" % (min(scores), max(scores), numpy.average(scores))
print " - backbone Q min %.3f max %.3f, avg %.3f" % (min(scoresBB), max(scoresBB), numpy.average(scoresBB))
print " - side chain Q min %.3f max %.3f, avg %.3f" % (min(scoresSC), max(scoresSC), numpy.average(scoresSC))
print " - atom Q min %.3f max %.3f, avg %.3f" % (min(scoresQ), max(scoresQ), numpy.average(scoresQ))
print " - atom CC min %.3f max %.3f, avg %.3f" % (min(scoresCC), max(scoresCC), numpy.average(scoresCC))
if 0 :
sByType = {}
rByType = {}
for r in ress :
if r.scZ != None :
if not r.type in sByType :
rByType[r.type] = []
sByType[r.type] = []
rByType[r.type].append ( [r.scZ, r] )
sByType[r.type].append ( [r.scZ] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
avgs.sort ( reverse=True, key=lambda x: x[0] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype in avgs :
rscores = rByType[rtype]
rscores.sort ( reverse=False, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[rtype]
else : rts = nucleic3to1[rtype]
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore) )
fp.close()
return numpy.average(scoresQ), numpy.average(scoresCC)
def CalcQForOpenModelsRess () :
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
minD, maxD = MinMaxD ( dmap )
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
#rids = {}
#for r in mol.residues :
# rids["%d.%s" % (r.id.position,r.id.chainId)] = r
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
fin = open ( os.path.splitext ( dmap.data.path )[0] + ".txt" )
fout = open ( os.path.splitext ( dmap.data.path )[0] + "_out.txt", "w" )
foutn = os.path.splitext ( dmap.data.path )[0] + "_stat.txt"
sig_at = []
for l in fin :
#print l,
sigma, atIdStr = l.split()
if not atIdStr in atids :
print " - atid not found: ", atIdStr
at = atids[atIdStr.strip()]
sigma = float(sigma)
sig_at.append ( [sigma, at, atIdStr] )
fs = open ( foutn, "w" ); fs.write ( "%d/%d" % (0,len(sig_at) ) ); fs.close()
i = 0
for sigma, at, atId in sig_at :
#print "%d.%s.%s" % (r.id.position,r.id.chainId,at.name),
cc, ccm = RadCC ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
#print cc, ccm
fout.write ( "%s %f %f\n" % (atId, cc, ccm) )
if i%10 == 0 :
fs = open ( foutn, "w" ); fs.write ( "%d/%d" % (i+1,len(sig_at) ) ); fs.close()
i += 1
fin.close()
fout.close()
fs = open ( foutn, "w" ); fs.write ( "done" ); fs.close()
def CalcQp ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=True, log=False ) :
import multiprocessing
numProc = max ( 1, multiprocessing.cpu_count()/2 )
M = dmap.data.full_matrix()
minD, maxD = numpy.min(M), numpy.max(M)
print "Q Scores - p - %d" % numProc
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
print " - sigma: %.2f" % sigma
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
import time
start = time.time()
ress = []
atoms = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
for at in r.atoms :
if not at.element.name == "H" :
atoms.append ( at )
print " - atoms to do: %d" % len(atoms)
import subprocess
import sys
mapPath = os.path.split ( dmap.data.path )[0]
mapBase = os.path.splitext (dmap.data.path)[0]
print "Ran:"
print sys.argv
# '/Users/greg/_mol/Chimera.app/Contents/Resources/share/__main__.py'
chiPath = os.path.split ( sys.argv[0] )[0]
mapQPPath = os.path.join ( chiPath, 'Segger' )
mapQPPath = os.path.join ( mapQPPath, 'mapqp.py' )
print " -- ", mapQPPath
# for Mac
chiPath, share = os.path.split ( chiPath )
print chiPath, share
chiPath2, resOrChim = os.path.split ( chiPath )
print chiPath2, resOrChim
if resOrChim == "Chimera" :
print " -- on unix"
chiPath = os.path.join ( chiPath, 'bin' )
chiPath = os.path.join ( chiPath, 'chimera' )
else :
print " -- on mac"
#chiPath2, contents = os.path.split ( chiPath2 )
#print chiPath2, contents
chiPath = os.path.join ( chiPath2, 'MacOS' )
chiPath = os.path.join ( chiPath, 'chimera' )
print " -- chiPath: ", chiPath
dir_path = os.path.dirname(os.path.realpath(__file__))
inDir = os.path.split(dir_path)[0]
print "Working dir: ", inDir
mapQPPath = os.path.join ( inDir, 'Segger' )
mapQPPath = os.path.join ( mapQPPath, 'mapqp.py' )
print " -- ", mapQPPath
mapBase = mapBase + "_qscore_2019_proc"
n = len(atoms)
g = [atoms[(n*c)/numProc:(n*(c+1))/numProc] for c in range(numProc)]
procs = []
for mi, atoms1 in enumerate(g) :
ress1 = atoms1[0].residue
ressN = atoms1[-1].residue
print " - %d/%d, %d-%d" % (mi+1, numProc, ress1.id.position, ressN.id.position)
fout = open ( mapBase + "_%d.txt" % mi, "w" )
for at in atoms1 :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
fout.write ( "%.3f %d.%s.%s.%s\n" % (sigma, r.id.position,r.id.chainId,at.name,altLoc) )
fout.close()
nmap_path = mapBase + "_%d.mrc" % mi
#print " -> ", nmap_path
nmap = MaskMapResize ( atoms1, 4.0, dmap, nmap_path )
#nmap.write_file ( nmap_path , "mrc" )
args = [chiPath, '--nogui', '--silent', '--nostatus', mol.openedAs[0], nmap_path, mapQPPath]
if 0 :
print " - running:",
for arg in args :
print arg,
fout = open ( mapBase + "_%d.log" % mi, "w" )
foute = open ( mapBase + "_%d_err.log" % mi, "w" )
p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=inDir)
procs.append ( [mi, p, fout, foute] )
print ""
print "Waiting...",
for mi, p, fout, foute in procs :
p.wait()
fout.close()
foute.close()
print "%d" % mi,
print ""
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
print ""
print "Getting...",
for mi, p, fout, foute in procs :
fin = mapBase + "_%d_out.txt" % mi
#print " - getting from: ", fin
fp = open ( fin )
for l in fp :
#print " - ", l
atId, cc, ccm = l.split()
at = atids[atId.strip()]
#at = r.atomsMap[atName][0]
at.Q = float(ccm)
at.CC = float(cc)
at.occupancy = at.Q
#print " - removing..."
os.remove ( mapBase + "_%d_out.txt" % mi )
os.remove ( mapBase + "_%d_stat.txt" % mi )
os.remove ( mapBase + "_%d.txt" % mi )
os.remove ( mapBase + "_%d.mrc" % mi )
os.remove ( mapBase + "_%d.log" % mi )
os.remove ( mapBase + "_%d_err.log" % mi )
print "%d" % mi,
print ""
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
for r in ress :
CalcResQ (r, dmap, sigma, useOld=True )
scores, scoresBB, scoresSC, scoresQ, scoresCC = [], [], [], [], []
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
scores.append ( r.Q )
if r.isProt or r.isNA :
if r.bbQ != None : scoresBB.append ( r.bbQ )
if r.scQ != None : scoresSC.append ( r.scQ )
for at in r.atoms :
scoresQ.append ( at.Q )
scoresCC.append ( at.CC )
#sc = [x for x in scores if x is not None]
#scSC = [1.0/x for x in scoresSC if x is not None]
#scBB = [1.0/x for x in scoresBB if x is not None]
print " - residue Q min %.3f max %.3f, avg %.3f" % (min(scores), max(scores), numpy.average(scores))
print " - backbone Q min %.3f max %.3f, avg %.3f" % (min(scoresBB), max(scoresBB), numpy.average(scoresBB))
print " - side chain Q min %.3f max %.3f, avg %.3f" % (min(scoresSC), max(scoresSC), numpy.average(scoresSC))
print " - atom Q min %.3f max %.3f, avg %.3f" % (min(scoresQ), max(scoresQ), numpy.average(scoresQ))
print " - atom CC min %.3f max %.3f, avg %.3f" % (min(scoresCC), max(scoresCC), numpy.average(scoresCC))
return numpy.average(scoresQ), numpy.average(scoresCC)
def RadZ ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=10, toRAD=2.0 ) :
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
#pts = []
#for at in atoms :
# p = at.coord()
# pts.append ( [p[0], p[1], p[2]] )
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg0 = numpy.average ( d_vals )
dRAD = 0.2 # restored from the old commented-out default (an assumption); only scales npts when show is set
RAD = toRAD
zscore = None
outRad = RAD*0.9
outRad2 = outRad * outRad
pts = []
for at in atoms :
npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
npts = int ( npts )
#print RAD, dRAD, numPts, " -> ", npts
outPts = SpherePts ( at.coord(), RAD, npts )
for pt in outPts :
if allAtTree != None :
vPt = numpy.array ( [pt[0], pt[1], pt[2]] )
opointsNear = allAtTree.searchTree ( [pt[0], pt[1], pt[2]], outRad )
if len(opointsNear) > 0 :
if 0 :
clash = False
for p in opointsNear :
v = vPt - p.coord().data()
sqSum = numpy.sum ( v * v )
if sqSum < outRad2 :
clash = True
break
if clash == False :
pts.append ( [pt[0], pt[1], pt[2]] )
else :
pts.append ( [pt[0], pt[1], pt[2]] )
else :
pts.append ( [pt[0], pt[1], pt[2]] )
if show :
AddSpherePts ( pts, (.8,.2,.8,0.5), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
else :
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
sdev = numpy.std ( d_vals )
if sdev < 1e-4 : sdev = 1e-4
zscore = (avg0 - avg) / sdev #(scores[0] - avg) / stdev
#print " - scores: avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore )
if log :
print " - q at rad %.2f, avg0 %.3f, avg %.3f, stdev %.4f, z %.3f, %d pts" % (RAD, avg0, avg, sdev, zscore, len(pts))
return zscore
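# MinMaxD: robust value range of a map, clamping the max to mean + 6*std and
# the min to mean - 2*std. These bounds set the amplitude (A = maxD - minD)
# and offset (B = minD) of the reference Gaussian in RadCC, helping make Q
# comparable across maps with different absolute scales.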
def MinMaxD ( dmap ) :
M = dmap.data.full_matrix()
maxD = min ( numpy.average(M)+numpy.std(M)*6.0, numpy.max(M) )
minD = max ( numpy.average(M)-numpy.std(M)*2.0, numpy.min(M) )
#minD = numpy.min(M)
#minD, maxD = numpy.min(M), numpy.max(M)
return minD, maxD
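# RadCC: the core Q computation for a set of atoms. Map values are sampled at
# the atom centers (r = 0) and on spherical shells r = dRAD..toRAD; shell
# points that fall within 0.9 of the current shell radius of any other atom
# in allAtTree are rejected, so each shell probes only the density belonging
# to these atoms. The samples are correlated against a reference Gaussian
#   g(r) = A * exp( -0.5 * (r/sigma)^2 ) + B,  A = maxD - minD, B = minD,
# and the plain (CC) and mean-subtracted (CCm, the Q-score) correlations are
# returned. With fitg=1 a free Gaussian is also fitted to the radial averages
# (SGD then Gauss-Newton) and the profile and fit error are returned as well.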
def RadCC ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform ).astype(numpy.float64, copy=False)
GV = numpy.average ( d_vals )
g_vals = (numpy.ones ( [len(pts),1] ) * GV).astype(numpy.float64, copy=False)
r_avg = [ [0,GV,len(pts)] ]
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
#npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
#npts = numPts * (RAD*RAD / (dRAD*dRAD))
npts = numPts # 8 # int ( npts )
#print RAD, dRAD, numPts, " -> ", npts
for i in range (0, 100) :
outPts = SpherePts ( at.coord(), RAD, npts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
if 1 :
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
if len(opointsNear) == 0 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= npts or i >= 95 :
pts.extend ( at_pts[0:at_pts_i] )
break
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
if log :
loV, hiV = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < loV : loV = avg
if avg > hiV : hiV = avg
A,B = hiV-loV, loV
A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )
if log :
print "cc: %.3f, ccm: %.3f" % (CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:20] ]; N = float(len(V)) # fewer than 20 shells is possible
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f\t%f" % (x, y, gv, yds[i])
i += 1
return CC, CCm, yds, err
else :
return CC, CCm
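# RadAts: estimate an effective Gaussian width (sigma) for a set of atoms by
# averaging map values on shells of increasing radius and fitting
#   y(r) = A * exp( -0.5 * (r/sdev)^2 ) + B
# to the resulting profile, first by stochastic search (optSGD) and then
# refined by Gauss-Newton (optGN). Returns abs(sdev); a flat or rising
# profile is reported as sdev = 10.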
def RadAts ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=20, toRAD=2.0, dRAD=0.1 ) :
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
#pts = []
#for at in atoms :
# p = at.coord()
# pts.append ( [p[0], p[1], p[2]] )
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
RD_, X, Y = [], [], []
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [0,avg] ); X.append (0); Y.append (avg)
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
pts = []
for at in atoms :
npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
npts = int ( npts )
#print RAD, dRAD, numPts, " -> ", npts
outPts = SpherePts ( at.coord(), RAD, npts )
for pt in outPts :
ppt = [pt[0], pt[1], pt[2]]
if allAtTree != None :
vPt = numpy.array ( ppt )
opointsNear = allAtTree.searchTree ( ppt, outRad )
if 1 :
clash = False
for p in opointsNear :
v = vPt - p.coord().data()
sqSum = numpy.sum ( v * v )
if sqSum < outRad2 :
clash = True
break
if clash == False :
pts.append ( ppt )
else :
if len(opointsNear) == 0 :
pts.append ( ppt )
else :
pts.append ( ppt )
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
else :
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [RAD,avg] );
if log :
print RAD, avg, len(pts)
X.append (RAD); Y.append (avg)
RAD += dRAD
#minSd = opt0 ( RD_, 0.1 )
#if minSd != None :
# if show :
# print " SD0: %.1f" % minSd
sdev = toRAD
slope = 0
if RD_[0][1] <= RD_[-1][1] :
sdev = 10.0
else :
#for i in range ( len(RD_) ) :
# RD_[i][1] = RD_[i][1] - RD_[-1][1]
# if log :
# Y[i] = Y[i] - Y[-1]
#import time
#start = time.time()
sdev, A, B = optSGD ( RD_, 9000, 0.2 )
sdev, A, B = optSGD ( RD_, 9000, 0.02, sdev, A, B )
sdev, A, B = optSGD ( RD_, 9000, 0.002, sdev, A, B )
#end = time.time()
#if log : print " sgd - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#start = time.time()
#sdev, A, B = optGN ( RD_, 0.0001 )
#print " gn - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
#end = time.time()
if 1 :
if 0 and sdev != None :
if log :
print " gn1 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
else :
sdev, A, B = optSGD ( RD_, 10000, 0.01 )
if log :
print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
sdev2, A2, B2 = optGN ( RD_, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
if log :
print " gn2 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#else :
# return 10.0
if log :
r = numpy.polyfit ( X, Y, 1, rcond=None, full=False, w=None, cov=False)
print " sdev: %.4f, A %.4f, B %.4f // slope: %.4f y %.4f" % (sdev, A, B, r[0], r[1])
#A, B = 0.26+0.08, -0.08
lastX = 0
for i in range ( len(RD_) ) :
x, y = RD_[i]
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t%f\t%f\t%f" % (x, y, gv, gvRef)
lastX = x
if 1 :
x = lastX + dRAD
#while x < min(4 * sdev,50.0) :
while x < min(10.0,50.0) :
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t\t%f\t%f" % (x, gv, gvRef)
x += dRAD
#return abs(sdev), abs(slope)
return abs(sdev)
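# optGN: Gauss-Newton refinement of the three-parameter Gaussian model
# y = A * exp( -0.5*(x/S)^2 ) + B. For each sample it builds the Jacobian row
# [dy/dA, dy/dB, dy/dS], solves the normal equations for the parameter update,
# and restarts from random parameters if the normal matrix is singular,
# giving up after several bad restarts.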
def optGN ( V, err, S=None, A=None, B=None ) :
y0 = V[0][1]
yN = V[-1][1]
if S == None :
S = 0.5
A = y0+yN
B = yN
an = numpy.array ( [A,B,S] )
#print " _ -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
badMatCount = 0
for i in range ( 1000 ) :
J = numpy.zeros ( [len(V),3] )
e = numpy.zeros ( [len(V),1] )
err0 = 0
j = 0
for x,y in V :
expv = numpy.exp ( -0.5 * numpy.power(x/S,2) )
v = A * expv + B
yd = v - y
err0 += yd * yd
#print "%.2f,%.2f/%.2f(%.2f)" % (x, y, v, yd),
dA = expv
dB = 1
dS = A*x*x*numpy.power(S,-3) * expv
J[j,:] = [dA, dB, dS]
e[j,0] = yd
j += 1
Jt = numpy.transpose(J)
try :
J_ = numpy.dot ( numpy.linalg.inv ( numpy.dot(Jt,J) ), Jt )
except :
#print " - bad matrix?"
#print numpy.dot(Jt,J)
badMatCount += 1
if badMatCount > 3 :
return None, None, None
from numpy import random as R
an = numpy.array ( [R.random()*(y0+yN),R.random()*yN,R.random()*10.0] )
A,B,S = an[0], an[1], an[2]
#print " ? -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
continue
ad = numpy.dot ( J_, e )
ann = an - ( ad[:,0] * reg )
A,B,S = ann[0], ann[1], ann[2]
err1 = err3 ( V, S, A, B )
#if err1 > err0 :
# reg = reg * 0.1
# if reg < err :
# break
#else :
an = ann
#print " %d -- A %.3f B %.3f s %.3f - err %.3f, reg %.5f" % (i, A, B, S, err1, reg)
if abs(err0 - err1) < err :
#print " - done"
break
i += 1
return S,A,B
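# optSGD: simple stochastic search for the same Gaussian model. It perturbs
# (S, A, B) with normally distributed steps of width 'err' for N iterations and
# keeps any move that lowers the squared error (err3). Used to get a starting
# point for optGN.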
def optSGD ( V, N, err, S=None, A=None, B=None ) :
if S == None :
y0 = V[0][1]
yN = V[-1][1]
S = 0.5
A = y0+yN
B = yN
from numpy import random
lastE = err3 ( V, S, A, B )
#while True :
for i in range(N) :
S_ = S + random.normal ( 0, err ) # mean, sigma
A_ = A + random.normal ( 0, err ) # mean, sigma
B_ = B + random.normal ( 0, err ) # mean, sigma
e = err3 ( V, S_, A_, B_ )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e < lastE :
S, A, B = S_, A_, B_
lastE = e
return S,A,B
def err3 ( XYz, sd, A, B ) :
y0 = XYz[0][1]
err = 0
#for x,y in XYz[1:] :
for x,y in XYz :
yd = y - A * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) - B
err += yd * yd
#err /= float(len(XYz))
return err
def err ( XYz, sd ) :
y0 = XYz[0][1]
err = 0
for x,y in XYz[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
#err /= float(len(XYz))
return err
def opt0 ( RD_, dStep ) :
sd = 0.1
y0 = RD_[0][1]
minSd, minErr, N = None, 1e99, float ( len(RD_)-1 )
while sd < 10.0 :
err = 0
for x,y in RD_[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
err /= N
#print err
if err < minErr :
minErr = err
minSd = sd
sd += dStep
return minSd
def opt ( V, maxErr ) :
dd = 1.0
sdAt = 0.1
lastE = err ( V, sdAt )
#while True :
for i in range(10000) :
sdAt += dd
e = err ( V, sdAt )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e >= lastE :
dd *= -0.75
if abs(dd) < maxErr :
return sdAt
lastE = e
return sdAt
def CurMolAndChain () :
segModDialog = mapq_dialog ()
if segModDialog != None :
if segModDialog.cur_mol == None :
segModDialog.cur_mol = chimera.Molecule()
segModDialog.cur_mol.name = "Model"
#chimera.openModels.add ( [mol], noprefs = True )
chimera.openModels.add ( [segModDialog.cur_mol] )
segModDialog.struc.set ( segModDialog.cur_mol.name )
try :
segModDialog.cur_mol.openState.xform = chimera.openModels.list()[0].openState.xform
except :
pass
chainId = segModDialog.chain.get()
if len(chainId) == 0 :
chainId = "A"
segModDialog.chain.set ( chainId )
return segModDialog.cur_mol, chainId
return None, ""
def VisMapMod () :
mol, map = None, None
for m in OML(modelTypes = [chimera.Molecule]) :
if m.display :
mol = m
for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :
if m.display :
map = m
return map, mol
def ZScoresVis ( ) :
map, mol = VisMapMod()
if mol != None and map != None :
ZScores ( mol, map)
else :
print "Did not find visible mol and map"
def ZScores ( mol, map ) :
resolution = 3.0 * map.data.step[0]
print "Mol: %s, Map: %s -- res %.1f" % (mol.name, map.name, resolution)
SetBBAts ( mol )
cmap = {}
for r in mol.residues :
if r.id.chainId in cmap :
cmap[r.id.chainId].append ( [r.id.position, r] )
else :
cmap[r.id.chainId] = [ [r.id.position, r] ]
#ress = cmap['0']
allBB, allSC = [], []
for cid, ress in cmap.iteritems() :
print " - chain %s" % cid
ress.sort ()
ares = [el[1] for el in ress]
zscores = []
if 0 :
sses = SSEs ( ares )
for el in sses :
si, ei, ss, elRess = el
zscore, ccs = zBB ( mol, elRess, resolution, map )
#print ss, si, "-", ei, zscore
if zscore != None :
zscores.append ( zscore )
for r in elRess :
r.bbZ = zscore
else :
bbs = BBsegs ( ares )
W = 3
print " - %d BB segments" % len(bbs)
for bb in bbs :
print " %d res, %d-%d" % (len(bb),bb[0].id.position,bb[-1].id.position)
for ri, r in enumerate ( bb ) :
firstRi = max ( 0, ri-(W-1)/2 )
lastRi = min ( len(bb)-1, ri+(W-1)/2 )
ress = bb[firstRi:lastRi+1]
zscore, ccs = zBB ( mol, ress, resolution, map )
if zscore != None :
zscores.append ( zscore )
avgBB = 0
if len(zscores) > 0 :
avgBB = numpy.average(zscores)
allBB.extend ( zscores )
#print " - BB - min %.2f max %.2f, avg %.2f" % (min(zscores), max(zscores), avgBB )
#else :
# print " - BB - no zscores?"
avgSC = 0
zscores = CalcRotaZ ( map, mol, ares )
if len(zscores) > 0 :
avgSC = numpy.average(zscores)
#print " - SC - min %.2f max %.2f, avg %.2f" % (min(zscores), max(zscores), numpy.average(zscores) )
allSC.extend ( zscores )
#else :
# print " - SC - no zscores?"
print "Chain %s - %d res - avgBB %.2f, avgSC %.2f" % ( cid, len(ares), avgBB, avgSC )
print ""
avgBB = 0
if len(allBB) > 0 :
avgBB = numpy.average(allBB)
print "BB All - %d scores - min %.2f max %.2f, avg %.2f" % (len(allBB), min(allBB), max(allBB), avgBB )
else :
print "BB - no zscores?"
avgSC = 0
if len(allSC) > 0 :
avgSC = numpy.average(allSC)
print "SC All - %d scores - min %.2f max %.2f, avg %.2f" % (len(allSC), min(allSC), max(allSC), avgSC )
else :
print "SC - no zscores?"
print ""
def BBsegs ( ress ) :
bbs = []
firstRi, atRi = 0, 1
for r in ress[1:] :
if ress[atRi].id.position > ress[atRi-1].id.position + 1 or r.rtype == "?" :
bbs.append ( ress[firstRi:atRi] )
firstRi = atRi
atRi += 1
bbs.append ( ress[firstRi:atRi] )
return bbs
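# SSEs: groups a sorted list of residues into runs of secondary structure, returning
# [startPos, endPos, type, residues] tuples where type is "H" (helix), "E" (sheet or
# strand) or "_" (coil); a gap in residue numbering also ends the current run.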
def SSEs ( allRess ) :
if len(allRess) < 1 :
return []
sses, ss = [], ""
res, rStart = allRess[0], allRess[0]
#print " - at first res / pos: %d " % res.id.position
if res.isHelix :
ss = "H"
elif res.isSheet or res.isStrand :
ss = "E"
else :
ss = "_"
ress = [ res ]
lastRes = rStart
for res in allRess [1:] :
if res.id.position > lastRes.id.position + 1 :
print " - gap at", res.id.position
sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )
ress = []
rStart = res
if res.isHelix :
ss = "H"
elif res.isSheet or res.isStrand :
ss = "E"
else :
ss = "_"
if res.isHelix :
if ss != "H" :
#print "%s -> H - at %d rid %d | %d->%d, %d res" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))
sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )
ress = []
rStart = res
ss = "H"
elif res.isSheet or res.isStrand :
if ss != "E" :
#print "%s -> E - at %d rid %d | %d->%d, %d res" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))
sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )
ress = []
rStart = res
ss = "E"
else :
if ss == "H" or ss == "E" :
#print "%s -> _ at %d rid %d | %d->%d, %d res" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))
sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )
ress = []
rStart = res
ss = "_"
ress.append ( res )
lastRes = res
#print "Done at rid %d - %s | %d->%d, %d res" % ( res.id.position, ss, rStart.id.position, res.id.position, len(ress))
sses.append ( [rStart.id.position, res.id.position, ss, ress] )
return sses
def CalcRotaZ ( dmap, mol, ress ) :
A = []
resolution = 3.0 * dmap.data.step[0]
for ri, res in enumerate ( ress ) :
if 1 :
if res.isProt :
res.scZ, cc = zRotSideChain ( mol, res, resolution, dmap, show=False )
elif res.isNA :
res.scZ = zRotBase ( mol, res, resolution, dmap, show=False )
else :
print "?_%d.%s_%s" % (res.id.position, res.id.chainId, res.type)
res.scZ = 0
res.scQ = res.scZ
else :
res.scZ = zShakeSC ( mol, res, resolution, dmap, show=False )
if res.scZ != None :
A.append ( res.scZ )
#avgA, stdA = numpy.average ( A ), numpy.std ( A )
#umsg ( "Avg side chain Z-score: %.3f" % ( avgA ) )
return A
def MoveSC () :
map, mol = VisMapMod()
resolution = 3.0 * map.data.step[0]
print "Map: %s, mol: %s" % (map.name, mol.name)
res = chimera.selection.currentResidues()[0]
print " - res: %s %d.%s" % (res.type, res.id.position, res.id.chainId)
z = None
if 1 :
if res.isProt :
z, cc = zRotSideChain ( mol, res, resolution, map, True )
elif res.isNA :
z = zRotBase ( mol, res, resolution, map, True )
else :
z = zShakeSC ( mol, res, resolution, map, True )
print z
def score3 (R) :
selAts = chimera.selection.currentAtoms()
if len ( selAts ) == 0 :
return
dmap = mapq_dialog ().cur_dmap
a = selAts[0]
r = a.residue
print "Res: %s - %d.%s - %s - Atom: %s" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)
if not hasattr ( r.molecule, 'bbats' ) :
SetBBAts(r.molecule)
r.molecule.bbats = True
removeMods = []
for m in chimera.openModels.list() :
if "RAD points" in m.name :
removeMods.append ( m )
chimera.openModels.remove ( removeMods )
ats = [at for at in r.molecule.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(r.molecule.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
#print "-"
import time
start = time.time()
#r.sdev = RadAts ( selAts, dmap, allAtTree=allAtTree, show=1, log=0, numPts=40, toRAD=2, dRAD=0.5 )
r.sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=20, toRAD=2, dRAD=0.5 )
end = time.time()
print "%s - rad: %.3f, time: %f" % ( a.name, r.sigma, (end - start) )
scZ, cc = zRotSideChain ( r.molecule, r, R, dmap, show=False )
print " - cc %.3f, scZ %.3f " % (cc, scZ)
print "%f\t%f\t%f" % (r.sigma, cc, scZ)
def zShakeSC ( mol, res, resolution, dmap, show=False ) :
atoms = res.scAtoms
if len(atoms) < 1 :
#print " - no sc atoms" % len(atoms)
return None
score0 = 0
scores, scorest = [], []
T = 1
trange = [-T*1.0, 0.0, T*1.0]
#trange = [-T*2.0, -T, 0.0, T, T*2.0]
fout = None
if show :
fout = open ("/Users/greg/Desktop/sc.txt", "w")
moved = False
for xx in trange :
for yy in trange :
for zz in trange :
v = chimera.Vector(xx,yy,zz)
xfT = chimera.Xform.translation ( chimera.Vector(xx,yy,zz) )
molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], xfT )
fpoints, fpoint_weights = fit_points_g ( molg )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
if numpy.fabs(xx) < .01 and numpy.fabs(yy) < .01 and numpy.fabs(zz) < .01 :
score0 = corr1
else :
scores.append ( corr1 )
if fout :
#if not moved :
nmol, cress = CopyRess ( [res] )
for nr in cress :
for nat in nr.atoms :
try :
nat.setCoord ( xfT.apply ( nat.coord() ) )
except :
pass
#chimera.openModels.add ( [nmol] )
nmol.name = "S_%.0f_%.0f_%.0f" % (xx,yy,zz)
moved = True
scorest.append ( [corr1, [xx,yy,zz], nmol] )
if fout :
scorest.sort ()
#scorest.reverse ()
scorest = scorest[0:len(scorest)/2]
if fout :
fout.write ( "%.0f,%.0f,%.0f\t%f\n" % (0,0,0, score0) )
for sc, t, nmol in scorest:
fout.write ( "%.0f,%.0f,%.0f\t%f\n" % (t[0],t[1],t[2], sc) )
chimera.openModels.add ( [nmol] )
SetBBAts ( nmol )
for at in nmol.atoms :
at.display = at.isSC
fout.close()
if 1 :
scores.sort ()
#scores.reverse ()
scores = scores[0:len(scores)/2]
#print ""
avg = numpy.average ( scores ) #numpy.average ( scores[1:] )
stdev = numpy.std ( scores ) #numpy.std ( scores[1:] )
if stdev < 1e-8 :
#print " - nostdev"
return None
zscore = (score0 - avg) / stdev #(scores[0] - avg) / stdev
#print " - scores: avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore )
#fout.close()
return zscore
def zRotSideChain ( mol, r, resolution, dmap, show=False ) :
r.CA, r.CB, r.CG = None, None, None
try :
r.CA = r.atomsMap["CA"][0]
r.CB = r.atomsMap["CB"][0]
except :
pass
if "CG" in r.atomsMap :
r.CG = r.atomsMap["CG"][0]
elif "CG1" in r.atomsMap :
r.CG = r.atomsMap["CG1"][0]
elif "CG2" in r.atomsMap :
r.CG = r.atomsMap["CG2"][0]
elif "OG" in r.atomsMap :
r.CG = r.atomsMap["OG"][0]
elif "SG" in r.atomsMap :
r.CG = r.atomsMap["SG"][0]
if r.CA == None or r.CB == None or r.CG == None :
#print r.type, " - no ats"
return None, None
resolution = 3.0 * dmap.data.step[0]
scores = []
#molg = MyMolMap ( mol, r.atoms, resolution, dmap.data.step[0] )
#fpoints, fpoint_weights = fit_points_g ( molg )
#map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
#olap_0, corr1_0, corr2_0 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
rats = r.scAtoms
nrats = []
for at in rats :
try :
at.p0 = at.coord()
nrats.append ( at )
except :
pass
fout = None
if show :
fout = open ("/Users/greg/Desktop/sc.txt", "w")
#for ri, rmol in enumerate ( rmols[0:10] ) :
for deg in range (0, 360, 36) :
RotAts ( nrats, r.CA, r.CB, deg )
if fout :
nmol, cress = CopyRess ( [r] )
chimera.openModels.add ( [nmol] )
nmol.name = "SC %d %.0f" % (r.id.position, deg)
nr = nmol.residues[0]
SetBBAts ( nmol )
for at in nr.atoms :
if at.isBB :
at.display = False
else :
at.display = True
corr = ResCC ( mol, nrats, resolution, dmap )
scores.append ( corr )
for at in nrats :
at.setCoord ( at.p0 )
if fout :
for sci, sc in enumerate ( scores ):
fout.write ( "%d\t%f\n" % (sci*36, sc) )
fout.close()
zscore1 = None
if len(scores) > 3 :
avg = numpy.average ( scores[1:] )
stdev = numpy.std ( scores[1:] )
zscore1 = ( (scores[0] - avg) / stdev ) if stdev > 1e-5 else 0
#print " -0- avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore1 )
#print scores
#print " -1- avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore1 )
return zscore1, scores[0]
def zRotBase ( mol, r, resolution, dmap, show=False ) :
resolution = 3.0 * dmap.data.step[0]
scores = []
rats = r.scAtoms
nrats = []
for at in rats :
try :
if at.element.name == "H" :
continue
at.p0 = at.coord()
nrats.append ( at )
except :
pass
fout = None
if show :
fout = open ("/Users/greg/Desktop/sc.txt", "w")
#for ri, rmol in enumerate ( rmols[0:10] ) :
for deg in range (0, 360, 36) :
RotAts ( nrats, r.atomsMap["C1'"][0], r.baseAt, deg )
if fout :
nmol, cress = CopyRess ( [r] )
chimera.openModels.add ( [nmol] )
nmol.name = "SC %d %.0f" % (r.id.position, deg)
nr = nmol.residues[0]
SetBBAts ( nmol )
for at in nr.atoms :
if at.isBB :
at.display = False
else :
at.display = True
corr = ResCC ( mol, nrats, resolution, dmap )
scores.append ( corr )
for at in nrats :
at.setCoord ( at.p0 )
if fout :
for sci, sc in enumerate ( scores ):
fout.write ( "%d\t%f\n" % (sci*36, sc) )
fout.close()
zscore1 = None
if len(scores) > 3 :
avg = numpy.average ( scores[1:] )
stdev = numpy.std ( scores[1:] )
zscore1 = ( (scores[0] - avg) / stdev ) if stdev > 1e-5 else 0
#print " -1- avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore1 )
return zscore1
def MoveBB () :
map, mol = VisMapMod()
resolution = 3.0 * map.data.step[0]
print "Map: %s, mol: %s" % (map.name, mol.name)
z, cc = zBB ( mol, chimera.selection.currentResidues(), resolution, map, True )
print z
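# zBB: same translation-perturbation z-score as zShakeSC, but for the backbone and
# side-chain atoms of a whole set of residues, with +/-2A offsets and using the
# about-mean correlation (corr2). Returns [zscore, correlation at origin].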
def zBB ( mol, ress, resolution, dmap, show=False ) :
atoms = []
for r in ress :
#if 'C' in r.atomsMap : atoms.append ( r.atomsMap['C'][0] )
#if 'N' in r.atomsMap : atoms.append ( r.atomsMap['N'][0] )
#if 'CA' in r.atomsMap : atoms.append ( r.atomsMap['CA'][0] )
#if 'O' in r.atomsMap : atoms.append ( r.atomsMap['O'][0] )
atoms.extend ( r.bbAtoms )
atoms.extend ( r.scAtoms )
if len(atoms) < 1 :
#print " - no atoms" % len(atoms)
return [0,0]
score0 = 0
scores, scorest = [], []
T = 2
trange = [-T*1.0, 0.0, T*1.0]
#trange = [-T*2.0, -T, 0.0, T, T*2.0]
fout = None
if show :
fout = open ("/Users/greg/Desktop/sse.txt", "w")
moved = False
for xx in trange :
for yy in trange :
for zz in trange :
v = chimera.Vector(xx,yy,zz)
xfT = chimera.Xform.translation ( chimera.Vector(xx,yy,zz) )
molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], xfT )
fpoints, fpoint_weights = fit_points_g ( molg )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
if numpy.fabs(xx) < .01 and numpy.fabs(yy) < .01 and numpy.fabs(zz) < .01 :
score0 = corr2
else :
scores.append ( corr2 )
if fout :
scorest.append ( [corr2, [xx,yy,zz]] )
if not moved :
nmol, cress = CopyRess ( ress )
for nr in cress :
for nat in nr.atoms :
try :
nat.setCoord ( xfT.apply ( nat.coord() ) )
except :
pass
chimera.openModels.add ( [nmol] )
nmol.name = "T_%.0f_%.0f_%.0f" % (xx,yy,zz)
moved = True
if fout :
scorest.sort ()
scorest.reverse ()
scorest = scorest[len(scorest)/2:]
if fout :
fout.write ( "%.0f,%.0f,%.0f\t%f\n" % (0,0,0, score0) )
for sc, t in scorest:
fout.write ( "%.0f,%.0f,%.0f\t%f\n" % (t[0],t[1],t[2], sc) )
fout.close()
if 0 :
scores.sort ()
scores.reverse ()
scores = scores[len(scores)/2:]
#print ""
avg = numpy.average ( scores ) #numpy.average ( scores[1:] )
stdev = numpy.std ( scores ) #numpy.std ( scores[1:] )
if stdev < 1e-8 :
#print " - nostdev"
return [0,0]
zscore = (score0 - avg) / stdev #(scores[0] - avg) / stdev
#print " - scores: avg %.4f, std %.4f, z-score %.4f" % (avg, stdev, zscore )
#fout.close()
return [zscore, score0]
def CopyRess ( res ) :
nmol = chimera.Molecule()
ress = [None] * len ( res )
aMap = dict()
for ri, r in enumerate ( res ) :
nres = nmol.newResidue (r.type, chimera.MolResId(r.id.chainId, r.id.position))
ress[ri] = nres
for at in r.atoms :
nat = nmol.newAtom (at.name, chimera.Element(at.element.number))
aMap[at] = nat
nres.addAtom( nat )
p = chimera.Point ( at.coord().x, at.coord().y, at.coord().z )
nat.setCoord ( p )
nat.coord0 = chimera.Point ( at.coord().x, at.coord().y, at.coord().z )
#if at.name == "C" or at.name == 'CA' or at.name == 'O' or at.name == "N" :
# at.display = False
for bond in res[0].molecule.bonds :
try :
nb = nmol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )
nb.display = nb.Smart
except :
pass
for r in ress :
r.CA, r.CB, r.CG = None, None, None
try :
r.CA = r.atomsMap["CA"][0]
r.CB = r.atomsMap["CB"][0]
r.CG = r.atomsMap["CG"][0]
except :
pass
return nmol, ress
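# RotAts: rotates the given atoms in place by 'deg' degrees about the axis through
# atoms a1 and a2 (translate so a1 is at the origin, rotate about the normalized
# a1->a2 direction, translate back).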
def RotAts (rats, a1, a2, deg) :
# phi: N -> CA
p1, p2 = a1.coord(), a2.coord()
v = p2 - p1; v.normalize()
xf = chimera.Xform.translation ( p1.toVector() )
xf.multiply ( chimera.Xform.rotation ( v, deg ) )
xf.multiply ( chimera.Xform.translation ( p1.toVector() * -1.0 ) )
#for at in res.atoms :
# if at.name != 'C' and at.name != 'CA' and at.name != 'N' and at.name != 'CB' and at.name != 'O' :
for at in rats :
at.setCoord ( xf.apply (at.coord()) )
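# molecule_grid_dataX / MyMolMapX: build a simulated density map for a set of atoms
# by summing atom-centered Gaussians (standard deviation = resolution / (pi*sqrt(2)),
# as in Chimera's molmap), optionally applying an extra transform xfT to the atom
# coordinates before rasterizing.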
def molecule_grid_dataX (m0, atoms, resolution, step, pad, xfT, cutoff_range, sigma_factor, transforms = [], csys = None):
from _multiscale import get_atom_coordinates
xyz = get_atom_coordinates(atoms, transformed = True)
# Transform coordinates to local coordinates of the molecule containing
# the first atom. This handles multiple unaligned molecules.
# Or if on_grid is specified transform to grid coordinates.
#m0 = atoms[0].molecule
xf = m0.openState.xform
xf.multiply ( xfT )
import Matrix as M
M.transform_points(xyz, M.xform_matrix(xf.inverse()))
if csys:
xf.premultiply(csys.xform.inverse())
tflist = M.coordinate_transform_list(transforms, M.xform_matrix(xf))
anum = [a.element.number for a in atoms]
molecules = set([a.molecule for a in atoms])
if len(molecules) > 1:
name = 'molmap res %.3g' % (resolution,)
else:
name = 'molmap %s res %.3g' % (m0.name, resolution)
grid = bounding_grid(xyz, step, pad, tflist)
grid.name = name
sdev = resolution * sigma_factor
add_gaussians(grid, xyz, anum, sdev, cutoff_range, tflist)
#return grid, molecules
return grid
def MyMolMapX ( m0, atoms, resolution, step, xf ) :
#from MoleculeMap import molecule_grid_data
from math import sqrt, pi
from chimera import openModels as om
from VolumeViewer import volume_from_grid_data
atoms = tuple(atoms)
pad = 3*resolution
cutoff_range = 5 # in standard deviations
sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution
transforms,csys = [], None
display_threshold = 0.95
return molecule_grid_dataX (m0, atoms, resolution, step, pad, xf, cutoff_range, sigma_factor, transforms, csys)
def MyMolMap ( m0, atoms, resolution, step ) :
#from MoleculeMap import molecule_grid_data
from math import sqrt, pi
from chimera import openModels as om
from VolumeViewer import volume_from_grid_data
atoms = tuple(atoms)
pad = 3*resolution
cutoff_range = 5 # in standard deviations
sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution
transforms,csys = [], None
display_threshold = 0.95
return molecule_grid_data(m0, atoms, resolution, step, pad, None, cutoff_range, sigma_factor, transforms, csys)
def molecule_grid_data(m0, atoms, resolution, step, pad, on_grid,
cutoff_range, sigma_factor,
transforms = [], csys = None):
from _multiscale import get_atom_coordinates
xyz = get_atom_coordinates(atoms, transformed = True)
# Transform coordinates to local coordinates of the molecule containing
# the first atom. This handles multiple unaligned molecules.
# Or if on_grid is specified transform to grid coordinates.
#m0 = atoms[0].molecule
xf = on_grid.openState.xform if on_grid else m0.openState.xform
import Matrix as M
M.transform_points(xyz, M.xform_matrix(xf.inverse()))
if csys:
xf.premultiply(csys.xform.inverse())
tflist = M.coordinate_transform_list(transforms, M.xform_matrix(xf))
anum = [a.element.number for a in atoms]
molecules = set([a.molecule for a in atoms])
if len(molecules) > 1:
name = 'molmap res %.3g' % (resolution,)
else:
name = 'molmap %s res %.3g' % (m0.name, resolution)
if on_grid:
from numpy import float32
grid = on_grid.region_grid(on_grid.region, float32)
else:
grid = bounding_grid(xyz, step, pad, tflist)
grid.name = name
sdev = resolution * sigma_factor
add_gaussians(grid, xyz, anum, sdev, cutoff_range, tflist)
#return grid, molecules
return grid
def ResCC ( mol, rats, resolution, dmap ) :
molg = MyMolMap ( mol, rats, resolution, dmap.data.step[0] )
#if 0 :
# fmap = VolumeViewer.volume.volume_from_grid_data ( molg )
# fmap.name = "res molmap!"
# fpoints, fpoint_weights = fit_points(fmap, False)
# map_values = dmap.interpolated_values ( fpoints, fmap.openState.xform )
# olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
# scores.append ( corr1 )
# chimera.openModels.close ( [fmap] )
#else :
fpoints, fpoint_weights = fit_points_g ( molg )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
return corr1
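# fit_points_g / fit_points: collect the nonzero grid points of a simulated map
# (as xyz coordinates) together with their density values; these are the point
# weights used for overlap/correlation against the experimental map.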
def fit_points_g (fdata, threshold = 1e-5):
mat = fdata.full_matrix()
import _volume
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
nz = numpy.nonzero( fpoint_weights )[0]
if len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
transform_vertices( fpoints, fdata.ijk_to_xyz_transform )
if 0 : print "FitPoints from %s with threshold %.4f, %d nonzero" % (
fdata.name, threshold, len(nz) )
return fpoints, fpoint_weights
def fit_points (fmap, threshold = 1e-5):
mat = fmap.data.full_matrix()
import _volume
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
nz = numpy.nonzero( fpoint_weights )[0]
if len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
from _contour import affine_transform_vertices as transform_vertices
transform_vertices( fpoints, fmap.data.ijk_to_xyz_transform )
#transform_vertices ( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )
if 0 : print "FitPoints from %s with threshold %.4f, %d nonzero" % (
fmap.name, threshold, len(nz) )
return fpoints, fpoint_weights
# -----------------------------------------------------------------------------
#
def bounding_grid(xyz, step, pad, transforms):
xyz_min, xyz_max = point_bounds(xyz, transforms)
origin = [x-pad for x in xyz_min]
from math import ceil
shape = [int(ceil((xyz_max[a] - xyz_min[a] + 2*pad) / step)) for a in (2,1,0)]
from numpy import zeros, float32
matrix = zeros(shape, float32)
from VolumeData import Array_Grid_Data
grid = Array_Grid_Data(matrix, origin, (step,step,step))
return grid
# -----------------------------------------------------------------------------
#
def add_gaussians(grid, xyz, weights, sdev, cutoff_range, transforms = []):
from numpy import zeros, float32, empty
sdevs = zeros((len(xyz),3), float32)
for a in (0,1,2):
sdevs[:,a] = sdev / grid.step[a]
import Matrix as M
if len(transforms) == 0:
transforms = [M.identity_matrix()]
from _gaussian import sum_of_gaussians
ijk = empty(xyz.shape, float32)
matrix = grid.matrix()
for tf in transforms:
ijk[:] = xyz
M.transform_points(ijk, M.multiply_matrices(grid.xyz_to_ijk_transform, tf))
sum_of_gaussians(ijk, weights, sdevs, cutoff_range, matrix)
from math import pow, pi
normalization = pow(2*pi,-1.5)*pow(sdev,-3)
matrix *= normalization
# -----------------------------------------------------------------------------
#
def point_bounds(xyz, transforms = []):
from _multiscale import bounding_box
if transforms :
from numpy import empty, float32
xyz0 = empty((len(transforms),3), float32)
xyz1 = empty((len(transforms),3), float32)
txyz = empty(xyz.shape, float32)
import Matrix as M
for i, tf in enumerate(transforms) :
txyz[:] = xyz
M.transform_points(txyz, tf)
xyz0[i,:], xyz1[i,:] = bounding_box(txyz)
xyz_min, xyz_max = xyz0.min(axis = 0), xyz1.max(axis = 0)
else:
xyz_min, xyz_max = bounding_box(xyz)
return xyz_min, xyz_max
# ---------------------------------------------------------------------------------
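# SkinMap: warps the map around a model using "bones". Each grid point of the output
# is mapped through the rigid frame of its closest bone (N == 1) or a distance-weighted
# blend of the N closest bones, the map is resampled at the warped positions, and the
# result is masked to within atomRad of the atoms.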
def SkinMap ( atoms, bones, N, dmap, atomRad, nname, showMesh = False ) :
from _multiscale import get_atom_coordinates
points = get_atom_coordinates ( atoms, transformed = True )
import _contour
points0 = numpy.copy ( points )
_contour.affine_transform_vertices ( points0, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
nn3, nn2, nn1 = dmap.data.size
npoints = VolumeData.grid_indices ( (int(nn1), int(nn2), int(nn3) ), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( npoints, dmap.data.ijk_to_xyz_transform )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )
for bo in bones :
bo.MakeFrame ()
if N == 1 :
for pi, p in enumerate ( npoints ) :
cbone, minDist = None, 1e9
for bo in bones :
d = bo.DistToPoint ( p )
if d < minDist :
minDist = d
cbone = bo
pt = cbone.SkinPoint ( p )
npoints[pi] = pt
else :
for pi, p in enumerate ( npoints ) :
dbos = []
for bo in bones :
dbos.append ( [bo.DistToPoint ( p ), bo] )
dbos.sort()
totD = 0.0
sp = numpy.array ( [0,0,0] )
for i in range ( N ) :
d, bo = dbos[i]
sp = sp + numpy.array ( bo.SkinPoint ( p ) ) * d
totD += d
npoints[pi] = sp / totD
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
nmat = dvals.reshape( (nn3,nn2,nn1) )
ndata = VolumeData.Array_Grid_Data ( nmat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )
mdata = VolumeData.zone_masked_grid_data ( ndata, points0, atomRad )
MapFromData ( mdata, nname, dmap, False )
if showMesh :
MapFromData ( mdata, nname, dmap, True )
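# ExtractDen: resamples the map on a new grid bounding the given atoms (plus 4A of
# padding), masks it to within boundRad of the atoms, and returns the resulting
# volume (and optionally a mesh copy).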
def ExtractDen ( atoms, dmap, nname, boundRad = 2.0, showMesh = False) :
from _multiscale import get_atom_coordinates
points1 = get_atom_coordinates ( atoms, transformed = False )
#COM, U, S, V = prAxes ( points )
bound = 4.0
li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
n1 = int ( numpy.ceil ( (hi - li + 1) / nstep[0] ) )
n2 = int ( numpy.ceil ( (hj - lj + 1) / nstep[1] ) )
n3 = int ( numpy.ceil ( (hk - lk + 1) / nstep[2] ) )
O = chimera.Point ( li, lj, lk )
#O = atoms[0].molecule.openState.xform.apply ( O )
#print " - new map origin:", nO
npoints = VolumeData.grid_indices ( (n1, n2, n3), numpy.single) # i,j,k indices
S = dmap.data.step
_contour.affine_transform_vertices ( npoints, ((S[0], 0.0, 0.0, O[0]), (0.0, S[1], 0.0, O[1]), (0.0, 0.0, S[2], O[2])) )
#_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
dvals = dmap.interpolated_values ( npoints, atoms[0].molecule.openState.xform )
nmat = dvals.reshape( (n3,n2,n1) )
ndata = VolumeData.Array_Grid_Data ( nmat, O, nstep, dmap.data.cell_angles, name = nname )
#_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )
#_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( ndata, points1, boundRad )
dmap = MapFromData ( mdata, nname, dmap, False )
dmap.openState.xform = atoms[0].molecule.openState.xform
dmesh = None
if showMesh :
dmesh = MapFromData ( mdata, nname, dmap, True )
dmesh.openState.xform = atoms[0].molecule.openState.xform
return [dmap, dmesh]
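# BoneMap: extracts the density around a single bone (the a1-a2 segment) into its own
# grid, weighting voxels by 1 within atomRad of the segment and by a steep falloff
# 1/(1+d-atomRad)^8 outside it; the result is stored on the bone as bone.ndata for
# later use by MoldMap.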
def BoneMap ( bone, dmap, atomRad, nname, show = False, showMesh = False ) :
#_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )
from _multiscale import get_atom_coordinates
atoms = [bone.a1, bone.a2]
points = get_atom_coordinates ( atoms, transformed = True )
import _contour
points1 = numpy.copy ( points )
_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
points0 = numpy.copy ( points1 )
_contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )
bound = int ( numpy.ceil( atomRad / dmap.data.step[0] ) ) + 1
li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li,lj,lk, hi,hj,hk, n1,n2,n3 )
#nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )
#dmat = dmap.full_matrix()
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
#nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
#print " - %s origin:" % dmap.name, O
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
#print " - new map origin:", nO
wmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( wmat, nO, nstep, dmap.data.cell_angles )
npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
npointsi = numpy.copy ( npoints )
_contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )
for pi, p in enumerate ( npoints ) :
i,j,k = npointsi[pi]
d = bone.DistToPoint ( p )
if d < atomRad :
wmat[k,j,i] = 1.0
else :
wmat[k,j,i] = 1.0 / numpy.power (1+d-atomRad,8)
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
nmat = dvals.reshape( (nn3,nn2,nn1) )
bone.ndata = VolumeData.Array_Grid_Data ( nmat*wmat, nO, nstep, dmap.data.cell_angles, name = nname )
bone.xfmod = dmap
if show :
from random import random as rand
clr = ( rand()*.5+.1, rand()*.5+.1, rand()*.5+.1 )
bone.dmap = MapFromData ( bone.ndata, nname, dmap, showMesh, color = clr )
bone.dmap.openState.xform = dmap.openState.xform
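# MoldMap / MoldMap2: rebuild ("mold") the map for a deformed model. For each bone,
# grid points are carried from the current pose back to the bone's original pose
# (Xf / Xf0), the bone's stored density (bone.ndata from BoneMap) is interpolated
# there, and the per-bone contributions are combined with a voxel-wise maximum.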
def MoldMap ( atoms, bones, dmap, nname, showMesh = False ) :
ndata = dmap.data
nn3, nn2, nn1 = dmap.data.size
nO = dmap.data.origin
nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
if 1 :
ndata = DataForAtoms ( atoms, dmap )
npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
#_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )
#_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )
for bone in bones :
npointsc = numpy.copy ( npoints )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf().inverse() ) )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf0() ) )
#_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )
#_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
_contour.affine_transform_vertices ( npointsc, bone.ndata.xyz_to_ijk_transform )
p2mt = Matrix.xform_matrix ( chimera.Xform.identity() )
#dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.dmap.data.matrix(), method='linear' )
dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.ndata.matrix(), method='linear' )
bmat = dvals.reshape( (nn3,nn2,nn1) )
#nmat = nmat + bmat
nmat = numpy.maximum ( nmat, bmat )
#nmat = nmat / float ( len(bones) )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )
MapFromData ( ndata, nname, dmap, False )
if showMesh :
MapFromData ( ndata, nname, dmap, True )
def MoldMap2 ( bones, dmap, dmesh ) :
ndata = dmap.data
nn1, nn2, nn3 = dmap.data.size
nO = dmap.data.origin
nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )
_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix (bones[0].a1.molecule.openState.xform.inverse()) )
for bone in bones :
npointsc = numpy.copy ( npoints )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf().inverse() ) )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf0() ) )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix (bone.a1.molecule.openState.xform) )
_contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
_contour.affine_transform_vertices ( npointsc, bone.ndata.xyz_to_ijk_transform )
p2mt = Matrix.xform_matrix ( chimera.Xform.identity() )
#dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.dmap.data.matrix(), method='linear' )
dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.ndata.matrix(), method='linear' )
bmat = dvals.reshape( (nn3,nn2,nn1) )
#nmat = nmat + bmat
nmat = numpy.maximum ( nmat, bmat )
#nmat = nmat / float ( len(bones) )
#ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )
dmap.data.full_matrix()[:,:,:] = nmat[:,:,:]
dmap.data.values_changed()
MapUp ( dmap, False )
if dmesh != None :
dmesh.data.full_matrix()[:,:,:] = nmat[:,:,:]
dmesh.data.values_changed()
MapUp ( dmesh, True )
def DataForAtoms ( atoms, dmap, nname = "data for atoms" ) :
from _multiscale import get_atom_coordinates
points = get_atom_coordinates ( atoms, transformed = True )
points1 = numpy.copy ( points )
_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#points0 = numpy.copy ( points1 )
_contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )
bound = 5
li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )
return ndata
def MapForAtoms ( atoms, dmap, nname, showMesh=False, thrF = 1.0 ) :
ndata = DataForAtoms ( atoms, dmap, nname )
m1 = MapFromData ( ndata, nname, dmap, False, thrF=thrF )
m2 = None
if showMesh :
m2 = MapFromData ( ndata, nname, dmap, True, thrF=thrF )
return [m1,m2]
def MapUp (dmap, showMesh = False, color=(.7,.7,.7,1)) :
ro = VolumeViewer.volume.Rendering_Options()
ro.smoothing_factor = .3
ro.smoothing_iterations = 2
ro.surface_smoothing = False
ro.square_mesh = True
ro.line_thickness = 1
dmap.update_surface ( False, ro )
for sp in dmap.surfacePieces :
v, t = sp.geometry
if len(v) == 8 and len(t) == 12 :
sp.display = False
else :
if showMesh :
sp.color = (color[0]/2.0, color[1]/2.0, color[2]/2.0, 1.0)
sp.displayStyle = sp.Mesh
else :
sp.color = (color[0], color[1], color[2], 0.1)
def MapFromData ( ndata, nname, dmap, showMesh, thrF=1.0, color=(.7,.7,.7,1) ) :
if showMesh :
m = GetMod ( nname + "_mesh" )
if m != None :
chimera.openModels.close ( [m] )
else :
m = GetMod ( nname )
if m != None :
chimera.openModels.close ( [m] )
nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
nv.openState.xform = dmap.openState.xform
nv.name = nname
if showMesh :
nv.name = nname + "_mesh"
nv.region = ( nv.region[0], nv.region[1], [1,1,1] )
nv.surface_levels[0] = dmap.surface_levels[0] * thrF
MapUp(nv, showMesh, color)
return nv
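# diha: dihedral angle (in degrees) around the a2-a3 bond, computed with the atan2
# formulation x = n1.n2, y = m1.n2, where n1 and n2 are the two plane normals and
# m1 = n1 x b2; see the stackexchange link cited in the function.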
def diha ( a1, a2, a3, a4 ) :
#n1 = vnorm ( a1.coord(), a2.coord(), a3.coord() )
#n2 = vnorm ( a2.coord(), a3.coord(), a4.coord() )
#return numpy.arccos ( n2 * n1 * -1.0 ) * 180.0 / numpy.pi
# http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates
b1 = a2.coord() - a1.coord()
b2 = a3.coord() - a2.coord()
b3 = a4.coord() - a3.coord()
n1 = chimera.cross ( b1, b2 ); n1.normalize()
n2 = chimera.cross ( b2, b3 ); n2.normalize()
m1 = chimera.cross ( n1, b2 ); m1.normalize()
x = n1 * n2
y = m1 * n2
return -1.0 * numpy.arctan2 ( y, x) * 180.0 / numpy.pi
def angle ( a1, a2, a3 ) :
n1 = a1.coord() - a2.coord()
n2 = a3.coord() - a2.coord()
return numpy.arccos ( (n2/n1.length) * (n1/n2.length) ) * 180.0 / numpy.pi
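# Bone: a rigid segment defined by three atoms. It stores the coordinate frame built
# from the atoms' original positions (coord0) and, via MakeFrame, the current frame,
# so points can be carried from the current pose back to the original pose (SkinPoint)
# and an approximate distance to the a1-a2 segment computed (DistToPoint).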
class Bone (object) :
def __init__ (self, a1, a2, a3) :
BoneInit ( self, a1, a2, a3 )
def CS ( self ) :
return CS ( self.a1.coord(), self.a2.coord(), self.a3.coord() )
def CS0 ( self ) :
return CS ( self.a1.coord0, self.a2.coord0, self.a3.coord0 )
def Xf ( self ) :
X,Y,Z = CS ( self.a1.coord(), self.a2.coord(), self.a3.coord() )
return chimera.Xform.coordFrame ( X, Y, Z, self.a1.coord(), True )
def Xf0 ( self ) :
X,Y,Z = CS ( self.a1.coord0, self.a2.coord0, self.a3.coord0 )
return chimera.Xform.coordFrame ( X, Y, Z, self.a1.coord0, True )
def MakeFrame ( self ) :
BoneMakeFrame ( self )
def DistToPoint ( self, pt ) :
return BoneDistToPoint ( self, pt )
def SkinPoint ( self, pt ) :
return BoneSkinPoint ( self, pt )
def BoneInit (bo, a1, a2, a3) :
bo.a1, bo.a2, bo.a3 = a1, a2, a3
bo.X0, bo.Y0, bo.Z0 = CS ( a1.coord0, a2.coord0, a3.coord0 )
bo.F0 = chimera.Xform.coordFrame ( bo.X0, bo.Y0, bo.Z0, bo.a1.coord0, True )
def BoneMakeFrame ( bo ) :
bo.X, bo.Y, bo.Z = CS ( bo.a1.coord(), bo.a2.coord(), bo.a3.coord() )
bo.F = chimera.Xform.coordFrame ( bo.X, bo.Y, bo.Z, bo.a1.coord(), True )
bo.F = bo.F.inverse()
def CS ( p1, p2, p3 ) :
X = p2 - p1; X.normalize()
Y = p3 - p1; Y.normalize()
Z = chimera.cross ( X, Y ); Z.normalize()
Y = chimera.cross ( Z, X ); Y.normalize()
return X,Y,Z
def BoneDistToPoint ( bo, pt ) :
pt = chimera.Point(pt[0], pt[1], pt[2])
V = bo.a2.coord() - bo.a1.coord()
v = pt - bo.a1.coord()
t = V * v
if t < 0.0 :
return v.length
elif t > 1.0 :
return (pt-bo.a2.coord()).length
else :
lp = bo.a1.coord() + (V*t)
return (pt-lp).length
def BoneSkinPoint ( bo, pt ) :
#bo.X, bo.Y, bo.Z = CS ( bo.a1.coord(), bo.a2.coord(), bo.a3.coord() )
#x = chimera.Xform.coordFrame ( bo.X, bo.Y, bo.Z, bo.a1.coord(), True )
#x = x.inverse()
#y = chimera.Xform.coordFrame ( bo.X0, bo.Y0, bo.Z0, bo.a1.coord0, True )
pt = chimera.Point ( pt[0], pt[1], pt[2] )
pt = bo.F.apply ( pt )
pt = bo.F0.apply ( pt )
return [pt[0], pt[1], pt[2]]
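# MaskMapResize: masks the map to within R of the given atoms, finds the bounding box
# of the remaining nonzero values (plus padding), resamples the map on a new grid with
# the same step covering just that box, and either opens it as a new volume or writes
# it to fout.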
def MaskMapResize ( atoms, R, dmap, fout=None ) :
import _multiscale
import _contour
import _volume
from _contour import affine_transform_vertices as transform_vertices
from VolumeData import grid_indices, zone_masked_grid_data, interpolate_volume_data
points = _multiscale.get_atom_coordinates ( atoms, transformed = True )
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, R )
#mdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = "atom masked" )
mat = mdata.full_matrix()
threshold = 1e-3
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
nz = numpy.nonzero( fpoint_weights )[0]
if len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
#transform_vertices( fpoints, fmap.data.ijk_to_xyz_transform )
#print " - %s mask %d atoms, %d nonzero points" % ( dmap.name, len(atoms), len(nz) )
#transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )
#transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )
bound = 2
li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )
#nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )
#dmat = dmap.full_matrix()
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
#print " - %s origin:" % dmap.name, O
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
#print " - new map origin:", nO
ox = round ( nO[0]/dmap.data.step[0] ) * dmap.data.step[0]
oy = round ( nO[1]/dmap.data.step[1] ) * dmap.data.step[1]
oz = round ( nO[2]/dmap.data.step[2] ) * dmap.data.step[2]
nO = ( ox, oy, oz )
#print " - new map origin:", nO
nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
npoints = grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
#dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )
#nze = numpy.nonzero ( dvals )
nmat = dvals.reshape( (nn3,nn2,nn1) )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
if fout == None :
try : nv = VolumeViewer.volume.add_data_set ( ndata, None )
except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
dmap_base = os.path.splitext(dmap.name)[0]
dmap_path = os.path.splitext (dmap.data.path)[0]
nv.name = dmap_base + "_masked"
nv.openState.xform = dmap.openState.xform
return nv
else :
from VolumeData import save_grid_data
#d = self.grid_data()
format = save_grid_data(ndata, fout, None, {}, False)
#print " - saved data"
# ---------------------------------------------------
def mapq_dialog ( create=False ) :
from chimera import dialogs
d = dialogs.find ( dlgName, create=create )
return d
def close_dialog () :
from chimera import dialogs
def setro (ro) :
from chimera import dialogs
d = dialogs.find ( "volume viewer", create=False )
if d :
d.surface_options_panel.set_gui_from_rendering_options (ro)
#d.redisplay_needed_cb()
def vold () :
from chimera import dialogs
d = dialogs.find ( "volume viewer", create=False )
d.surface_options_panel.line_thickness.set(2)
d.redisplay_needed_cb()
#set_gui_from_rendering_options
def show_dialog () :
from chimera import dialogs
d = dialogs.find ( dlgName, create=False )
if d :
print " - found old diag"
d.toplevel_widget.update_idletasks ()
d.Close()
d.toplevel_widget.update_idletasks ()
dialogs.register (MapQ_Dialog.name, MapQ_Dialog, replace = True)
d = dialogs.find ( dlgName, create=True )
# Avoid transient dialog resizing when created and mapped for first time.
d.toplevel_widget.update_idletasks ()
d.enter()
return d
def GetMod ( name ) :
for m in chimera.openModels.list() :
if m.name == name :
return m
return None
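# SetBBAts: annotates every residue and atom in a molecule. Residues get isProt/isNA
# and rtype flags, protein atoms are split into backbone (N, CA, C, O, OT1, OT2) and
# side-chain lists, and nucleic-acid atoms are flagged as backbone, sugar or base
# atoms depending on the base type.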
def SetBBAts ( mol ) :
#if hasattr ( mol, "bbats" ) :
# return
#mol.bbats = True
print " - setting bbAts in %s" % mol.name
for r in mol.residues :
#r.isProt = "C" in r.atomsMap and "CA" in r.atomsMap and "N" in r.atomsMap
#r.isProt = "CA" in r.atomsMap
#r.isNA = "O3'" in r.atomsMap and "O5'" in r.atomsMap
from chimera.resCode import nucleic3to1
from chimera.resCode import protein3to1
protein3to1['HSD'] = protein3to1['HIS']
r.isProt = r.type in protein3to1
r.isNA = r.type in nucleic3to1
if r.isProt :
r.rtype = "prot"
elif r.isNA :
r.rtype = "na"
else :
r.rtype = "?"
if r.isNA :
try :
if nucleic3to1[r.type] == "G" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "C" :
r.baseAt = r.atomsMap["N1"][0]
elif nucleic3to1[r.type] == "A" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "U" :
r.baseAt = r.atomsMap["N1"][0]
except :
print " - baseAt not found - "
pass
r.bbAtoms = []
r.scAtoms = []
if r.isProt :
for a in r.atoms :
if a.element.name == "H" :
a.isBB, a.isSC = False, False
continue
n = a.name
a.isBB = n=="C" or n=="CA" or n=="O" or n=="N" or n=="OT1" or n=="OT2"
a.isSC = not a.isBB
if a.isBB :
r.bbAtoms.append ( a )
else :
r.scAtoms.append ( a )
a.isSugar, a.isBase = False, False
elif r.isNA :
for a in r.atoms :
if a.element.name == "H" :
continue
n = a.name
a.isBB = n=="P" or n=="O1P" or n=="O2P" or n=="O5'" or n=="C5'" or n=="O3'"
a.isSugar = n=="C1'" or n=="C2'" or n=="C3'" or n=="C4'" or n=="O4'" or n=="O2'"
if nucleic3to1[r.type] == "G" :
a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="C6" or n=="O6" or n=="N1" or n=="C2" or n=="N2" or n=="N3"
elif nucleic3to1[r.type] == "C" :
a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="N4" or n=="C5" or n=="C6"
elif nucleic3to1[r.type] == "A" :
a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="N3" or n=="C2" or n=="N1" or n=="C6" or n=="N6"
elif nucleic3to1[r.type] == "U" :
a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="O4" or n=="C5" or n=="C6"
#if nucleic3to1[r.type] == "G" :
# r.isBase = n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n="" or n="" or n=""
# r.baseAt = r.atomsMap["N9"][0]
a.isSC = not a.isBB and not a.isSugar
if a.isBB :
r.bbAtoms.append ( a )
else :
r.scAtoms.append ( a )
else :
for a in r.atoms :
a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False
#def GetVisibleMol () :
# for m in chimera.openModels.list() :
# if m.display == True and type(m) == chimera.Molecule :
# return m
# return None
NA = {
"A" : { "baseAtoms" : ["","",""] }
}
class NA ( object ):
type
|
gregdp/segger
|
Segger/modelz.py
|
Python
|
mit
| 262,914
|
[
"Gaussian"
] |
fa9b5c964fc9ba967c3e835e53b9c7bfff2de8d144796bd569f1612bf95163b2
|
#!/usr/bin/env python
import unittest
import random
import nemo
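# IzNetwork: thin wrapper around nemo.Network that registers the built-in
# 'Izhikevich' neuron type and exposes add_neuron with the usual
# (a, b, c, d, sigma, u, v) argument order.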
class IzNetwork(nemo.Network):
def __init__(self):
nemo.Network.__init__(self)
self._type = self.add_neuron_type('Izhikevich')
def add_neuron(self, nidx, a, b, c, d, sigma, u, v):
nemo.Network.add_neuron(self, self._type, nidx, {'a':a,'b':b,'c':c,'d':d,'sigma':sigma},{'u':u,'v':v})
# nemo.Network.add_neuron(self, self._type, nidx, a, b, c, d, sigma, u, v)
def randomSource():
return random.randint(0, 999)
def randomTarget():
return randomSource()
def randomDelay():
return random.randint(1, 20)
def randomWeight():
return random.uniform(-1.0, 1.0)
def randomPlastic():
return random.choice([True, False])
def randomParameterIndex():
return random.randint(0, 4)
def randomStateIndex():
return random.randint(0, 1)
def arg(vlen, gen):
"""
Return either a fixed-length vector or a scalar, with values drawn from 'gen'
"""
vector = random.choice([True, False])
if vector:
return [gen() for n in range(vlen)]
else:
return gen()
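# Most tests below exercise both the scalar and the vector forms of the API: arg()
# randomly returns either a single value or a list of vlen values, and each test
# then calls the function under test with whichever form was produced.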
class TestFunctions(unittest.TestCase):
def test_network_set_neuron(self):
""" create a simple network and make sure we can get and set parameters
and state variables """
a = 0.02
b = 0.2
c = -65.0+15.0*0.25
d = 8.0-6.0*0.25
v = -65.0
u = b * v
sigma = 5.0
net = IzNetwork()
# This should only succeed for existing neurons
self.assertRaises(RuntimeError, net.set_neuron, 0, a, b, c, d, sigma, u, v)
net.add_neuron(0, a, b, c-0.1, d, sigma, u, v-1.0)
# Getters should fail if given invalid neuron or parameter
self.assertRaises(RuntimeError, net.get_neuron_parameter, 1, 0) # neuron
self.assertRaises(RuntimeError, net.get_neuron_state, 1, 0) # neuron
self.assertRaises(RuntimeError, net.get_neuron_parameter, 0, 5) # parameter
self.assertRaises(RuntimeError, net.get_neuron_state, 0, 2) # state
e = 0.1
# Test setting whole neuron, reading back by parts
net.set_neuron(0, a-e, b-e, c-e, d-e, sigma-e, u-e, v-e)
# Since Python uses double precision and NeMo uses single precision
# internally, the parameters may not be exactly the same after reading
# back.
places = 5
self.assertAlmostEqual(net.get_neuron_parameter(0, 0), a-e, places)
self.assertAlmostEqual(net.get_neuron_parameter(0, 1), b-e, places)
self.assertAlmostEqual(net.get_neuron_parameter(0, 2), c-e, places)
self.assertAlmostEqual(net.get_neuron_parameter(0, 3), d-e, places)
self.assertAlmostEqual(net.get_neuron_parameter(0, 4), sigma-e, places)
self.assertAlmostEqual(net.get_neuron_state(0, 0), u-e, places)
self.assertAlmostEqual(net.get_neuron_state(0, 1), v-e, places)
# Test setting and reading back neuron by parts
net.set_neuron_parameter(0, 0, a)
self.assertAlmostEqual(net.get_neuron_parameter(0, 0), a, places)
net.set_neuron_parameter(0, 1, b)
self.assertAlmostEqual(net.get_neuron_parameter(0, 1), b, places)
net.set_neuron_parameter(0, 2, c)
self.assertAlmostEqual(net.get_neuron_parameter(0, 2), c, places)
net.set_neuron_parameter(0, 3, d)
self.assertAlmostEqual(net.get_neuron_parameter(0, 3), d, places)
net.set_neuron_parameter(0, 4, sigma)
self.assertAlmostEqual(net.get_neuron_parameter(0, 4), sigma, places)
net.set_neuron_state(0, 0, u)
self.assertAlmostEqual(net.get_neuron_state(0, 0), u, places)
net.set_neuron_state(0, 1, v)
self.assertAlmostEqual(net.get_neuron_state(0, 1), v, places)
# Individual setters should fail if given invalid neuron or parameter
self.assertRaises(RuntimeError, net.set_neuron_parameter, 1, 0, 0.0) # neuron
self.assertRaises(RuntimeError, net.set_neuron_state, 1, 0, 0.0) # neuron
self.assertRaises(RuntimeError, net.set_neuron_parameter, 0, 5, 0.0) # parameter
self.assertRaises(RuntimeError, net.set_neuron_state, 0, 2, 0.0) # state
def check_neuron_function(self, fun, ncount):
vlen = random.randint(2, ncount)
a = arg(vlen, random.random)
b = arg(vlen, random.random)
c = arg(vlen, random.random)
d = arg(vlen, random.random)
u = arg(vlen, random.random)
v = arg(vlen, random.random)
s = arg(vlen, random.random)
vectorized = any(isinstance(x, list) for x in [a, b, c, d, u, v, s])
if vectorized:
fun(range(vlen), a, b, c, d, s, u, v)
else:
fun(random.randint(0,1000), a, b, c, d, s, u, v)
def test_add_neuron(self):
"""
The add_neuron method supports either vector or scalar input. This
test calls add_neuron in a large number of ways, checking for
catastrophic failures in the boost::python layer
"""
for test in range(1000):
net = IzNetwork()
self.check_neuron_function(net.add_neuron, ncount=1000)
def test_set_neuron(self):
"""
The set_neuron method supports either vector or scalar input. This
test calls set_neuron in a large number of ways, checking for
catastrophic failures in the boost::python layer
"""
net = IzNetwork()
ncount = 1000
net.add_neuron(range(ncount), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
for test in range(1000):
self.check_neuron_function(net.set_neuron, ncount=1000)
sim = nemo.Simulation(net, nemo.Configuration())
for test in range(1000):
self.check_neuron_function(sim.set_neuron, ncount=1000)
def check_set_neuron_vector(self, obj, pop):
"""
Test vector/scalar forms of set_neuron for either network or simulation
pop -- list of neuron
"""
for test in range(1000):
vlen = random.randint(2, 100)
# We need unique neurons here, for defined behaviour
vector = random.choice([True, False])
if vector:
neuron = random.sample(pop, vlen)
value = [random.random() for n in neuron]
else:
neuron = random.choice(pop)
value = random.random()
def assertListsAlmostEqual(value, ret):
if vector:
self.assertEqual(vlen, len(ret))
self.assertEqual(vlen, len(value))
self.assertEqual(vlen, len(neuron))
[self.assertAlmostEqual(a, b, 5) for (a,b) in zip(value, ret)]
else:
self.assertAlmostEqual(value, ret, 5)
# check neuron parameter
param = randomParameterIndex()
obj.set_neuron_parameter(neuron, param, value)
ret = obj.get_neuron_parameter(neuron, param)
assertListsAlmostEqual(value, ret)
# check neuron state
var = randomStateIndex()
obj.set_neuron_state(neuron, var, value)
ret = obj.get_neuron_state(neuron, var)
assertListsAlmostEqual(value, ret)
def test_network_set_neuron_vector(self):
"""
Test for failures in vector/scalar form of set_neuron
The set_neuron_parameter method supports either vector or scalar
input. This test calls this function in a large number of ways,
checking for catastrophic failures in the boost::python layer
"""
net = IzNetwork()
pop = range(1000)
for n in pop:
net.add_neuron(n, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.check_set_neuron_vector(net, pop)
def test_sim_set_neuron_vector(self):
"""
Test for failures in vector/scalar form of set_neuron
The set_neuron_parameter method supports either vector or scalar
input. This test calls this function in a large number of ways,
checking for catastrophic failures in the boost::python layer
"""
net = IzNetwork()
conf = nemo.Configuration()
pop = range(1000)
for n in pop:
net.add_neuron(n, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
sim = nemo.Simulation(net, conf)
self.check_set_neuron_vector(sim, pop)
def simple_network(self):
net = IzNetwork()
net.add_neuron(0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
net.add_neuron(1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
net.add_synapse(0, 1, 1, 5.0, False)
net.add_synapse(1, 0, 1, 5.0, False)
return (net, nemo.Simulation(net, nemo.Configuration()))
def test_get_neuron_scalar(self):
"""
Test that singleton arguments to neuron getters work as either scalar
or singleton list.
"""
def check(x):
x.get_neuron_state([0], 0)
x.get_neuron_state(0, 0)
x.get_neuron_parameter([0], 0)
x.get_neuron_parameter(0, 0)
(net, sim) = self.simple_network()
check(net)
check(sim)
def test_set_neuron_scalar(self):
"""
Test that singleton arguments to neuron setters work as either scalar
or singleton list.
"""
def check(x):
x.set_neuron_state([0], 0, [0])
x.set_neuron_state(0, 0, 0)
x.set_neuron_parameter([0], 0, [0])
x.set_neuron_parameter(0, 0, 0)
(net, sim) = self.simple_network()
check(net)
check(sim)
def test_get_synapse_scalar(self):
"""
Test that singleton arguments to synapse getters work as either scalar
or singleton list.
"""
def check(x):
x.get_synapse_source(0)
x.get_synapse_source([0])
x.get_synapse_target(0)
x.get_synapse_target([0])
x.get_synapse_delay(0)
x.get_synapse_delay([0])
x.get_synapse_weight(0)
x.get_synapse_weight([0])
x.get_synapse_plastic(0)
x.get_synapse_plastic([0])
(net, sim) = self.simple_network()
check(net)
check(sim)
def test_add_synapse(self):
"""
        The add_synapse method supports either vector or scalar input. This
        test calls add_synapse in a large number of ways, checking for
        catastrophic failures in the boost::python layer
"""
net = IzNetwork()
for test in range(1000):
vlen = random.randint(2, 500)
source = arg(vlen, randomSource)
target = arg(vlen, randomTarget)
delay = arg(vlen, randomDelay)
weight = arg(vlen, randomWeight)
plastic = arg(vlen, randomPlastic)
ids = net.add_synapse(source, target, delay, weight, plastic)
vectorized = any(isinstance(n, list) for n in [source, target, delay, weight, plastic])
if vectorized:
self.assertTrue(isinstance(ids, list))
self.assertEqual(len(ids), vlen)
else:
self.assertFalse(isinstance(ids, list))
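    # Illustrative sketch (not part of the test suite) of the scalar vs.
    # vector forms exercised above; only calls already used in this file are
    # shown, and the return types are inferred from the assertions above:
    #
    #   net = IzNetwork()
    #   net.add_neuron(range(2), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    #   sid = net.add_synapse(0, 1, 1, 5.0, False)              # scalar id
    #   sids = net.add_synapse([0, 1], [1, 0], 1, 5.0, False)   # list of ids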
def test_get_synapses_from_unconnected(self):
net = IzNetwork()
net.add_neuron(0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.assertEqual(len(net.get_synapses_from(0)), 0)
sim = nemo.Simulation(net, nemo.Configuration())
self.assertEqual(len(sim.get_synapses_from(0)), 0)
def test_get_synapse(self):
"""
Test scalar and vector form of synapse getters
Synapse getters have both scalar and vector forms. To test these,
construct a network with fixed connectivity where all synapse
properties are functions of the source and target, then read back and
verify that the values are as expected.
"""
def delay(source, target):
return 1 + ((source + target) % 20)
def plastic(source, target):
return (source + target) % 1 == 0
def weight(source, target):
return float(source) + float(target)
ncount = 100
net = IzNetwork()
for src in range(ncount):
net.add_neuron(src, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
for tgt in range(src+1):
net.add_synapse(src, tgt, delay(src, tgt), weight(src, tgt), plastic(src, tgt))
conf = nemo.Configuration()
sim = nemo.Simulation(net, conf)
def check_scalar(x, known_source, sid, source, target):
self.assertEqual(known_source, source)
self.assertEqual(x.get_synapse_delay(sid), delay(source, target))
self.assertEqual(x.get_synapse_weight(sid), weight(source, target))
self.assertEqual(x.get_synapse_plastic(sid), plastic(source, target))
def check(x):
for src in range(ncount):
all_synapses = x.get_synapses_from(src)
# read a random number of these out-of-order
n_queried = random.randint(1, len(all_synapses))
queried = random.sample(all_synapses, n_queried)
if len(queried) == 1:
queried = queried[0]
sources = x.get_synapse_source(queried)
targets = x.get_synapse_target(queried)
if n_queried == 1:
check_scalar(x, src, queried, sources, targets)
else:
for (sid, qsrc, tgt) in zip(queried, sources, targets):
check_scalar(x, src, sid, qsrc, tgt)
def check_iterator(x):
            # Make sure the synapse getter can deal with the iterator
            # returned by the synapse query
for src in range(ncount):
srcs = x.get_synapse_source(x.get_synapses_from(src))
check(net)
check(sim)
check_iterator(net)
check_iterator(sim)
if __name__ == '__main__':
unittest.main()
|
pmediano/NeMo
|
src/api/python/test.py
|
Python
|
gpl-2.0
| 14,108
|
[
"NEURON"
] |
922bfd55cefdaafafefc0276c50b83bb4c932b47770c008b3f76288fa42109fa
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Auxiliary Readers --- :mod:`MDAnalysis.auxiliary.base`
======================================================
Base classes for deriving all auxiliary data readers. See the API in :mod:`MDAnalysis.auxiliary.__init__`.
.. autoclass:: AuxStep
:members:
.. autoclass:: AuxReader
:members:
.. autoclass:: AuxFileReader
:members:
"""
import os
import numbers
import math
import warnings
import numpy as np
from ..lib.util import asiterable, anyopen
from . import _AUXREADERS
class _AuxReaderMeta(type):
# auto register on class creation
def __init__(cls, name, bases, classdict):
        type.__init__(cls, name, bases, classdict)
try:
fmt = asiterable(classdict['format'])
except KeyError:
pass
else:
for f in fmt:
_AUXREADERS[f] = cls
class AuxStep(object):
"""Base class for auxiliary timesteps.
Stores the auxiliary data for the current auxiliary step. On creation,
``step`` is set to -1.
Parameters
----------
dt : float, optional
Change in time between auxiliary steps (in ps). If not specified, will
attempt to determine from auxiliary data; otherwise defaults to 1 ps.
Ignored if ``constant_dt`` is False.
initial_time : float, optional
Time of first auxiliary step (in ps). If not specified, will attempt to
determine from auxiliary data; otherwise defaults to 0 ps. Ignored if
``constant_dt`` is False.
time_selector: optional
Key to select 'time' value from the full set of data read for each
step, if time selection is enabled; type will vary depending on the
auxiliary data format (see individual AuxReader documentation). If
``None`` (default value), time is instead calculated as: ``time = step
* dt + initial_time``
data_selector: optional
Key(s) to select auxiliary data values of interest from the full set of
data read for each step, if data selection is enabled by the reader;
type will vary depending on the auxiliary data format (see individual
AuxReader documentation).
If ``None`` (default value), the full set of data is returned.
constant_dt : bool, optional
(Default: True) Set to False if ``dt`` is not constant
throughout the auxiliary data set, in which case a valid
``time_selector`` must be provided.
Attributes
----------
step : int
Number of the current auxiliary step (0-based).
"""
def __init__(self, dt=1, initial_time=0, time_selector=None,
data_selector=None, constant_dt=True):
self.step = -1
self._initial_time = initial_time
self._dt = dt
self._constant_dt = constant_dt
        # Checking for valid values when assigning _time/data_selector would
        # fail here, as we don't have any _data yet; set _time/data_selector_
        # directly and catch invalid values later.
self._time_selector_ = time_selector
self._data_selector_ = data_selector
@property
def time(self):
""" Time in ps of current auxiliary step (as float).
Read from the set of auxiliary data read each step if time selection
is enabled and a valid ``time_selector`` is specified; otherwise
calculated as ``step * dt + initial_time``.
"""
if self._time_selector is not None:
return self._select_time(self._time_selector)
elif self._constant_dt:
            # default to calculating time...
return self.step * self._dt + self._initial_time
else:
raise ValueError("If dt is not constant, must have a valid "
"time selector")
@property
def data(self):
""" Auxiliary values of interest for the current step (as ndarray).
Read from the full set of data read for each step if data selection is
enabled and a valid ``data_selector`` is specified; otherwise
defaults to the full set of data.
"""
if self._data_selector is not None:
return self._select_data(self._data_selector)
# default to full set of data...
return self._data
@property
def _time_selector(self):
""" 'Key' to select time from the full set of data read in each step.
Will be passed to ``_select_time()``, defined separately for each
auxiliary format, when returning the time of the current step.
Format will depend on the auxiliary format. e.g. for the XVGReader,
this is an index and ``_select_time()`` returns the value in that column
of the current step data.
Defaults to 'None' if time selection is not enabled.
"""
try:
self._select_time
except AttributeError:
warnings.warn("{} does not support time selection. Reverting to "
"default".format(self.__class__.__name__))
return None
return self._time_selector_
@_time_selector.setter
def _time_selector(self, new):
# check we have a select_time method
try:
select = self._select_time
except AttributeError:
warnings.warn("{} does not support time selection".format(
self.__class__.__name__))
else:
# check *new* is valid before setting; _select_time should raise
# an error if not
select(new)
self._time_selector_ = new
@property
def _data_selector(self):
""" 'Key' to select values of interest from full set of auxiliary data.
These are the values that will be stored in ``data`` (and
``frame_data`` and ``frame_rep``).
Will be passed to ``_select_data()``, defined separately for each
auxiliary format, when returning the data of interest for the current
step (``data``). Format will depend on the auxiliary format; e.g.
for the XVGReader, this is a list of indices and `_select_data()` returns
the value(s) in those columns of the current step data.
Defaults to 'None' if data selection is not enabled.
"""
try:
self._select_data
except AttributeError:
warnings.warn("{} does not support data selection. Reverting to "
"default".format(self.__class__.__name__))
return None
return self._data_selector_
@_data_selector.setter
def _data_selector(self, new):
# check we have a select_data method
try:
select = self._select_data
except AttributeError:
warnings.warn(
"{} does not support data selection".format(self.__class__.__name__)
)
else:
# check *new* is valid before setting; _select_data should raise an
# error if not
select(new)
self._data_selector_ = new
def _empty_data(self):
""" Create an 'empty' ``data``-like placeholder.
Returns an ndarray in the format of ``data`` with all values set to
        np.nan; to use as the 'representative value' when no auxiliary steps
        are assigned to a trajectory timestep or none fall within the cutoff.
        The default behaviour here works when ``data`` is an ndarray of floats.
        It may need to be overwritten in an individual format's AuxStep.
"""
return np.full_like(self.data, np.nan)
class AuxReader(metaclass=_AuxReaderMeta):
""" Base class for auxiliary readers.
Allows iteration over a set of data from a trajectory, additional
('auxiliary') to the regular positions/velocities/etc. This auxiliary
data may be stored in e.g. an array or a separate file.
See the :ref:`Auxiliary API` for more on use.
Parameters
----------
auxname : str, optional
Name for auxiliary data. When added to a trajectory, the representative
auxiliary value(s) for the timestep may be accessed as ``ts.aux.auxname``
or ``ts.aux['auxname']``.
represent_ts_as : str
Method to use to calculate representative value of auxiliary data for a
trajectory timestep. See :func:`calc_representative` for valid options.
cutoff : float, optional
Auxiliary steps further from the trajectory timestep than *cutoff*
(in ps) will be ignored when calculating representative values. If -1
(default), all auxiliary steps assigned to that timestep will be used.
**kwargs
Options to be passed to :class:`~AuxStep`
Attributes
----------
auxstep :
:class:`~AuxStep` object containing data for current step.
frame_data : dict
Dictionary containing ``data`` from each auxiliary step assigned to the
current trajectory timestep, indexed by the difference in time between
the step and trajectory timestep (i.e. ``auxstep.time - ts.time``; in ps)
frame_rep : ndarray
Representative value(s) of auxiliary data for current trajectory timestep.
Note
----
Auxiliary data are assumed to be time ordered and contain no duplicates.
"""
_Auxstep = AuxStep
# update when add new options
represent_options = ['closest', 'average']
# list of attributes required to recreate the auxiliary
required_attrs = ['represent_ts_as', 'cutoff', 'dt', 'initial_time',
'time_selector', 'data_selector', 'constant_dt', 'auxname',
'format', '_auxdata']
def __init__(self, represent_ts_as='closest', auxname=None, cutoff=-1,
**kwargs):
# allow auxname to be optional for when using reader separate from
# trajectory.
self.auxname = auxname
self.represent_ts_as = represent_ts_as
self.cutoff = cutoff
self.frame_data = None
self.frame_rep = None
self.auxstep = self._Auxstep(**kwargs)
self._read_next_step()
# if dt is constant and auxiliary data includes time, calculate
# initial time and dt
if self.time_selector is not None and self.constant_dt:
self.auxstep._initial_time = self.time
self._read_next_step()
self.auxstep._dt = self.time - self.initial_time
self.rewind()
def copy(self):
raise NotImplementedError("Copy not implemented for AuxReader")
def __len__(self):
""" Number of steps in auxiliary data. """
return self.n_steps
def next(self):
""" Move to next step of auxiliary data. """
return self._read_next_step()
def __next__(self):
""" Move to next step of auxiliary data. """
return self.next()
def __iter__(self):
""" Iterate over all auxiliary steps. """
self._restart()
return self
def _restart(self):
""" Reset back to start; calling next() should read first step. """
# Overwrite as appropriate
self.auxstep.step = -1
def rewind(self):
""" Return to and read first step. """
# Overwrite as appropriate
# could also use _go_to_step(0)
self._restart()
return self._read_next_step()
def _read_next_step(self):
""" Move to next step and update auxstep.
Should return the AuxStep instance corresponding to the next step.
"""
# Define in each auxiliary reader
        raise NotImplementedError(
            "BUG: Override _read_next_step() in auxiliary reader!")
def update_ts(self, ts):
""" Read auxiliary steps corresponding to and update the trajectory
timestep *ts*.
Calls :meth:`read_ts`, then updates *ts* with the representative value.
``auxname`` must be set; the representative value will be accessible in
*ts* as ``ts.aux.auxname`` or ``ts.aux['auxname']``.
Parameters
----------
ts : :class:`~MDAnalysis.coordinates.base.Timestep` object
The trajectory timestep for which corresponding auxiliary data is
to be read and updated.
Returns
-------
:class:`~MDAnalysis.coordinates.base.Timestep`
            *ts* with the representative auxiliary
            value in ``ts.aux`` updated appropriately.
Raises
------
ValueError
If ``auxname`` is not set.
See Also
--------
:meth:`read_ts`
"""
if not self.auxname:
raise ValueError("Auxiliary name not set, cannot set representative "
"value in timestep. Name auxiliary or use read_ts "
"instead")
self.read_ts(ts)
setattr(ts.aux, self.auxname, self.frame_rep)
return ts
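    # Sketch of the intended update workflow (names are illustrative, not an
    # exact API): with ``reader.auxname = 'pull'``, calling
    # ``reader.update_ts(ts)`` for each trajectory timestep reads the assigned
    # auxiliary steps and exposes the representative value as ``ts.aux.pull``.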
def read_ts(self, ts):
""" Read auxiliary steps corresponding to the trajectory timestep *ts*.
Read the auxiliary steps 'assigned' to *ts* (the steps that are within
        ``ts.dt/2`` of the trajectory timestep/frame - i.e. closer to *ts*
than either the preceding or following frame). Then calculate a
'representative value' for the timestep from the data in each of these
auxiliary steps.
To update *ts* with the representative value, use ``update_ts`` instead.
Parameters
----------
ts : :class:`~MDAnalysis.coordinates.base.Timestep` object
The trajectory timestep for which corresponding auxiliary data is
to be read.
See Also
--------
:meth:`update_ts`
Note
----
The auxiliary reader will end up positioned at the last step assigned
to the trajectory frame or, if the frame includes no auxiliary steps,
(as when auxiliary data are less frequent), the most recent auxiliary
step before the frame.
"""
# Make sure our auxiliary step starts at the right point (just before
# the frame being read): the current step should be assigned to a
# previous frame, and the next step to either the frame being read or a
# following frame. Move to right position if not.
frame_for_step = self.step_to_frame(self.step, ts)
frame_for_next_step = self.step_to_frame(self.step+1, ts)
if frame_for_step is not None:
if frame_for_next_step is None:
# self.step is the last auxiliary step in memory.
if frame_for_step >= ts.frame:
self.move_to_ts(ts)
elif not (frame_for_step < ts.frame <= frame_for_next_step):
self.move_to_ts(ts)
self._reset_frame_data() # clear previous frame data
# read through all the steps 'assigned' to ts.frame + add to frame_data
while self.step_to_frame(self.step+1, ts) == ts.frame:
self._read_next_step()
self._add_step_to_frame_data(ts.time)
self.frame_rep = self.calc_representative()
def step_to_frame(self, step, ts, return_time_diff=False):
""" Calculate closest trajectory frame for auxiliary step *step*.
Calculated given dt, time and frame from *ts*::
time_frame_0 = ts.time - ts.frame*ts.dt # time at frame 0
            frame = floor((step_to_time(step) - time_frame_0 + ts.dt/2)/ts.dt)
The difference in time between the step and the calculated frame can
also optionally be returned with *return_time_diff*.
Parameters
----------
step : int
Number of the auxiliary step to calculate closest trajectory frame
for.
ts : :class:`~MDAnalysis.coordinates.base.Timestep` object
(Any) timestep from the trajectory the calculated frame number is to
correspond to.
return_time_diff : bool, optional
(Default: False) Additionally return the time difference between
*step* and returned frame.
Returns
-------
frame_index : int or None
Number of the trajectory frame closest (in time) to the given
auxiliary step. If the step index is out of range for the auxiliary
data, ``None`` is returned instead.
time_diff : float (optional)
Difference in time between *step* and *frame_index*.
Note
----
        Assumes the trajectory dt is constant.
The returned frame number may be out of range for the trajectory.
"""
if step >= self.n_steps or step < 0:
return None
time_frame_0 = ts.time - ts.frame*ts.dt # assumes ts.dt is constant
time_step = self.step_to_time(step)
frame_index = int(math.floor((time_step-time_frame_0+ts.dt/2.)/ts.dt))
if not return_time_diff:
return frame_index
else:
time_frame = time_frame_0 + frame_index*ts.dt
time_diff = abs(time_frame - time_step)
return frame_index, time_diff
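    # Worked example of the formula above (hypothetical numbers): with
    # auxiliary steps every 0.5 ps starting at 0 ps, and a trajectory with
    # ts.dt = 2 ps and ts.time = 0 at frame 0, auxiliary step 5 lies at
    # 2.5 ps and maps to frame floor((2.5 - 0 + 1.0)/2.0) = 1, with a time
    # difference of |2.0 - 2.5| = 0.5 ps.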
def move_to_ts(self, ts):
""" Position auxiliary reader just before trajectory timestep *ts*.
Calling ``next()`` should read the first auxiliary step 'assigned' to
the trajectory timestep *ts* or, if no auxiliary steps are
assigned to that timestep (as in the case of less frequent auxiliary
data), the first auxiliary step after *ts*.
Parameters
----------
ts : :class:`~MDAnalysis.coordinates.base.Timestep` object
The trajectory timestep before which the auxiliary reader is to
be positioned.
"""
# figure out what step we want to end up at
if self.constant_dt:
# if dt constant, calculate from dt/offset/etc
step = int(math.floor((ts.time-ts.dt/2-self.initial_time)/self.dt))
# if we're out of range of the number of steps, reset back
step = max(min(step, self.n_steps-1), -1)
else:
# otherwise, go through steps till we find the right one
for i in range(self.n_steps+1):
                if self.step_to_frame(i, ts) >= ts.frame:
break
# we want the step before this
step = i-1
if step == -1:
self._restart()
else:
self._go_to_step(step)
def next_nonempty_frame(self, ts):
""" Find the next trajectory frame for which a representative auxiliary
value can be calculated.
That is, the next trajectory frame to which one or more auxiliary steps
are assigned and fall within the cutoff.
Starts looking from the current step time. If the end of the auxiliary
data is reached before a trajectory frame is found, None is returned.
Parameters
----------
ts : :class:`~MDAnalysis.coordinates.base.Timestep` object
Any timestep from the trajectory for which the next 'non-empty'
frame is to be found.
Returns
-------
int
Index of the next auxiliary-containing frame in the trajectory.
Note
----
The returned index may be out of range for the trajectory.
"""
step = self.step
while step < self.n_steps-1:
            next_frame, time_diff = self.step_to_frame(step+1, ts,
                                                       return_time_diff=True)
if self.cutoff != -1 and time_diff > self.cutoff:
# 'representative values' will be NaN; check next step
step = step + 1
else:
return next_frame
# we ran out of auxiliary steps...
return None
def __getitem__(self, i):
""" Return the AuxStep corresponding to the *i*-th auxiliary step(s)
(0-based). Negative numbers are counted from the end.
*i* may be an integer (in which case the corresponding AuxStep is
returned) or a list of integers or slice (in which case an iterator is
returned)::
step_10 = aux_reader[10]
will move to step 10 of the auxiliary and return the :class:`AuxStep`.
        By using a slice/list, we can iterate over specified steps in the
auxiliary, e.g. when performing analysis ::
for auxstep in aux_reader[100:200]: # analyse only steps 100 to 200
run_analysis(auxstep)
            for auxstep in aux_reader[::10]:  # analyse every 10th step
run_analysis(auxstep)
"""
if isinstance(i, numbers.Integral):
i = self._check_index(i)
return self._go_to_step(i)
elif isinstance(i, (list, np.ndarray)):
return self._list_iter([self._check_index(x) for x in i])
elif isinstance(i, slice):
# default start to first frame (ie. 0)
start = self._check_index(i.start) if i.start is not None else 0
# default stop to after last frame (i.e. n_steps)
# n_steps is a valid stop index but will fail _check_index;
# deal with separately
stop = (i.stop if i.stop == self.n_steps
else self._check_index(i.stop) if i.stop is not None
else self.n_steps)
step = i.step or 1
if not isinstance(step, numbers.Integral) or step < 1:
raise ValueError("Step must be positive integer") # allow -ve?
if start > stop:
raise IndexError("Stop frame is lower than start frame")
return self._slice_iter(slice(start,stop,step))
else:
raise TypeError("Index must be integer, list of integers or slice")
def _check_index(self, i):
if not isinstance(i, numbers.Integral):
raise TypeError("Step indices must be integers")
if i < 0:
i = i + self.n_steps
if i < 0 or i >= self.n_steps:
raise IndexError("{} is out of range of auxiliary (num. steps "
"{})".format(i, self.n_steps))
return i
def _list_iter(self, i):
for j in i:
yield self._go_to_step(j)
def _slice_iter(self, i):
for j in range(i.start, i.stop, i.step):
yield self._go_to_step(j)
def _go_to_step(self, i):
""" Move to and read i-th auxiliary step. """
# Need to define in each auxiliary reader
raise NotImplementedError(
"BUG: Override _go_to_step() in auxiliary reader!")
def _reset_frame_data(self):
self.frame_data = {}
def _add_step_to_frame_data(self, ts_time):
""" Update ``frame_data`` with values for the current step.
Parameters
----------
ts_time : float
the time of the timestep the current step is being 'added to'. Used
to calculate difference in time between current step and timestep.
"""
time_diff = self.time - ts_time
self.frame_data[time_diff] = self.auxstep.data
def calc_representative(self):
""" Calculate representative auxiliary value(s) from the data in
*frame_data*.
Currently implemented options for calculating representative value are:
* `closest`: default; the value(s) from the step closest to in time
to the trajectory timestep
* `average`: average of the value(s) from steps 'assigned' to the
trajectory timestep.
Additionally, if ``cutoff`` is specified, only steps within this time
of the trajectory timestep are considered in calculating the
representative.
If no auxiliary steps were assigned to the timestep, or none fall
within the cutoff, representative values are set to ``np.nan``.
Returns
-------
ndarray
Array of auxiliary value(s) 'representative' for the timestep.
"""
if self.cutoff == -1:
cutoff_data = self.frame_data
else:
cutoff_data = {key: val for key, val in self.frame_data.items()
if abs(key) <= self.cutoff}
if len(cutoff_data) == 0:
# no steps are 'assigned' to this trajectory frame, so return
# values of ``np.nan``
value = self.auxstep._empty_data()
elif self.represent_ts_as == 'closest':
min_diff = min([abs(i) for i in cutoff_data])
# we don't know the original sign, and might have two equally-spaced
# steps; check the earlier time first
try:
value = cutoff_data[-min_diff]
except KeyError:
value = cutoff_data[min_diff]
elif self.represent_ts_as == 'average':
value = np.mean(np.array([val for val in cutoff_data.values()]),
axis=0)
return value
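    # Illustrative example (hypothetical frame_data): with cutoff == -1 and
    # frame_data = {-0.2: 1.0, 0.3: 2.0}, 'closest' picks the step 0.2 ps
    # before the trajectory time and returns 1.0, while 'average' returns 1.5.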
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def close(self):
# Overwrite as appropriate
pass
@property
def n_steps(self):
""" Total number of steps in the auxiliary data. """
try:
return self._n_steps
except AttributeError:
self._n_steps = self._count_n_steps()
return self._n_steps
def step_to_time(self, i):
""" Return time of auxiliary step *i*.
Calculated using ``dt`` and ``initial_time`` if ``constant_dt`` is True;
otherwise from the list of times as read from the auxiliary data for
each step.
Parameters
----------
i : int
Index (0-based) of step to return time for
Returns
-------
time : float
Time (in ps) of step *i*
Raises
------
ValueError
When *i* not in valid range
"""
if i >= self.n_steps:
raise ValueError("{0} is not a valid step index (total number of "
"steps is {1})".format(i, self.n_steps))
if self.constant_dt:
return i*self.dt+self.initial_time
else:
try:
return self._times[i]
except AttributeError:
self._times = self.read_all_times()
return self._times[i]
@property
def represent_ts_as(self):
""" Method by which 'representative' timestep values of auxiliary data
will be calculated.
"""
return self._represent_ts_as
@represent_ts_as.setter
def represent_ts_as(self, new):
if new not in self.represent_options:
raise ValueError("{0} is not a valid option for calculating "
"representative value(s). Enabled options are: "
"{1}".format(new, self.represent_options))
self._represent_ts_as = new
def __del__(self):
self.close()
def get_description(self):
""" Get the values of the parameters necessary for replicating the
AuxReader.
An AuxReader can be duplicated using
:func:`~MDAnalysis.auxiliary.core.auxreader`::
description = original_aux.get_description()
new_aux = MDAnalysis.auxiliary.auxreader(**description)
The resulting dictionary may also be passed directly to
:meth:`~MDAnalysis.coordinates.base.ProtoReader.add_auxiliary` to
reload an auxiliary into a trajectory::
trajectory.add_auxiliary(**description)
Returns
-------
dict
Key-word arguments and values that can be used to replicate the
AuxReader.
"""
description = {attr.strip('_'): getattr(self, attr)
for attr in self.required_attrs}
return description
def __eq__(self, other):
for attr in self.required_attrs:
if getattr(self, attr) != getattr(other, attr):
return False
return True
@property
def step(self):
"""Number of the current auxiliary step (as stored in ``auxstep``;
0-based)."""
return self.auxstep.step
@property
def time(self):
"""Time of current auxiliary step (as stored in ``auxstep``; in ps)"""
return self.auxstep.time
@property
def dt(self):
"""Change in time between auxiliary steps (as stored in ``auxstep``;
in ps)"""
return self.auxstep._dt
@property
def initial_time(self):
"""Time of first auxiliary step (as stored in ``auxstep``; in ps)"""
return self.auxstep._initial_time
@property
def time_selector(self):
"""Key to select 'time' value from the full set of data read for each step.
        As stored in ``auxstep``.
        Type differs between auxiliary formats, depending on how the data for each
step is read in and stored; e.g. data from .xvg files is read in as a
list and `time_selector` must be a valid index. If time selection is not
enabled by the reader, ``time_selector`` will default to ``None``.
See each individual auxiliary reader.
"""
return self.auxstep._time_selector
@time_selector.setter
def time_selector(self, new):
old = self.auxstep._time_selector
self.auxstep._time_selector = new
if old != new:
# if constant_dt is False and so we're using a _times list, this will
# now be made invalid
try:
del(self._times)
except AttributeError:
pass
@property
def data_selector(self):
"""Key(s) to select auxiliary data values of interest from the full set
of data read for each step (as stored in ``auxstep``).
        Type differs between auxiliary formats, depending on how the data for each
step is read in and stored - e.g. data from .xvg files is read in as
        a list and `data_selector` must be a list of valid indices. If data
selection is not enabled by the reader, ``data_selector`` will default
to ``None``.
See each individual auxiliary reader.
"""
return self.auxstep._data_selector
@data_selector.setter
def data_selector(self, new):
self.auxstep._data_selector = new
@property
def constant_dt(self):
""" True if ``dt`` is constant throughout the auxiliary (as stored in
``auxstep``) """
return self.auxstep._constant_dt
@constant_dt.setter
def constant_dt(self, new):
self.auxstep._constant_dt = new
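# Minimal usage sketch for a concrete file-based reader (names are
# illustrative; iteration and indexing follow __iter__/__getitem__ above, and
# the filename argument follows AuxFileReader below):
#
#   aux = SomeAuxFileReader('data.xvg', dt=0.5, data_selector=[1])
#   for auxstep in aux:              # iterate over all auxiliary steps
#       process(auxstep.time, auxstep.data)
#   auxstep_10 = aux[10]             # jump straight to step 10
#   for auxstep in aux[::10]:        # or analyse every 10th step
#       process(auxstep.time, auxstep.data)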
class AuxFileReader(AuxReader):
""" Base class for auxiliary readers that read from file.
Extends AuxReader with attributes and methods particular to reading
auxiliary data from an open file, for use when auxiliary files may be too
large to read in at once.
Parameters
----------
filename : str
Location of the file containing the auxiliary data.
**kwargs
Other AuxReader options.
See also
--------
:class:`AuxReader`
Attributes
----------
auxfile
File object for the auxiliary file.
"""
def __init__(self, filename, **kwargs):
self.auxfile = anyopen(filename)
self._auxdata = os.path.abspath(filename)
super(AuxFileReader, self).__init__(**kwargs)
def close(self):
""" Close *auxfile*. """
if self.auxfile is None:
return
self.auxfile.close()
self.auxfile = None
def _restart(self):
""" Reposition to just before first step. """
self.auxfile.seek(0)
self.auxstep.step = -1
def _reopen(self):
""" Close and then reopen *auxfile*. """
if self.auxfile is not None:
self.auxfile.close()
self.auxfile = open(self._auxdata)
self.auxstep.step = -1
def _go_to_step(self, i):
""" Move to and read i-th auxiliary step.
Parameters
----------
i : int
Step number (0-indexed) to move to
Raises
------
ValueError
If step index not in valid range.
Note
----
Works by reading through all steps consecutively until correct step
is reached. Overwrite if this can be done more efficiently.
"""
## could seek instead?
if i >= self.n_steps:
raise ValueError("Step index {0} is not valid for auxiliary "
"(num. steps {1}!".format(i, self.n_steps))
value = self.rewind()
while self.step != i:
value = self.next()
return value
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/auxiliary/base.py
|
Python
|
gpl-2.0
| 33,900
|
[
"MDAnalysis"
] |
d3ac6bb155acec3acb96513cf2dab1ba3175ec7e81a3de0130a20f9d31c5e563
|
# -*- coding: utf-8 -*-
#
# PyGauss documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 14 01:13:38 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__', '__name__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return Mock()
else:
return Mock()
def __getitem__(self, index):
raise IndexError()
def __mul__(self, other):
return Mock()
MOCK_MODULES = ['cclib', 'cclib.parser', 'cclib.parser.utils',
'chemlab', 'chemlab.graphics', 'chemlab.db', 'chemlab.graphics.renderers',
'chemlab.graphics.renderers.base','chemlab.graphics.renderers.sphere',
'chemlab.graphics.renderers.sphere_imp','chemlab.graphics.renderers.point',
'chemlab.graphics.colors', 'chemlab.graphics.buffers', 'chemlab.core',
'chemlab.io', 'chemlab.io.handlers', 'chemlab.graphics.qtviewer',
'chemlab.graphics.buffers', 'chemlab.graphics.shaders', 'chemlab.io.handlers.base',
'chemlab.graphics.camera', 'chemlab.graphics.renderers.wireframe',
'chemlab.utils', 'chemlab.qc', 'chemlab.qc.pgbf',
'chemview', 'chemview.widget', 'chemview.utils', 'chemview.marchingcubes',
'paramiko', 'numpy', 'numpy.linalg',
'OpenGL', 'OpenGL.GL',
'matplotlib', 'matplotlib.pyplot', 'matplotlib.cm', 'matplotlib.offsetbox',
'matplotlib.colors', 'mpl_toolkits', 'mpl_toolkits.mplot3d',
'pandas', 'pandas.tools', 'pandas.tools.plotting', 'pandas.core', 'pandas.core.index',
'sklearn', 'sklearn.cluster',
'IPython', 'IPython.display', 'IPython.core', 'IPython.core.display',
'IPython.utils', 'IPython.utils.traitlets',
'scipy', 'scipy.signal', 'scipy.interpolate',
'nose', 'nose_parameterized',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
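# With the mocks registered, importing any of the heavy dependencies listed
# above succeeds during the Sphinx build; attribute access and calls on a
# mocked module just return further Mock instances (illustrative only):
#
#   import numpy                      # resolves to the Mock registered above
#   numpy.linalg.norm([1, 2, 3])      # returns a Mock; real numpy never loads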
import urllib2
import json
git_history = urllib2.urlopen('https://api.github.com/repos/chrisjsewell/Pygauss/releases')
git_history_json = json.load(git_history)
with open('history.rst', 'w') as f:
f.write('Whats New\n')
f.write('---------\n')
f.write('\n')
for r in git_history_json:
f.write(' '.join([r['tag_name'],'-',r['name'],'\n']))
f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
f.write('\n')
for line in r['body'].split('\n'):
f.write(' '.join([line, '\n']))
f.write('\n')
git_issues = urllib2.urlopen('https://api.github.com/repos/chrisjsewell/Pygauss/issues')
git_issues_json = json.load(git_issues)
with open('enhancements.rst', 'w') as f:
f.write('Whats To Come\n')
f.write('--------------\n')
f.write('\n')
for r in git_issues_json:
if not r["state"] == "open":
continue
labels = r['labels']
for l in labels:
if l['name'] == 'new feature':
f.write(' '.join([r['title'],'\n']))
f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
f.write('\n')
for line in r['body'].split('\n'):
f.write(' '.join([line, '\n']))
f.write('\n')
break
import inspect
sys.path.insert(0, os.path.abspath('../..'))
import pygauss
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.dirname(inspect.getfile(pygauss)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyGauss'
copyright = u'2015, Chris Sewell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pygauss.__version__
# The full version, including alpha/beta/rc tags.
release = pygauss.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'PyGauss'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/molecule.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'molecule.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyGaussdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyGauss.tex', u'PyGauss Documentation',
u'Chris Sewell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pygauss', u'PyGauss Documentation',
[u'Chris Sewell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyGauss', u'PyGauss Documentation',
u'Chris Sewell', 'PyGauss', 'A Python API for analysis of Gaussian computations',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'IPython': ('http://ipython.org/ipython-doc/stable/', None),
'docx': ('http://python-docx.readthedocs.org/en/latest/', None),
'PIL': ('http://pillow.readthedocs.org/', None),
'pygauss' : ('http://pygauss.readthedocs.org/en/stable/', None)
}
autoclass_content = 'init'
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
chrisjsewell/PyGauss
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 12,609
|
[
"Gaussian",
"cclib"
] |
e80758c8c3244bcf417ecd76843f3dfa3c9e8d230cfa60d2469b1db2487b0e48
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.debug as debug
import orca.orca_state as orca_state
import orca.script_utilities as script_utilities
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(script_utilities.Utilities):
def __init__(self, script):
"""Creates an instance of the Utilities class.
Arguments:
- script: the script with which this instance is associated.
"""
script_utilities.Utilities.__init__(self, script)
#########################################################################
# #
# Utilities for finding, identifying, and comparing accessibles #
# #
#########################################################################
def displayedText(self, obj):
"""Returns the text being displayed for an object. Overridden here
        because OpenOffice uses symbols (e.g. ">>") for buttons, but exposes
more useful information via the accessible's name.
Arguments:
- obj: the object
Returns the text being displayed for an object or None if there isn't
any text being shown.
"""
try:
role = obj.getRole()
except:
return ""
if role == pyatspi.ROLE_PUSH_BUTTON and obj.name:
return obj.name
if role == pyatspi.ROLE_TABLE_CELL:
strings = list(map(self.displayedText, [child for child in obj]))
text = "\n".join(strings)
if text.strip():
return text
try:
text = script_utilities.Utilities.displayedText(self, obj)
except:
return ""
# TODO - JD: This is needed because the default behavior is to fall
# back on the name, which is bogus. Once that has been fixed, this
# hack can go.
if role == pyatspi.ROLE_TABLE_CELL and text == obj.name \
and (self.isSpreadSheetCell(obj) or self.isDocumentCell(obj)):
return ""
return text
def isReadOnlyTextArea(self, obj):
"""Returns True if obj is a text entry area that is read only."""
if not obj.getRole() == pyatspi.ROLE_TEXT:
return False
state = obj.getState()
readOnly = state.contains(pyatspi.STATE_FOCUSABLE) \
and not state.contains(pyatspi.STATE_EDITABLE)
details = debug.getAccessibleDetails(debug.LEVEL_ALL, obj)
debug.println(debug.LEVEL_ALL,
"soffice - isReadOnlyTextArea=%s for %s" % \
(readOnly, details))
return readOnly
def isCellBeingEdited(self, obj):
if not obj:
return False
parent = obj.parent
if parent and parent.getRoleName() == 'text frame':
if self.spreadSheetCellName(parent):
return True
return False
def isSpreadSheetCell(self, obj, startFromTable=False):
"""Return an indication of whether the given obj is a spread sheet
table cell.
Arguments:
- obj: the object to check.
- startFromTable: if True, then the component hierarchy check should
start from a table (as opposed to a table cell).
Returns True if this is a table cell, False otherwise.
"""
cell = obj
if not startFromTable:
obj = obj.parent
try:
table = obj.queryTable()
except:
return self.isCellBeingEdited(cell)
else:
return table.nRows in [65536, 1048576]
def isDocumentCell(self, cell):
isCell = lambda x: x and x.getRole() == pyatspi.ROLE_TABLE_CELL
if not isCell(cell):
cell = pyatspi.findAncestor(cell, isCell)
if not cell or self.isSpreadSheetCell(cell):
return False
isDocument = lambda x: x and x.getRole() == pyatspi.ROLE_DOCUMENT_FRAME
return pyatspi.findAncestor(cell, isDocument) != None
def spreadSheetCellName(self, cell):
nameList = cell.name.split()
for name in nameList:
name = name.replace('.', '')
if not name.isalpha() and name.isalnum():
return name
return ''
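    # Illustrative walk-through (hypothetical accessible name): for a cell
    # named "Cell B7" the tokens are "Cell" (purely alphabetic, skipped) and
    # "B7" (alphanumeric but not alphabetic), so "B7" is returned.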
def getRowColumnAndTable(self, cell):
"""Returns the (row, column, table) tuple for cell."""
if not (cell and cell.getRole() == pyatspi.ROLE_TABLE_CELL):
return -1, -1, None
cellParent = cell.parent
if cellParent and cellParent.getRole() == pyatspi.ROLE_TABLE_CELL:
cell = cellParent
cellParent = cell.parent
table = cellParent
if table and table.getRole() != pyatspi.ROLE_TABLE:
table = table.parent
try:
iTable = table.queryTable()
except:
return -1, -1, None
index = self.cellIndex(cell)
row = iTable.getRowAtIndex(index)
column = iTable.getColumnAtIndex(index)
return row, column, table
def getShowingCellsInRow(self, obj):
row, column, parentTable = self.getRowColumnAndTable(obj)
try:
table = parentTable.queryTable()
except:
return []
startIndex, endIndex = self.getTableRowRange(obj)
cells = []
for i in range(startIndex, endIndex):
cell = table.getAccessibleAt(row, i)
try:
showing = cell.getState().contains(pyatspi.STATE_SHOWING)
except:
continue
if showing:
cells.append(cell)
return cells
def getTableRowRange(self, obj):
"""If this is spread sheet cell, return the start and end indices
of the spread sheet cells for the table that obj is in. Otherwise
return the complete range (0, parentTable.nColumns).
Arguments:
- obj: a table cell.
Returns the start and end table cell indices.
"""
parent = obj.parent
try:
parentTable = parent.queryTable()
except NotImplementedError:
parentTable = None
startIndex = 0
endIndex = parentTable.nColumns
if self.isSpreadSheetCell(obj):
extents = parent.queryComponent().getExtents(pyatspi.DESKTOP_COORDS)
y = extents.y
leftX = extents.x + 1
leftCell = \
parent.queryComponent().getAccessibleAtPoint(leftX, y, 0)
if leftCell:
table = leftCell.parent.queryTable()
index = self.cellIndex(leftCell)
startIndex = table.getColumnAtIndex(index)
rightX = extents.x + extents.width - 1
rightCell = \
parent.queryComponent().getAccessibleAtPoint(rightX, y, 0)
if rightCell:
table = rightCell.parent.queryTable()
index = self.cellIndex(rightCell)
endIndex = table.getColumnAtIndex(index) + 1
return [startIndex, endIndex]
def rowHeadersForCell(self, obj):
rowHeader, colHeader = self.getDynamicHeadersForCell(obj)
if rowHeader:
return [rowHeader]
return super().rowHeadersForCell(obj)
def columnHeadersForCell(self, obj):
rowHeader, colHeader = self.getDynamicHeadersForCell(obj)
if colHeader:
return [colHeader]
return super().columnHeadersForCell(obj)
def getDynamicHeadersForCell(self, obj, onlyIfNew=False):
if not (self._script.dynamicRowHeaders or self._script.dynamicColumnHeaders):
return None, None
objRow, objCol, table = self.getRowColumnAndTable(obj)
if not table:
return None, None
headersRow = self._script.dynamicColumnHeaders.get(hash(table))
headersCol = self._script.dynamicRowHeaders.get(hash(table))
if headersRow == objRow or headersCol == objCol:
return None, None
getRowHeader = headersCol != None
getColHeader = headersRow != None
if onlyIfNew:
getRowHeader = \
getRowHeader and objRow != self._script.pointOfReference.get("lastRow")
getColHeader = \
                getColHeader and objCol != self._script.pointOfReference.get("lastColumn")
parentTable = table.queryTable()
rowHeader, colHeader = None, None
if getColHeader:
colHeader = parentTable.getAccessibleAt(headersRow, objCol)
if getRowHeader:
rowHeader = parentTable.getAccessibleAt(objRow, headersCol)
return rowHeader, colHeader
def isSameObject(self, obj1, obj2, comparePaths=False, ignoreNames=False):
same = super().isSameObject(obj1, obj2, comparePaths, ignoreNames)
if not same or obj1 == obj2:
return same
# The document frame currently contains just the active page,
# resulting in false positives. So for paragraphs, rely upon
# the equality check.
if obj1.getRole() == obj2.getRole() == pyatspi.ROLE_PARAGRAPH:
return False
# Handle the case of false positives in dialog boxes resulting
# from getIndexInParent() returning a bogus value. bgo#618790.
#
if not obj1.name \
and obj1.getRole() == pyatspi.ROLE_TABLE_CELL \
and obj1.getIndexInParent() == obj2.getIndexInParent() == -1:
top = self.topLevelObject(obj1)
if top and top.getRole() == pyatspi.ROLE_DIALOG:
same = False
return same
def isLayoutOnly(self, obj):
"""Returns True if the given object is a container which has
no presentable information (label, name, displayed text, etc.)."""
try:
role = obj.getRole()
childCount = obj.childCount
except:
role = None
childCount = 0
if role == pyatspi.ROLE_PANEL and childCount == 1:
if obj.name and obj.name == obj[0].name:
return True
if role == pyatspi.ROLE_LIST \
and obj.parent.getRole() == pyatspi.ROLE_COMBO_BOX:
return True
return script_utilities.Utilities.isLayoutOnly(self, obj)
def locateInputLine(self, obj):
"""Return the spread sheet input line. This only needs to be found
the very first time a spread sheet table cell gets focus. We use the
table cell to work back up the component hierarchy until we have found
the common panel that both it and the input line reside in. We then
use that as the base component to search for a component which has a
paragraph role. This will be the input line.
Arguments:
- obj: the spread sheet table cell that has just got focus.
Returns the spread sheet input line component.
"""
if self._script.inputLineForCell:
return self._script.inputLineForCell
isScrollPane = lambda x: x and x.getRole() == pyatspi.ROLE_SCROLL_PANE
scrollPane = pyatspi.findAncestor(obj, isScrollPane)
if not scrollPane:
return None
toolbar = None
for child in scrollPane.parent:
if child and child.getRole() == pyatspi.ROLE_TOOL_BAR:
toolbar = child
break
if not toolbar:
debug.println(debug.LEVEL_INFO, "Calc inputline toolbar not found.")
return
isParagraph = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH
allParagraphs = pyatspi.findAllDescendants(toolbar, isParagraph)
if len(allParagraphs) == 1:
self._script.inputLineForCell = allParagraphs[0]
return self._script.inputLineForCell
def frameAndDialog(self, obj):
"""Returns the frame and (possibly) the dialog containing
the object. Overridden here for presentation of the title
bar information: If the locusOfFocus is a spreadsheet cell,
1) we are not in a dialog and 2) we need to present both the
frame name and the sheet name. So we might as well return the
sheet in place of the dialog so that the default code can do
its thing.
"""
if not self.isSpreadSheetCell(obj):
return script_utilities.Utilities.frameAndDialog(self, obj)
results = [None, None]
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_FRAME:
results[0] = parent
if parent.getRole() == pyatspi.ROLE_TABLE:
results[1] = parent
parent = parent.parent
return results
def isFunctionalDialog(self, obj):
"""Returns true if the window is functioning as a dialog."""
# The OOo Navigator window looks like a dialog, acts like a
# dialog, and loses focus requiring the user to know that it's
# there and needs Alt+F6ing into. But officially it's a normal
# window.
# There doesn't seem to be (an efficient) top-down equivalent
# of utilities.hasMatchingHierarchy(). But OOo documents have
# root panes; this thing does not.
#
rolesList = [pyatspi.ROLE_FRAME,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_TOOL_BAR,
pyatspi.ROLE_PUSH_BUTTON]
if obj.getRole() != rolesList[0]:
# We might be looking at the child.
#
rolesList.pop(0)
while obj and obj.childCount and len(rolesList):
if obj.getRole() != rolesList.pop(0):
return False
obj = obj[0]
return True
def validParent(self, obj):
"""Returns the first valid parent/ancestor of obj. We need to do
this in some applications and toolkits due to bogus hierarchies.
See bugs:
http://www.openoffice.org/issues/show_bug.cgi?id=78117
http://bugzilla.gnome.org/show_bug.cgi?id=489490
Arguments:
- obj: the Accessible object
"""
parent = obj.parent
if parent and parent.getRole() in (pyatspi.ROLE_ROOT_PANE,
pyatspi.ROLE_DIALOG):
app = obj.getApplication()
for frame in app:
if frame.childCount < 1 \
or frame[0].getRole() not in (pyatspi.ROLE_ROOT_PANE,
pyatspi.ROLE_OPTION_PANE):
continue
root_pane = frame[0]
if obj in root_pane:
return root_pane
return parent
def findPreviousObject(self, obj):
"""Finds the object before this one."""
if not obj:
return None
for relation in obj.getRelationSet():
if relation.getRelationType() == pyatspi.RELATION_FLOWS_FROM:
return relation.getTarget(0)
index = obj.getIndexInParent() - 1
if not (0 <= index < obj.parent.childCount - 1):
obj = obj.parent
index = obj.getIndexInParent() - 1
try:
prevObj = obj.parent[index]
except:
prevObj = obj
return prevObj
def findNextObject(self, obj):
"""Finds the object after this one."""
if not obj:
return None
for relation in obj.getRelationSet():
if relation.getRelationType() == pyatspi.RELATION_FLOWS_TO:
return relation.getTarget(0)
index = obj.getIndexInParent() + 1
if not (0 < index < obj.parent.childCount):
obj = obj.parent
index = obj.getIndexInParent() + 1
try:
nextObj = obj.parent[index]
except:
nextObj = None
return nextObj
@staticmethod
def _flowsFromOrToSelection(obj):
try:
relationSet = obj.getRelationSet()
except:
return False
flows = [pyatspi.RELATION_FLOWS_FROM, pyatspi.RELATION_FLOWS_TO]
relations = filter(lambda r: r.getRelationType() in flows, relationSet)
targets = [r.getTarget(0) for r in relations]
for target in targets:
try:
nSelections = target.queryText().getNSelections()
except:
return False
if nSelections:
return True
return False
#########################################################################
# #
# Impress-Specific Utilities #
# #
#########################################################################
def drawingView(self, obj=orca_state.locusOfFocus):
"""Attempts to locate the Impress drawing view, which is the
area in which slide editing occurs."""
return pyatspi.findDescendant(self.topLevelObject(obj), self.isDrawingView)
def isDrawingView(self, obj):
"""Returns True if obj is the Impress Drawing View."""
if obj and obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
return (":" in obj.name and "/" in obj.name)
return False
def isInImpress(self, obj=orca_state.locusOfFocus):
"""Returns True if obj is in OOo Impress."""
# Having checked English, Spanish, and Arabic, it would seem
# that the Frame name will end with "Impress", unlocalized.
#
if obj:
topLevel = self.topLevelObject(obj)
if topLevel and not self.isZombie(topLevel) \
and topLevel.name.endswith("Impress"):
return True
return False
def slideAndTaskPanes(self, obj=orca_state.locusOfFocus):
"""Attempts to locate the Impress slide pane and task pane."""
drawingView = self.drawingView(obj)
if not drawingView:
return None, None
parent = drawingView.parent
if parent:
parent = parent.parent
if not parent:
return None, None
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_SPLIT_PANE
panes = pyatspi.findAllDescendants(parent, hasRole)
if not panes:
return None, None
slidePane = taskPane = None
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_DOCUMENT_FRAME
if pyatspi.findAllDescendants(panes[0], hasRole):
slidePane = panes[0]
if len(panes) == 2:
taskPane = panes[1]
else:
taskPane = panes[0]
if len(panes) == 2:
slidePane = panes[1]
return slidePane, taskPane
def slideTitleAndPosition(self, obj):
"""Attempts to obtain the title, position of the slide which contains
or is represented by obj.
Returns a (title, position, count) tuple.
"""
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
dv = obj
else:
dv = self.ancestorWithRole(obj, [pyatspi.ROLE_DOCUMENT_FRAME], [])
if not dv or not self.isDrawingView(dv):
return "", 0, 0
positionAndCount = dv.name.split(":")[1]
position, count = positionAndCount.split("/")
title = ""
for child in dv:
if not child.childCount:
continue
# We want an actual Title.
#
if child.name.startswith("ImpressTitle"):
title = self.displayedText(child[0])
break
# But we'll live with a Subtitle if we can't find a title.
# Unlike Titles, a single subtitle can be made up of multiple
# accessibles.
#
elif child.name.startswith("ImpressSubtitle"):
for line in child:
title = self.appendString(title, self.displayedText(line))
return title, int(position), int(count)
#########################################################################
# #
# Miscellaneous Utilities #
# #
#########################################################################
def isAutoTextEvent(self, event):
"""Returns True if event is associated with text being autocompleted
or autoinserted or autocorrected or autosomethingelsed.
Arguments:
- event: the accessible event being examined
"""
if event.source.getRole() != pyatspi.ROLE_PARAGRAPH:
return False
lastKey, mods = self.lastKeyAndModifiers()
if event.type.startswith("object:text-changed:insert"):
if not event.any_data:
return False
if lastKey == "Tab" and event.any_data != "\t":
return True
if lastKey in ["BackSpace", "ISO_Left_Tab"]:
return True
if event.type.startswith("focus:"):
if lastKey == "Return":
try:
charCount = event.source.queryText().characterCount
except:
charCount = 0
return charCount > 0
return False
def selectedChildren(self, obj):
if not obj:
return []
# Things only seem broken for certain tables, e.g. the Paths table.
# TODO - JD: File the LibreOffice bugs and reference them here.
if obj.getRole() != pyatspi.ROLE_TABLE \
or self.isSpreadSheetCell(obj, True):
return script_utilities.Utilities.selectedChildren(self, obj)
try:
selection = obj.querySelection()
except:
return []
children = []
for i, child in enumerate(obj):
if selection.isChildSelected(i):
children.append(obj[i])
return children
|
pvagner/orca
|
src/orca/scripts/apps/soffice/script_utilities.py
|
Python
|
lgpl-2.1
| 23,873
|
[
"ORCA"
] |
a4094dab595d6991772e43936cd777992c43298b9d4740ed1c15c73c035fc77c
|
# Copyright 2008, 2009, 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import absolute_import
import sys
import textwrap
from distutils.spawn import find_executable
from gi.repository import Gtk, GLib
from . import Utils, Actions, Constants
from ..core import Messages
class SimpleTextDisplay(Gtk.TextView):
"""
A non user-editable gtk text view.
"""
def __init__(self, text=""):
"""
TextDisplay constructor.
Args:
text: the text to display (string)
"""
Gtk.TextView.__init__(self)
self.set_text = self.get_buffer().set_text
self.set_text(text)
self.set_editable(False)
self.set_cursor_visible(False)
self.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
class TextDisplay(SimpleTextDisplay):
"""
A non user-editable scrollable text view with popup menu.
"""
def __init__(self, text=""):
"""
TextDisplay constructor.
Args:
text: the text to display (string)
"""
SimpleTextDisplay.__init__(self, text)
self.scroll_lock = True
self.connect("populate-popup", self.populate_popup)
def insert(self, line):
"""
Append text after handling backspaces and auto-scroll.
Args:
line: the text to append (string)
"""
line = self._consume_backspaces(line)
self.get_buffer().insert(self.get_buffer().get_end_iter(), line)
self.scroll_to_end()
def _consume_backspaces(self, line):
"""
Removes text from the buffer if line starts with '\b'
Args:
line: a string which may contain backspaces
Returns:
The string that remains from 'line' with leading '\b's removed.
"""
if not line:
return
# for each \b delete one char from the buffer
back_count = 0
start_iter = self.get_buffer().get_end_iter()
while len(line) > back_count and line[back_count] == "\b":
# stop at the beginning of a line
if not start_iter.starts_line():
start_iter.backward_char()
back_count += 1
# remove chars from buffer
self.get_buffer().delete(start_iter, self.get_buffer().get_end_iter())
return line[back_count:]
def scroll_to_end(self):
""" Update view's scroll position. """
if self.scroll_lock:
buf = self.get_buffer()
mark = buf.get_insert()
buf.move_mark(mark, buf.get_end_iter())
self.scroll_mark_onscreen(mark)
def clear(self):
""" Clear all text from buffer. """
buf = self.get_buffer()
buf.delete(buf.get_start_iter(), buf.get_end_iter())
def save(self, file_path):
"""
Save context of buffer to the given file.
Args:
file_path: location to save buffer contents
"""
with open(file_path, "w") as logfile:
buf = self.get_buffer()
logfile.write(buf.get_text(buf.get_start_iter(), buf.get_end_iter(), True))
# Action functions are set by the Application's init function
def clear_cb(self, menu_item, web_view):
""" Callback function to clear the text buffer """
Actions.CLEAR_CONSOLE()
def scroll_back_cb(self, menu_item, web_view):
""" Callback function to toggle scroll lock """
Actions.TOGGLE_SCROLL_LOCK()
def save_cb(self, menu_item, web_view):
""" Callback function to save the buffer """
Actions.SAVE_CONSOLE()
def populate_popup(self, view, menu):
"""Create a popup menu for the scroll lock and clear functions"""
menu.append(Gtk.SeparatorMenuItem())
lock = Gtk.CheckMenuItem(label="Scroll Lock")
menu.append(lock)
lock.set_active(self.scroll_lock)
lock.connect("activate", self.scroll_back_cb, view)
save = Gtk.ImageMenuItem(label="Save Console")
menu.append(save)
save.connect("activate", self.save_cb, view)
clear = Gtk.ImageMenuItem(label="Clear Console")
menu.append(clear)
clear.connect("activate", self.clear_cb, view)
menu.show_all()
return False
class MessageDialogWrapper(Gtk.MessageDialog):
""" Run a message dialog. """
def __init__(
self,
parent,
message_type,
buttons,
title=None,
markup=None,
default_response=None,
extra_buttons=None,
):
"""
Create a modal message dialog.
Args:
message_type: the type of message may be one of:
Gtk.MessageType.INFO
Gtk.MessageType.WARNING
Gtk.MessageType.QUESTION or Gtk.MessageType.ERROR
buttons: the predefined set of buttons to use:
Gtk.ButtonsType.NONE
Gtk.ButtonsType.OK
Gtk.ButtonsType.CLOSE
Gtk.ButtonsType.CANCEL
Gtk.ButtonsType.YES_NO
Gtk.ButtonsType.OK_CANCEL
title: the title of the window (string)
markup: the message text with pango markup
default_response: if set, determines which button is highlighted by default
extra_buttons: a tuple containing pairs of values:
each value is the button's text and the button's return value
"""
Gtk.MessageDialog.__init__(
self,
transient_for=parent,
modal=True,
destroy_with_parent=True,
message_type=message_type,
buttons=buttons,
)
if title:
self.set_title(title)
if markup:
self.set_markup(markup)
if extra_buttons:
self.add_buttons(*extra_buttons)
if default_response:
self.set_default_response(default_response)
def run_and_destroy(self):
response = self.run()
self.hide()
return response
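# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): illustrates the
# extra_buttons format documented above -- a flat tuple alternating button
# label and response id -- using only Gtk and the wrapper defined in this
# file. 'parent' stands in for whatever Gtk.Window the caller owns.
def _example_yes_no_dialog(parent):
    return MessageDialogWrapper(
        parent, Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE,
        title="Discard Changes?", markup="Save before closing?",
        default_response=Gtk.ResponseType.YES,
        extra_buttons=("Save", Gtk.ResponseType.YES,
                       "Discard", Gtk.ResponseType.NO),
    ).run_and_destroy()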
class ErrorsDialog(Gtk.Dialog):
""" Display flowgraph errors. """
def __init__(self, parent, flowgraph):
"""Create a listview of errors"""
Gtk.Dialog.__init__(
self,
title="Errors and Warnings",
transient_for=parent,
modal=True,
destroy_with_parent=True,
)
self.add_buttons(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)
self.set_size_request(750, Constants.MIN_DIALOG_HEIGHT)
self.set_border_width(10)
self.store = Gtk.ListStore(str, str, str)
self.update(flowgraph)
self.treeview = Gtk.TreeView(model=self.store)
for i, column_title in enumerate(["Block", "Aspect", "Message"]):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_title, renderer, text=i)
column.set_sort_column_id(i) # liststore id matches treeview id
column.set_resizable(True)
self.treeview.append_column(column)
self.scrollable = Gtk.ScrolledWindow()
self.scrollable.set_vexpand(True)
self.scrollable.add(self.treeview)
self.vbox.pack_start(self.scrollable, True, True, 0)
self.show_all()
def update(self, flowgraph):
self.store.clear()
for element, message in flowgraph.iter_error_messages():
if element.is_block:
src, aspect = element.name, ""
elif element.is_connection:
src = element.source_block.name
aspect = "Connection to '{}'".format(element.sink_block.name)
elif element.is_port:
src = element.parent_block.name
aspect = "{} '{}'".format(
"Sink" if element.is_sink else "Source", element.name
)
elif element.is_param:
src = element.parent_block.name
aspect = "Param '{}'".format(element.name)
else:
src = aspect = ""
self.store.append([src, aspect, message])
def run_and_destroy(self):
response = self.run()
self.hide()
return response
def show_about(parent, config):
ad = Gtk.AboutDialog(transient_for=parent)
ad.set_program_name(config.name)
ad.set_name("")
ad.set_license(config.license)
py_version = sys.version.split()[0]
ad.set_version("{} (Python {})".format(config.version, py_version))
try:
ad.set_logo(Gtk.IconTheme().load_icon("gnuradio-grc", 64, 0))
except GLib.Error:
Messages.send("Failed to set window logo\n")
# ad.set_comments("")
ad.set_copyright(config.license.splitlines()[0])
ad.set_website(config.website)
ad.connect("response", lambda action, param: action.hide())
ad.show()
def show_help(parent):
""" Display basic usage tips. """
markup = textwrap.dedent(
"""\
<b>Usage Tips</b>
\n\
<u>Add block</u>: drag and drop or double click a block in the block
selection window.
<u>Rotate block</u>: Select a block, press left/right on the keyboard.
<u>Change type</u>: Select a block, press up/down on the keyboard.
<u>Edit parameters</u>: double click on a block in the flow graph.
<u>Make connection</u>: click on the source port of one block, then
click on the sink port of another block.
<u>Remove connection</u>: select the connection and press delete, or
drag the connection.
\n\
*Press Ctrl+K or see menu for Keyboard - Shortcuts
\
"""
)
markup = markup.replace("Ctrl", Utils.get_modifier_key())
MessageDialogWrapper(
parent, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, title="Help", markup=markup
).run_and_destroy()
def show_keyboard_shortcuts(parent):
""" Display keyboard shortcut-keys. """
markup = textwrap.dedent(
"""\
<b>Keyboard Shortcuts</b>
\n\
<u>Ctrl+N</u>: Create a new flowgraph.
<u>Ctrl+O</u>: Open an existing flowgraph.
<u>Ctrl+S</u>: Save the current flowgraph or save as for new.
<u>Ctrl+W</u>: Close the current flowgraph.
<u>Ctrl+Z</u>: Undo a change to the flowgraph.
<u>Ctrl+Y</u>: Redo a change to the flowgraph.
<u>Ctrl+A</u>: Selects all blocks and connections.
<u>Ctrl+P</u>: Screen Capture of the Flowgraph.
<u>Ctrl+Shift+P</u>: Save the console output to file.
<u>Ctrl+L</u>: Clear the console.
<u>Ctrl+E</u>: Show variable editor.
<u>Ctrl+F</u>: Search for a block by name.
<u>Ctrl+Q</u>: Quit.
<u>F1</u> : Help menu.
<u>F5</u> : Generate the Flowgraph.
<u>F6</u> : Execute the Flowgraph.
<u>F7</u> : Kill the Flowgraph.
<u>Ctrl+Shift+S</u>: Save as the current flowgraph.
<u>Ctrl+Shift+D</u>: Create a duplicate of current flow graph.
<u>Ctrl+X/C/V</u>: Edit-cut/copy/paste.
<u>Ctrl+D/B/R</u>: Toggle visibility of disabled blocks or
connections/block tree widget/console.
<u>Shift+T/M/B/L/C/R</u>: Vertical Align Top/Middle/Bottom and
Horizontal Align Left/Center/Right respectively of the
selected block.
\
"""
)
markup = markup.replace("Ctrl", Utils.get_modifier_key())
MessageDialogWrapper(
parent,
Gtk.MessageType.INFO,
Gtk.ButtonsType.CLOSE,
title="Keyboard - Shortcuts",
markup=markup,
).run_and_destroy()
def show_get_involved(parent):
"""Get Involved Instructions"""
markup = textwrap.dedent(
"""\
<tt><b>Welcome to GNU Radio Community!</b></tt>
\n\
<tt>For more details on contributing to GNU Radio and getting engaged with our great community visit </tt><a href="https://www.gnuradio.org/get-involved">here</a>.
\n\
<tt>You can also join our <a href="https://slack.gnuradio.org/">Slack Channel</a>, IRC Channel (#gnuradio) or contact through our <a href="https://lists.gnu.org/mailman/listinfo/discuss-gnuradio">mailing list(discuss-gnuradio)</a></tt>.
\
"""
)
MessageDialogWrapper(
parent,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.CLOSE,
title="Get - Involved",
markup=markup,
).run_and_destroy()
def show_types(parent):
""" Display information about standard data types. """
colors = [(name, color) for name, key, sizeof, color in Constants.CORE_TYPES]
max_len = 10 + max(len(name) for name, code in colors)
message = "\n".join(
'<span background="{color}"><tt>{name}</tt></span>'
"".format(color=color, name=Utils.encode(name).center(max_len))
for name, color in colors
)
MessageDialogWrapper(
parent,
Gtk.MessageType.INFO,
Gtk.ButtonsType.CLOSE,
title="Types - Color Mapping",
markup=message,
).run_and_destroy()
def show_missing_xterm(parent, xterm):
markup = textwrap.dedent(
"""\
The xterm executable {0!r} is missing.
You can change this setting in your gnuradio.conf, in section [grc], 'xterm_executable'.
\n\
(This message is shown only once)\
"""
).format(xterm)
MessageDialogWrapper(
parent,
message_type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.OK,
title="Warning: missing xterm executable",
markup=markup,
).run_and_destroy()
def choose_editor(parent, config):
"""
Give the option to either choose an editor or use the default.
"""
if config.editor and find_executable(config.editor):
return config.editor
buttons = (
"Choose Editor",
Gtk.ResponseType.YES,
"Use Default",
Gtk.ResponseType.NO,
Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
)
response = MessageDialogWrapper(
parent,
message_type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.NONE,
title="Choose Editor",
markup="Would you like to choose the editor to use?",
default_response=Gtk.ResponseType.YES,
extra_buttons=buttons,
).run_and_destroy()
# Handle the initial default/choose/cancel response
# User wants to choose the editor to use
editor = ""
if response == Gtk.ResponseType.YES:
file_dialog = Gtk.FileChooserDialog(
"Select an Editor...",
None,
Gtk.FileChooserAction.OPEN,
("gtk-cancel", Gtk.ResponseType.CANCEL, "gtk-open", Gtk.ResponseType.OK),
transient_for=parent,
)
file_dialog.set_select_multiple(False)
file_dialog.set_local_only(True)
file_dialog.set_current_folder("/usr/bin")
try:
if file_dialog.run() == Gtk.ResponseType.OK:
editor = file_dialog.get_filename()
finally:
file_dialog.hide()
# Go with the default editor
elif response == Gtk.ResponseType.NO:
try:
process = None
if sys.platform.startswith("linux"):
process = find_executable("xdg-open")
elif sys.platform.startswith("darwin"):
process = find_executable("open")
if process is None:
raise ValueError("Can't find default editor executable")
# Save
editor = config.editor = process
except Exception:
Messages.send(
">>> Unable to load the default editor. Please choose an editor.\n"
)
if editor == "":
Messages.send(">>> No editor selected.\n")
return editor
|
skoslowski/gnuradio
|
grc/gui/Dialogs.py
|
Python
|
gpl-3.0
| 15,860
|
[
"VisIt"
] |
74f1372f2406429c048027560862b15da841d66e1eb403f5298928a61d340bc2
|
# Plot pdf and cdf of standard normal
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.stats import norm
X = np.linspace(-3, 3, 500)
rv = norm(0, 1)
fig, ax = plt.subplots()
ax.plot(X, rv.pdf(X))
plt.title("Gaussian pdf")
pml.save_fig("gaussian1d.pdf")
plt.show()
fig, ax = plt.subplots()
ax.plot(X, rv.cdf(X))
plt.title("Gaussian cdf")
pml.save_fig("gaussianCdf.pdf")
plt.show()
|
probml/pyprobml
|
scripts/gauss_plot.py
|
Python
|
mit
| 449
|
[
"Gaussian"
] |
9031797c185f55db4d0f1ad4271de3f92ea231968ce8056c45ceaeee81484c01
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import logging
import time
import mooseutils
import collections
import uuid
from ..base import components, HTMLRenderer
from ..tree import tokens, html, latex, pages
from ..common import exceptions
from . import command, core
LOG = logging.getLogger(__name__)
def make_extension(**kwargs):
return CivetExtension(**kwargs)
CivetTestBadges = tokens.newToken('CivetTestBadges', tests=list())
CivetTestReport = tokens.newToken('CivetTestReport', tests=list(), source=None)
class CivetExtension(command.CommandExtension):
"Adds ability to include CIVET links."""
@staticmethod
def defaultConfig():
config = command.CommandExtension.defaultConfig()
        config['remotes'] = (dict(), "Remote CIVET repositories to pull results from; each item in the dict should have another dict with a 'url' and 'repo' key.")
config['download_test_results'] = (True, "Automatically download and aggregate test results for the current merge commits.")
config['generate_test_reports'] = (True, "Generate test report pages, if results exist from download or local file(s).")
config['test_reports_location'] = ('civet', "The local directory where the generated test reports will be inserted.")
config['test_results_cache'] = (os.path.join(os.getenv('HOME'), '.local', 'share', 'civet', 'jobs'),
"Default location for downloading CIVET results.")
return config
def __init__(self, *args, **kwargs):
command.CommandExtension.__init__(self, *args, **kwargs)
self.__database = dict()
self.__test_result_numbers = dict()
self.__has_test_reports = False
def hasTestReports(self):
"""Returns True if the test report pages were generated."""
return self.__has_test_reports
def extend(self, reader, renderer):
self.requires(command)
self.addCommand(reader, CivetResultsCommand())
self.addCommand(reader, CivetMergeResultsCommand())
self.addCommand(reader, CivetTestBadgesCommand())
self.addCommand(reader, CivetTestReportCommand())
renderer.add('CivetTestBadges', RenderCivetTestBadges())
renderer.add('CivetTestReport', RenderCivetTestReport())
if isinstance(renderer, HTMLRenderer):
renderer.addCSS('civet_moose', "css/civet_moose.css")
def results(self, name):
"""Return the test results for the supplied name."""
return self.__database.get(name, None)
def testBaseFileName(self, test):
"""
Return the test page filename base.
"""
return self.__test_result_numbers.get(test, None)
def init(self):
"""(override) Generate test reports."""
# Test result database
if self.get('download_test_results', True):
start = time.time()
LOG.info("Collecting CIVET results...")
sites = list()
hashes = mooseutils.git_merge_commits()
for category in self.get('remotes').values():
sites.append((category['url'], category['repo']))
self.__database = mooseutils.get_civet_results(hashes=hashes,
sites=sites,
cache=self.get('test_results_cache'),
possible=['OK', 'FAIL', 'DIFF', 'TIMEOUT'],
logger=LOG)
LOG.info("Collecting CIVET results complete [%s sec.]", time.time() - start)
if not self.__database and self.get('generate_test_reports', True):
LOG.warning("'generate_test_reports' is being disabled, it requires results to exist but none were located.")
self.update(generate_test_reports=False)
if self.get('generate_test_reports', True):
self.__has_test_reports = True
start = time.time()
LOG.info("Creating CIVET result pages...")
report_root = self.get('test_reports_location')
if not self.translator.findPage(report_root, exact=True, throw_on_zero=False):
self.translator.addPage(pages.Directory(report_root, source=report_root))
src = pages.Source('{}/index.md'.format(report_root), source='{}/index.md'.format(report_root),
read=False, tokenize=False)
self.translator.addPage(src)
count = 0
for key, item in self.__database.items():
name = 'result_{}'.format(count)
self.__test_result_numbers[key] = name
count += 1
fullname = '{}/{}.md'.format(report_root, name)
src = pages.Source(fullname, source=fullname, read=False, tokenize=False, key=key)
self.translator.addPage(src)
LOG.info("Creating CIVET result pages complete [%s sec.]", time.time() - start)
def postTokenize(self, page, ast):
"""
Add CIVET test report token.
"""
key = page.get('key', None)
if key is not None:
h = core.Heading(ast, level=1)
tokens.String(h, content='Test Results')
core.Punctuation(h, content=':')
core.LineBreak(h)
core.Space(h)
tokens.String(h, content=key)
CivetTestReport(ast, tests=[key])
def postRender(self, page, results):
"""
Add CIVET links to test result pages.
"""
report_root = self.get('test_reports_location')
if page.source == '{}/index.md'.format(report_root):
ol = html.Tag(results, 'ol')
for key, item in self.__database.items():
fullname = self.testBaseFileName(key) + '.html'
html.Tag(html.Tag(ol, 'li'), 'a', href=fullname, string=key)
class CivetCommandBase(command.CommandComponent):
COMMAND = 'civet'
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
settings['remote'] = (None, "The category to utilize for remote result lookup, see CivetExtension.")
settings['url'] = (None, "Override for the repository url provided in the 'category' option, e.g. 'https://civet.inl.gov'.")
settings['repo'] = (None, "Override for the repository name provided in the 'category' option, e.g. 'idaholab/moose'.")
return settings
def getCivetInfo(self):
available = self.extension.get('remotes')
if len(available) > 0:
category = available.get(self.settings.get('remote') or list(available.keys())[0])
url = self.settings.get('url') or category['url']
repo = self.settings.get('repo') or category['repo']
else:
url = self.settings.get('url')
repo = self.settings.get('repo')
return url, repo
class CivetMergeResultsCommand(CivetCommandBase):
SUBCOMMAND = 'mergeresults'
@staticmethod
def defaultSettings():
settings = CivetCommandBase.defaultSettings()
return settings
def createToken(self, parent, info, page):
site, repo = self.getCivetInfo()
rows = []
for sha in mooseutils.git_merge_commits():
url = '{}/sha_events/{}/{}'.format(site, repo, sha)
link = core.Link(parent, url=url, string=sha)
core.LineBreak(parent)
return parent
class CivetResultsCommand(CivetCommandBase):
SUBCOMMAND = 'results'
@staticmethod
def defaultSettings():
settings = CivetCommandBase.defaultSettings()
return settings
def createToken(self, parent, info, page):
site, repo = self.getCivetInfo()
sha = mooseutils.git_commit()
url = '{}/sha_events/{}/{}'.format(site, repo, sha)
if info['inline']:
return core.Link(parent, url=url)
else:
return core.Link(parent, string=sha, url=url)
class CivetTestBadgesCommand(CivetCommandBase):
SUBCOMMAND = 'badges'
@staticmethod
def defaultSettings():
config = CivetCommandBase.defaultSettings()
config['tests'] = (None, "The name of the test(s) to report.")
return config
def createToken(self, parent, info, page):
return CivetTestBadges(parent, tests=self.settings.get('tests').split())
class CivetTestReportCommand(CivetCommandBase):
SUBCOMMAND = 'report'
@staticmethod
def defaultSettings():
config = CivetCommandBase.defaultSettings()
config['tests'] = (None, "The name of the test(s) to report.")
return config
def createToken(self, parent, info, page):
return CivetTestReport(parent, tests=self.settings.get('tests').split())
class RenderCivetTestBadges(components.RenderComponent):
def createLatex(self, parent, token, page):
pass
def createHTML(self, parent, token, page):
pass
def createMaterialize(self, parent, token, page):
div = html.Tag(parent, 'div', class_='moose-civet-badges')
for test in token['tests']:
counts = collections.defaultdict(int)
results = self.extension.results(test)
if results:
for job, recipes in results.items():
for recipe in recipes:
counts[recipe.status] += 1
base = self.extension.testBaseFileName(test)
if self.extension.hasTestReports() and (base is not None):
report_root = self.extension.get('test_reports_location')
fname = os.path.join(self.translator.get("destination"), report_root, base + '.html')
location = os.path.relpath(fname, os.path.dirname(page.destination))
a = html.Tag(div, 'a', href=location)
else:
a = html.Tag(div, 'span')
for key, count in counts.items():
badge = html.Tag(a, 'span', class_="new badge", string=str(count))
badge['data-badge-caption'] = key
badge['data-status'] = key.lower()
if 'OK' not in counts:
parent.parent.addClass('moose-civet-fail')
class RenderCivetTestReport(components.RenderComponent):
def createLatex(self, parent, token, page):
pass
def createHTML(self, parent, token, page):
pass
def createMaterialize(self, parent, token, page):
for key in token['tests']:
results = self.extension.results(key)
div = html.Tag(parent, 'div', class_='moose-civet-test-report')
tbl = html.Tag(div, 'table')
tr = html.Tag(tbl, 'tr')
html.Tag(tr, 'th', string='Status')
html.Tag(tr, 'th', string='Job')
html.Tag(tr, 'th', string='Recipe')
for job, tests in results.items():
for item in tests:
tr = html.Tag(tbl, 'tr')
td = html.Tag(tr, 'td', string=item.status)
td['data-status'] = item.status.lower()
tr_job = html.Tag(tr, 'td')
html.Tag(tr, 'td', string=item.recipe)
link = html.Tag(tr_job, 'span')
html.Tag(link, 'a', href='{}/job/{}'.format(item.url, job), string=str(job))
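# ---------------------------------------------------------------------------
# Hedged configuration sketch (not part of the original module): the shape of
# the 'remotes' option consumed in CivetExtension.init -- a dict of named
# remotes, each carrying a 'url' and 'repo' key. The name and URL below are
# illustrative placeholders only.
def _example_remotes():
    return {'main': {'url': 'https://civet.example.com',
                     'repo': 'example/project'}}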
|
nuclear-wizard/moose
|
python/MooseDocs/extensions/civet.py
|
Python
|
lgpl-2.1
| 11,701
|
[
"MOOSE"
] |
3be3b8fb2a61144415875348e4e79337e8f83930702f73f7396103aef841d86d
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-12 20:46:42
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-06-06 14:38:39
from __future__ import print_function, division, absolute_import
import pytest
from marvin import marvindb
from flask import url_for
@pytest.mark.parametrize('page', [('index_page', 'Marvin:index')], ids=['index'], indirect=True)
class TestIndexPage(object):
def test_assert_index_template_used(self, page, get_templates):
page.load_page('get', page.url)
assert '' == page.data
template, context = get_templates[0]
assert 'index.html' == template.name
@pytest.mark.parametrize('page', [('index_page', 'Marvin:database')], ids=['database'], indirect=True)
class TestDb(object):
def test_db_works(self, page, release):
page.load_page('get', page.url, params={'release': release})
data = {'plate': 7443}
page.assert_webjson_success(data)
def test_db_post_fails(self, page, release):
page.load_page('post', page.url, params={'release': release})
page.assert405('allowed method should be get')
@pytest.mark.parametrize('page', [('index_page', 'selectmpl')], ids=['selectmpl'], indirect=True)
class TestSelectMPL(object):
def test_select_mpl(self, page, release, drpver, dapver):
page.load_page('post', page.url, params={'release': release})
data = {'current_release': release, 'current_drpver': drpver, 'current_dapver': dapver}
page.assert_webjson_success(data)
self._release_in_session(page, data)
def _release_in_session(self, page, data):
with page.client.session_transaction() as sess:
sess['release'] = data['current_release']
sess['drpver'] = data['current_drpver']
sess['dapver'] = data['current_dapver']
@pytest.mark.parametrize('page', [('index_page', 'getgalidlist')], ids=['getgalid'], indirect=True)
class TestGetGalIdList(object):
def test_getgalid_success(self, page, release):
page.load_page('post', page.url, params={'release': release})
data = ['8485', '8485-1901', '1-209232']
page.assert200(message='response status should be 200 for ok')
page.assertListIn(data, page.json)
def test_getgalid_fail(self, page, release):
marvindb.datadb = None
page.load_page('post', page.url, params={'release': release})
data = ['']
page.assert200(message='response status should be 200 for ok')
assert data == page.json
@pytest.mark.parametrize('page', [('index_page', 'galidselect')], ids=['galidselect'], indirect=True)
@pytest.mark.parametrize('name, id, galid', [('plate', 'plate', 8485),
('galaxy', 'plateifu', '8485-1901'),
('galaxy', 'mangaid', '1-209232'),
('main', None, None)])
class TestGalIdSelect(object):
def get_url(self, name, galid):
if name == 'plate':
return url_for('plate_page.Plate:get', plateid=galid)
elif name == 'plateifu':
return url_for('galaxy_page.Galaxy:get', galid=galid)
elif name == 'mangaid':
return url_for('galaxy_page.Galaxy:get', galid=galid)
elif name is None:
return url_for('index_page.Marvin:index')
def test_get_galid(self, page, release, name, id, galid):
data = {'galid': galid, 'release': release}
page.load_page('get', page.url, params=data)
redirect_url = self.get_url(id, galid)
if id:
page.assert_redirects(redirect_url, 'page should be redirected to {0} page'.format(name))
else:
page.assert422(message='response should be 422 for no name input')
@pytest.mark.xfail(reason='until we can deal with Credentials')
@pytest.mark.parametrize('page', [('index_page', 'login', False)], ids=['login'], indirect=True)
@pytest.mark.parametrize('data, exp',
[({'username': '', 'password': ''}, {'ready': False, 'status': -1, 'message': 'Login is not valid!'}),
({'username': 'sdss', 'password': 'password'}, {'ready': False, 'status': -1, 'message': 'Login sdss is not valid!'}),
({'username': 'bac29', 'password': 'password'}, {'ready': False, 'status': -1, 'message': 'Login bac29 is not valid!'}),
({'username': 'test', 'password': 'test'}, {'ready': True, 'status': 1, 'message': 'Login Successful!', 'membername': 'SDSS User'})],
ids=['no_input', 'wrong_pass', 'wrong_user', 'success'])
class TestLogin(object):
def test_login(self, inspection, page, release, data, exp):
data['release'] = release
page.load_page('post', page.url, params=data)
page.assert200('response status should be 200 for ok')
assert exp['status'] == page.json['result']['status']
assert exp['message'] == page.json['result']['message']
if 'membername' in exp and 'membername' in page.json['result']:
assert exp['membername'] == page.json['result']['membername']
|
sdss/marvin
|
tests/web/test_index.py
|
Python
|
bsd-3-clause
| 5,261
|
[
"Brian",
"Galaxy"
] |
9cab806651db5dc4af3d039ce34667de15855fbd04d6e5603966b984addf66c0
|
#!/usr/bin/python
"""
Copyright 2010 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
#
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
sid = form.getfirst('gh_sid', '')
spawnName = form.getfirst('spawn', '')
galaxy = form.getfirst('galaxy', '')
planets = form.getfirst('planets', '')
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
spawnName = dbShared.dbInsertSafe(spawnName)
galaxy = dbShared.dbInsertSafe(galaxy)
planets = dbShared.dbInsertSafe(planets)
# Get a session
logged_state = 0
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
# Main program
print 'Content-type: text/html\n'
if (logged_state > 0):
try:
conn = dbShared.ghConn()
cursor = conn.cursor()
except Exception:
result = "Error: could not connect to database"
if (cursor):
if (dbShared.galaxyState(galaxy) == 1):
markAll = 0
cursor.execute('SELECT spawnID, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER FROM tResources WHERE galaxy=' + galaxy + ' AND spawnName="' + spawnName + '";')
row = cursor.fetchone()
if (row != None):
spawnID = str(row[0])
if (planets == "all"):
markAll = 1
sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + currentUser + "' WHERE spawnID=" + spawnID + ";"
else:
# try to look up planet by name if an ID was not provided
if (planets.isdigit() != True):
planets = dbShared.getPlanetID(planets)
sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + currentUser + "' WHERE spawnID=" + spawnID + " AND planetID=" + planets + ";"
cursor.execute(sqlStr)
# check if any planets still available
cursor.execute("SELECT enteredBy FROM tResourcePlanet WHERE spawnID=" + str(spawnID) + " AND unavailable IS NULL;")
row = cursor.fetchone()
if (row == None):
markAll = 1
# update main table when all planets unavailable
if (markAll == 1):
sqlStr = "UPDATE tResources SET unavailable=NOW(), unavailableBy='" + currentUser + "' WHERE spawnID=" + str(spawnID) + ";"
cursor.execute(sqlStr)
# add cleanup event
if not planets.isdigit():
planets = 0
dbShared.logEvent("INSERT INTO tResourceEvents (spawnID, userID, eventTime, eventType, planetID) VALUES (" + str(spawnID) + ",'" + currentUser + "',NOW(),'r'," + str(planets) + ");",'r',currentUser,galaxy,str(spawnID))
result = spawnName
cursor.close()
else:
result = "Error: That Galaxy is Inactive."
else:
result = "Error: Could not connect to database"
conn.close()
else:
result = "Error: You must be logged in to mark a resource unavailable."
print result
if (result.find("Error:") > -1):
sys.exit(500)
else:
sys.exit(200)
|
clreinki/GalaxyHarvester
|
markUnavailable.py
|
Python
|
agpl-3.0
| 4,018
|
[
"Galaxy"
] |
428a8dc9e870159a5f84185767f3034556dee15d5d07f0a81db56db6627cf351
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The actual plugin view form
"""
import logging
import os
from PyQt4 import QtGui
from openlp.core.lib import PluginStatus, Registry, translate
from .plugindialog import Ui_PluginViewDialog
log = logging.getLogger(__name__)
class PluginForm(QtGui.QDialog, Ui_PluginViewDialog):
"""
The plugin form provides user control over the plugins OpenLP uses.
"""
def __init__(self, parent=None):
"""
Constructor
"""
super(PluginForm, self).__init__(parent)
self.activePlugin = None
self.programaticChange = False
self.setupUi(self)
self.load()
self._clearDetails()
# Right, now let's put some signals and slots together!
self.pluginListWidget.itemSelectionChanged.connect(self.onPluginListWidgetSelectionChanged)
self.statusComboBox.currentIndexChanged.connect(self.onStatusComboBoxChanged)
def load(self):
"""
Load the plugin details into the screen
"""
self.pluginListWidget.clear()
self.programaticChange = True
self._clearDetails()
self.programaticChange = True
pluginListWidth = 0
for plugin in self.plugin_manager.plugins:
item = QtGui.QListWidgetItem(self.pluginListWidget)
# We do this just to make 100% sure the status is an integer as
# sometimes when it's loaded from the config, it isn't cast to int.
plugin.status = int(plugin.status)
# Set the little status text in brackets next to the plugin name.
if plugin.status == PluginStatus.Disabled:
status_text = translate('OpenLP.PluginForm', '%s (Disabled)')
elif plugin.status == PluginStatus.Active:
status_text = translate('OpenLP.PluginForm', '%s (Active)')
else:
# PluginStatus.Inactive
status_text = translate('OpenLP.PluginForm', '%s (Inactive)')
item.setText(status_text % plugin.name_strings['singular'])
# If the plugin has an icon, set it!
if plugin.icon:
item.setIcon(plugin.icon)
self.pluginListWidget.addItem(item)
pluginListWidth = max(pluginListWidth, self.fontMetrics().width(
translate('OpenLP.PluginForm', '%s (Inactive)') % plugin.name_strings['singular']))
self.pluginListWidget.setFixedWidth(pluginListWidth + self.pluginListWidget.iconSize().width() + 48)
def _clearDetails(self):
"""
Clear the plugin details widgets
"""
self.statusComboBox.setCurrentIndex(-1)
self.versionNumberLabel.setText('')
self.aboutTextBrowser.setHtml('')
self.statusComboBox.setEnabled(False)
def _setDetails(self):
"""
Set the details of the currently selected plugin
"""
log.debug('PluginStatus: %s', str(self.activePlugin.status))
self.versionNumberLabel.setText(self.activePlugin.version)
self.aboutTextBrowser.setHtml(self.activePlugin.about())
self.programaticChange = True
status = PluginStatus.Active
if self.activePlugin.status == PluginStatus.Active:
status = PluginStatus.Inactive
self.statusComboBox.setCurrentIndex(status)
self.statusComboBox.setEnabled(True)
self.programaticChange = False
def onPluginListWidgetSelectionChanged(self):
"""
If the selected plugin changes, update the form
"""
if self.pluginListWidget.currentItem() is None:
self._clearDetails()
return
plugin_name_singular = self.pluginListWidget.currentItem().text().split('(')[0][:-1]
self.activePlugin = None
for plugin in self.plugin_manager.plugins:
if plugin.status != PluginStatus.Disabled:
if plugin.name_strings['singular'] == plugin_name_singular:
self.activePlugin = plugin
break
if self.activePlugin:
self._setDetails()
else:
self._clearDetails()
def onStatusComboBoxChanged(self, status):
"""
If the status of a plugin is altered, apply the change
"""
if self.programaticChange or status == PluginStatus.Disabled:
return
if status == PluginStatus.Inactive:
self.application.set_busy_cursor()
self.activePlugin.toggle_status(PluginStatus.Active)
self.application.set_normal_cursor()
self.activePlugin.app_startup()
else:
self.activePlugin.toggle_status(PluginStatus.Inactive)
status_text = translate('OpenLP.PluginForm', '%s (Inactive)')
if self.activePlugin.status == PluginStatus.Active:
status_text = translate('OpenLP.PluginForm', '%s (Active)')
elif self.activePlugin.status == PluginStatus.Inactive:
status_text = translate('OpenLP.PluginForm', '%s (Inactive)')
elif self.activePlugin.status == PluginStatus.Disabled:
status_text = translate('OpenLP.PluginForm', '%s (Disabled)')
self.pluginListWidget.currentItem().setText(
status_text % self.activePlugin.name_strings['singular'])
def _get_plugin_manager(self):
"""
Adds the plugin manager to the class dynamically
"""
if not hasattr(self, '_plugin_manager'):
self._plugin_manager = Registry().get('plugin_manager')
return self._plugin_manager
plugin_manager = property(_get_plugin_manager)
def _get_application(self):
"""
Adds the openlp to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
|
marmyshev/item_title
|
openlp/core/ui/pluginform.py
|
Python
|
gpl-2.0
| 8,163
|
[
"Brian"
] |
4aa373dd01bc249449603b40deacfc1bde345390573f41bf34d430ac141b8255
|
import numpy as np
def spherical_proposal(theta, prop_scale=.05):
""" jitters theta with spherical gaussian noise """
thp = theta + prop_scale*np.random.randn(len(theta))
if np.any(thp < 0):
#print thp
return None
else:
return thp
def whitened_mh( th, #current state of hyper parameters
f, #current state of GP governed by th
whiten_func, #function handle to whiten a GP sample
unwhiten_func, #function handle to unwhiten a GP sample
like_func, #likelihood func
ln_prior, #log prior over hyper parameters
prop_scale = None, #proposal distribution scale
prop_dist = spherical_proposal):
""" returns a sample of theta (cov funciont hyper parameters
given the state of the MVN f. It first whitens f into nu,
leaving that fixed for a higher acceptance rate
INPUT:
- th : current state of the cov func hyperparams
- f : current state of the latent gaussian proc/vars
- whiten : function takes in (th, f) to find whitened version of f
e.g. K_th = Cov_Func(th, x)
L_th = chol(K_th)
nu = inv(L_th) * f
user specified, so this can be optimized version can be
passed in
- unwhiten: function takes in (th_p, nu) and computes unwhitened version
of nu
e.g. K_thp = Cov_Func(th_p, nu)
L_thp = chol(K_thp)
fp = L_thp * nu
again, it's user specified so optimized versions can be passed in
- Lfn : likelihood function, func of f
- prop_dist: proposal distribution for th (function of curr th)
OUTPUT:
- th-new : new covariance kernel parameters
- f-new : new version of the multivariate normal
"""
#set proposal function (scale takes over)
if prop_scale is not None:
        prop_dist = lambda th: spherical_proposal(th, prop_scale=prop_scale)
# solve for nu ~ Normal(0, I) (whiten)
# whitening function incorporates Covariance
# function (but optimized to handle kronecker stuff)
nu = whiten_func(th, f)
# propose th' ~ q(th' ; th)
thp = prop_dist(th)
if thp is None:
print "bad proposal, returning none"
return th, f, False, like_func(f) + ln_prior(th)
# compute implied values f' = L_thp*nu (unwhiten)
fp = unwhiten_func(thp, nu)
# mh accept/reject
ll_p = like_func(fp) + ln_prior(thp)
#print "=========="
#print " proposal likelihood: ", like_func(fp)
#print " proposal prior: ", ln_prior(thp)
ll_o = like_func(f) + ln_prior(th)
#print " current likelihood: ", like_func(f)
#print " current prior: ", ln_prior(th)
if -np.random.exponential() < ll_p - ll_o:
return thp, fp, True, ll_p
else:
return th, f, False, ll_o
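# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way to build the
# whiten/unwhiten/likelihood callables that whitened_mh expects, using a
# squared-exponential kernel on a 1-D grid. The kernel, data, noise level and
# flat positive prior below are illustrative assumptions, not package defaults.
def _demo_whitened_mh(n_iters=100):
    x = np.linspace(0, 1, 50)                        # input locations
    y = np.sin(6 * x) + .1 * np.random.randn(50)     # noisy observations

    def cov(th):
        # th = [lengthscale, signal variance]; jitter keeps the Cholesky stable
        d2 = (x[:, None] - x[None, :]) ** 2
        return th[1] * np.exp(-.5 * d2 / th[0] ** 2) + 1e-6 * np.eye(len(x))

    def whiten(th, f):                 # nu = inv(L_th) * f
        return np.linalg.solve(np.linalg.cholesky(cov(th)), f)

    def unwhiten(th, nu):              # f' = L_thp * nu
        return np.dot(np.linalg.cholesky(cov(th)), nu)

    def log_like(f):                   # Gaussian observation noise, sd = .1
        return -.5 * np.sum((y - f) ** 2) / .01

    def ln_prior(th):                  # flat prior on positive hyperparams
        return 0. if np.all(th > 0) else -np.inf

    th = np.array([.2, 1.])
    f = unwhiten(th, np.random.randn(len(x)))
    for _ in range(n_iters):
        th, f, accepted, ll = whitened_mh(th, f, whiten, unwhiten,
                                          log_like, ln_prior, prop_scale=.05)
    return th, f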
|
andymiller/flecks
|
inference/whitened_mh.py
|
Python
|
mit
| 2,940
|
[
"Gaussian"
] |
5daab7854d9be3df1286b99d9fe525a82a67eaacdca5822ba297cbd667f05718
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from kivy.cache import Cache
from ORCA.utils.FileName import cFileName
from ORCA.utils.LoadFile import LoadFile
__all__ = ['CachedFile','ClearCache']
uCacheName:str = 'CachedFiles'
Cache.register(category = uCacheName, timeout=120)
def CachedFile(*,oFileName: cFileName) -> str:
"""
Returns the content of a file as string, using a cache if already loaded
"""
uFileContent: str = Cache.get(category = uCacheName, key = oFileName.string)
if uFileContent is None:
uFileContent = LoadFile(oFileName=oFileName)
Cache.append(category = uCacheName, key = oFileName.string, obj = uFileContent, timeout = 120)
return uFileContent
def ClearCache() -> None:
""" Clears the cache and frees memory """
Cache.remove(category = uCacheName)
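# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): CachedFile is a thin
# read-through cache, so repeated reads of the same file within the timeout
# are served from the Kivy cache instead of disk. The cFileName passed in is
# whatever file object the caller already holds; only the two functions
# defined above are used.
def _example_cached_read(oFileName: cFileName) -> str:
    uFirst: str  = CachedFile(oFileName=oFileName)    # first call: disk read, then cached
    uSecond: str = CachedFile(oFileName=oFileName)    # second call: served from the cache
    assert uFirst == uSecond
    return uSecond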
|
thica/ORCA-Remote
|
src/ORCA/utils/CachedFile.py
|
Python
|
gpl-3.0
| 1,668
|
[
"ORCA"
] |
ec5a4462fc28f81bed3fb02ed6b959fcb6f6aff803c2f8564eab8b98662d7695
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.callbacks as callbacks
buildings = {
'AA': 'Agricultural Administration Building, ' \
'2120 Fyffe Road, Columbus, Ohio, 43210',
'AC': 'ATI Student Activities Center, ' \
'Agriculture Tech Inst, Wooster, Ohio, 44691',
'AE': 'Agricultural Engineering, ' \
'590 Woody Hayes Drive, Columbus, Ohio, 43210',
'AF': 'Wagner, ATI Fairgrounds, Wooster, Ohio, 44691',
'AG': 'ATI Greenhouse 3, Agriculture Tech Inst, Wooster, Ohio, 44691',
'AK': 'Applecreek, Agriculture Tech Inst, Wooster, Ohio, 44691',
'AM': 'Allied Medical Professions Building, ' \
'1583 Perry Street, Columbus, Ohio, 43210',
'AO': 'Airport Operations Building, ' \
'2160 W Case Road, Columbus, Ohio, 43235',
'AP': 'Arps Hall, 1945 N High Street, Columbus, Ohio, 43210',
'AR': 'ATI Residence Hall, Agriculture Tech Inst, Wooster, Ohio, 44691',
'AS': 'Animal Science Building, 2029 Fyffe Road, Columbus, Ohio, 43210',
'AT': 'Halterman Hall (ATI), 1328 Dover Road, Wooster, Ohio, 44691',
'AV': 'Aviation Building, 164 W 19th Avenue, Columbus, Ohio, 43210',
'BA': 'Browning Amphitheatre, Mirror Lake, Columbus, Ohio, 43210',
'BE': 'Baker Systems Engineering, 1971 Neil Avenue, Columbus, Ohio, 43210',
'BF': 'Bromfield Hall, 1660 University Drive, Mansfield, Ohio, 44906',
'BH': 'Bevis Hall, 1080 Carmack Road, Columbus, Ohio, 43210',
'BI': 'Biological Sciences Building, ' \
'484 W 12th Avenue, Columbus, Ohio, 43210',
'BK': 'Bricker Hall, 190 N Oval Mall, Columbus, Ohio, 43210',
'BL': 'Boyd Laboratory, 155 W Woodruff Avenue, Columbus, Ohio, 43210',
'BO': 'Bolz Hall, 2036 Neil Avenue Mall, Columbus, Ohio, 43210',
'BR': 'Brown Hall, 190 W 17th Avenue, Columbus, Ohio, 43210',
'BZ': 'Botany & Zoology Building, 1735 Neil Avenue, Columbus, Ohio, 43210',
'CC': 'Central Classroom Building, ' \
'2009 Millikin Road, Columbus, Ohio, 43210',
'CE': 'Celeste Laboratory Of Chemistry, ' \
'120 W 18th Avenue, Columbus, Ohio, 43210',
'CH': 'Cockins Hall, 1958 Neil Avenue, Columbus, Ohio, 43210',
'CK': 'Cook Hall, 4240 Campus Drive, Lima, Ohio, 45804',
'CL': 'Caldwell Laboratory, 2024 Neil Avenue, Columbus, Ohio, 43210',
'CM': 'Campbell Hall, 1787 Neil Avenue, Columbus, Ohio, 43210',
'CT': 'Fawcett Center For Tomorrow, ' \
'2400 Olentangy River, Columbus, Ohio, 43210',
'CV': 'Converse Hall, 2121 Tuttle Park Pl, Columbus, Ohio, 43210',
'CX': 'Community Extension Center, ' \
'905 Mt Vernon Avenue, Columbus, Ohio, 43203',
'CZ': 'Cunz Hall, 1841 Millikin Road, Columbus, Ohio, 43210',
'DB': 'Derby Hall, 154 N Oval Mall, Columbus, Ohio, 43210',
'DE': 'Denney Hall, 164 W 17th Avenue, Columbus, Ohio, 43210',
'DI': 'Drinko Hall, 55 W 12th Avenue, Columbus, Ohio, 43210',
'DK': 'Dakan Hall, 674 W Lane Avenue, Columbus, Ohio, 43210',
'DL': 'Dreese Laboratories, 2015 Neil Avenue, Columbus, Ohio, 43210',
'DN': 'Doan Hall, 410 W 10th Avenue, Columbus, Ohio, 43210',
'DO': 'Dodd Hall, 480 W 9th Avenue, Columbus, Ohio, 43210',
'DR': 'Drake Union, 1849 Cannon Drive, Columbus, Ohio, 43210',
'DU': 'Dulles Hall, 230 W 17th Avenue, Columbus, Ohio, 43210',
'DV': 'Davis Medical Research Center, ' \
'480 W 9th Avenue, Columbus, Ohio, 43210',
'EA': '209 W Eighteenth Building, ' \
'209 West 18th Avenue, Columbus, Ohio, 43210',
'EC': 'Eisenhower Memorial Center, ' \
'1640 University Drive, Mansfield, Ohio, 44906',
'EL': 'Evans Laboratory, 88 W 18th Avenue, Columbus, Ohio, 43210',
'EN': 'Enarson Hall, 154 W 12th Avenue, Columbus, Ohio, 43210',
'FA': 'Fisher Auditorium, OARDC-Wooster, Wooster, Ohio, 44691',
'FF': 'French Field House, 460 Woody Hayes Drive, Columbus, Ohio, 43210',
'FH': 'Founders Hall, 1179 University Drive, Newark, Ohio, 43055',
'FL': 'Fontana Laboratories, 116 W 19th Avenue, Columbus, Ohio, 43210',
'FR': 'Fry Hall, 338 W 10th Avenue, Columbus, Ohio, 43210',
'FT': 'Fallerius Technical Educ Center, ' \
'2441 Kenwood Circle, Mansfield, Ohio, 44906',
'GA': 'Galvin Hall, 4240 Campus Drive, Lima, Ohio, 45804',
'GB': 'General Biology Annex, 1791 Neil Avenue, Columbus, Ohio, 43210',
'GH': 'Golf Course Club House, 3605 Tremont Road, Columbus, Ohio, 43221',
'GL': 'Goss Laboratory, 1925 Coffey Road, Columbus, Ohio, 43210',
'GR': 'Graves Hall, 333 W 10th Avenue, Columbus, Ohio, 43210',
'HA': 'Hayes Hall, 108 N Oval Mall, Columbus, Ohio, 43210',
'HC': 'Hopkins Hall, 128 N Oval Mall, Columbus, Ohio, 43210',
'HG': 'Howlett Greenhouses, 680 Tharp St, Columbus, Ohio, 43210',
'HH': 'Hagerty Hall, 1775 College Road, Columbus, Ohio, 43210',
'HI': 'Hitchcock Hall, 2070 Neil Avenue, Columbus, Ohio, 43210',
'HK': 'Haskett Hall, 156 W 19th Avenue, Columbus, Ohio, 43210',
'HL': 'Hale Hall, 153 W 12th Avenue, Columbus, Ohio, 43210',
'HM': 'Hamilton Hall, 1645 Neil Avenue, Columbus, Ohio, 43210',
'HN': 'Kuhn Honors House, 220 W 12th Avenue, Columbus, Ohio, 43210',
'HP': 'Hopewell Hall (Newark), 1179 University Drive, Newark, Ohio, 43055',
'HS': 'Health Science Library, 376 W 10th Avenue, Columbus, Ohio, 43210',
'HT': 'Howlett Hall, 2001 Fyffe Court, Columbus, Ohio, 43210',
'HU': 'Hughes Hall, 1899 College Road, Columbus, Ohio, 43210',
'IH': 'Independence Hall, 1923 Neil Avenue Mall, Columbus, Ohio, 43210',
'IR': 'Ice Rink, 390 Woody Hayes Drive, Columbus, Ohio, 43210',
'IV': 'Ives Hall, 2073 Neil Avenue, Columbus, Ohio, 43210',
'JA': 'James Cancer Hosp & Research Inst, ' \
'300 W 10th Avenue, Columbus, Ohio, 43210',
'JR': 'Journalism Building, 242 W 18th Avenue, Columbus, Ohio, 43210',
'KH': 'Kottman Hall, 2021 Coffey Road, Columbus, Ohio, 43210',
'KL': 'Koffolt Laboratories, 140 W 19th Avenue, Columbus, Ohio, 43210',
'KR': '1224 Kinnear Road, 1224 Kinnear Road, Columbus, Ohio, 43212',
'LC': 'Ohio Legal Center, 33 W 11th Avenue, Columbus, Ohio, 43201',
'LI': 'Main Library, 1858 Neil Avenue Mall, Columbus, Ohio, 43210',
'LK': 'Larkins Hall, 337 W 17th Avenue, Columbus, Ohio, 43210',
'LO': 'Lord Hall, 124 W 17th Avenue, Columbus, Ohio, 43210',
'LS': 'Reed Student Activities Building, ' \
'4240 Campus Drive, Lima, Ohio, 45804',
'LT': 'Lincoln Tower, 1800 Cannon Drive, Columbus, Ohio, 43210',
'LZ': 'Lazenby Hall, 1827 Neil Avenue Mall, Columbus, Ohio, 43210',
'MA': 'Mathematics Building, 231 W 18th Avenue, Columbus, Ohio, 43210',
'MC': 'McCampbell Hall, 1581 Dodd Drive, Columbus, Ohio, 43210',
'ME': 'Meiling Hall, 370 W 9th Avenue, Columbus, Ohio, 43210',
'ML': 'Mendenhall Laboratory, 125 S Oval Mall, Columbus, Ohio, 43210',
'MM': 'Mershon Auditorium, 1871 N High St, Columbus, Ohio, 43210',
'MN': '1501 Neil Avenue, 1501 Neil Avenue, Columbus, Ohio, 43201',
'MO': 'Mount Hall, 1050 Carmack Road, Columbus, Ohio, 43210',
'MP': 'McPherson Chemical Laboratory, ' \
'140 W 18th Avenue, Columbus, Ohio, 43210',
'MQ': 'MacQuigg Laboratory, 105 W Woodruff Avenue, Columbus, Ohio, 43210',
'MR': 'Morrill Hall (Marion), 1465 Mt Vernon Avenue, Marion, Ohio, 43302',
'MS': 'Means Hall, 1654 Upham Drive, Columbus, Ohio, 43210',
'MT': 'Morrill Tower, 1900 Cannon Drive, Columbus, Ohio, 43210',
'NE': 'Neil-17th Building, 1949 Neil Avenue, Columbus, Ohio, 43210',
'NH': 'Newton Hall, 1585 Neil Avenue, Columbus, Ohio, 43210',
'NL': 'Neil Hall, 1634 Neil Avenue, Columbus, Ohio, 43210',
'NR': 'Jesse Owens Recreation Center North, ' \
'2151 Neil Avenue, Columbus, Ohio, 43210',
'OR': 'Orton Hall, 155 S Oval Mall, Columbus, Ohio, 43210',
'OU': 'Ohio Union, 1739 N High St, Columbus, Ohio, 43210',
'OV': 'Ovalwood Hall (Mansfield), ' \
'1680 University Drive, Mansfield, Ohio, 44906',
'OX': 'Oxley Hall, 1712 Neil Avenue, Columbus, Ohio, 43210',
'PA': 'Page Hall, 1810 College Road, Columbus, Ohio, 43210',
'PH': 'Postle Hall, 305 W 12th Avenue, Columbus, Ohio, 43210',
'PK': 'Parks Hall, 500 W 12th Avenue, Columbus, Ohio, 43210',
'PL': 'Plumb Hall, 2027 Coffey Road, Columbus, Ohio, 43210',
'PN': '1478 Pennsylvania Avenue, ' \
'1478 Pennsylvania Avenue, Columbus, Ohio, 43201',
'PO': 'Pomerene Hall, 1760 Neil Avenue, Columbus, Ohio, 43210',
'PR': 'Pressey Hall, 1070 Carmack Road, Columbus, Ohio, 43210',
'RA': 'Ramseyer Hall, 29 W Woodruff Avenue, Columbus, Ohio, 43210',
'RC': 'Research Center, 1314 Kinnear Road, Columbus, Ohio, 43212',
'RD': 'Rhodes Hall, 450 W 10th Avenue, Columbus, Ohio, 43210',
'RF': 'Riffe Building, 496 W 12th Avenue, Columbus, Ohio, 43210',
'RH': 'Rightmire Hall, 1060 Carmack Road, Columbus, Ohio, 43210',
'RL': 'Robinson Laboratory, 206 W 18th Avenue, Columbus, Ohio, 43210',
'RY': 'Royer Student Activities Center, ' \
'85 Curl Drive, Columbus, Ohio, 43210',
'SA': 'Foundry Metals & Glass Building, ' \
'1055 Carmack Road, Columbus, Ohio, 43210',
'SC': 'Scott Hall, 1090 Carmack Road, Columbus, Ohio, 43210',
'SD': 'Alber Student Center, 1465 Mt Vernon Avenue, Marion, Ohio, 43302',
'SE': 'Steeb Hall, Se 70 W 11th Avenue, Columbus, Ohio, 43210',
'SH': 'Stillman Hall, 1947 College Road, Columbus, Ohio, 43210',
'SI': 'Sisson Hall, 1900 Coffey Road, Columbus, Ohio, 43210',
'SJ': 'St John Arena, 410 Woody Hayes Drive, Columbus, Ohio, 43210',
'SK': 'Skou Hall, Agriculture Tech Inst, Wooster, Ohio, 44691',
'SL': 'Starling Loving Hall A, 320 W 10th Avenue, Columbus, Ohio, 43210',
'SM': 'Smith Laboratory, 174 W 18th Avenue, Columbus, Ohio, 43210',
'SN': 'Gibraltar Stonlab, Gibraltar Island, Put-in-bay, Ohio, 43456',
'SP': 'ATI Shop, Agriculture Tech Inst, Wooster, Ohio, 44691',
'SR': 'Jesse Owens Recreation Center South, ' \
          '175 W 11th Avenue, Columbus, Ohio, 43210',
'ST': 'Ohio Stadium, 411 Woody Hayes Drive, Columbus, Ohio, 43210',
'SU': 'Sullivant Hall, 1813 N High St, Columbus, Ohio, 43210',
'TE': 'Marion Technical Education Center, ' \
'1467 Mt Vernon Avenue, Marion, Ohio, 43302',
'TL': 'Technical Education Building (Lima), ' \
'4240 Campus Drive, Lima, Ohio, 45804',
'TO': 'Townshend Hall, 1885 Neil Avenue Mall, Columbus, Ohio, 43210',
'TT': 'Taylor Tower, 50 Curl Drive, Columbus, Ohio, 43210',
'UH': 'University Hall, 230 N Oval Mall, Columbus, Ohio, 43210',
'UP': 'Upham Hall, 473 W 12th Avenue, Columbus, Ohio, 43210',
'VE': 'Veterinary Hospital, 601 Vernon Tharp St, Columbus, Ohio, 43210',
'VG': 'Van De Graaff Laboratory, 1302 Kinnear Road, Columbus, Ohio, 43212',
'VH': 'Vivian Hall, 2121 Fyffe Road, Columbus, Ohio, 43210',
'WA': 'Watts Hall, 2041 College Road, Columbus, Ohio, 43210',
'WE': 'Welding Engineering Laboratory, ' \
'190 W 19th Avenue, Columbus, Ohio, 43210',
'WG': 'Weigel Hall, 1866 College Road, Columbus, Ohio, 43210',
'WI': 'Wiseman Hall, 400 W 12th Avenue, Columbus, Ohio, 43210',
'WL': 'Williams, OARDC-Wooster, Wooster, Ohio, 44691',
'WO': 'Womens Field House, 1801 Neil Avenue, Columbus, Ohio, 43210',
'WR': 'Jesse Owens Recreation Center West, ' \
'1031 Carmack Road, Columbus, Ohio, 43210',
'WS': 'Wilce Health Center, 1875 Millikin Road, Columbus, Ohio, 43210',
'WX': 'Wexner Center, 1850 College Road, Columbus, Ohio, 43210'
}
class OSU(callbacks.Plugin):
def email(self, irc, msg, args):
"""<first name> <middle initial> <last name>
Returns possible email address matches for the given name.
"""
s = '.'.join(args)
url = 'http://www.ohio-state.edu/cgi-bin/inquiry2.cgi?keyword=%s' % s
try:
data = utils.web.getUrl(url)
emails = []
for line in data.splitlines():
                line = line.strip()
if 'Published address' in line:
emails.append(line.split()[-1])
if len(emails) == 0:
irc.reply('There seem to be no matches to that name.')
elif len(emails) == 1:
irc.reply(emails[0])
else:
irc.reply('Possible matches: %s.' % ', '.join(emails))
except Exception, e:
irc.error(utils.exnToString(e))
email = thread(email)
def building(self, irc, msg, args, building):
"""<building abbreviation>
Returns the address and full name of an OSU building based on its
standard two-letter abbreviation.
"""
try:
irc.reply(buildings[building.upper()])
except KeyError:
irc.reply('I don\'t know of any such OSU building.')
building = wrap(building, ['something'])
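    # Illustrative usage sketch (hypothetical input, not part of the original
    # plugin): "email John Q Public" queries the inquiry CGI with the keyword
    # "John.Q.Public" and replies with any "Published address" entries found,
    # while "building or" upper-cases the key and replies with
    # 'Orton Hall, 155 S Oval Mall, Columbus, Ohio, 43210'.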
Class = OSU
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
kg-bot/SupyBot
|
plugins/OSU/plugin.py
|
Python
|
gpl-3.0
| 14,694
|
[
"COLUMBUS"
] |
1d7b06ee983802701e32e3594ad61b774be249dc1180a7ea451acc0192844ca7
|
import collections
url_exemple = "http://www.google.com/"
cards = [
{
'type': 'CB',
'card_number': '4970100000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000007',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
]
theme_args = collections.OrderedDict([
("success_footer_msg_return", "Success footer msg test"),
("cancel_footer_msg_return", "Cancel footer msg test"),
("secure_message", "Secure message test"),
("secure_message_register", "Secure message register test"),
("site_id_label", "Site ID label test"),
("css_for_payment", url_exemple+"payment.css"),
("css_for_payment_mobile", url_exemple+"mobile_payment.css"),
("header_for_mail", url_exemple+"mail_header.html"),
("footer_for_mail", url_exemple+"footer_mail.html"),
("shop_logo", url_exemple+"logo.png"),
])
payment_config_args = {
"first": 5000,
"count": 2,
"period": 5
}
payment_args = {
# Base fields
"vads_amount": "10000",
"vads_capture_delay": "2",
"vads_payment_cards": "CB;Visa",
"vads_return_mode": "NONE",
"vads_validation_mode": "1",
"vads_url_success": url_exemple,
"vads_url_referral": url_exemple,
"vads_url_refused": url_exemple,
"vads_url_cancel": url_exemple,
"vads_url_error": url_exemple,
"vads_url_return": url_exemple,
"vads_user_info": "Abbath Doom Occulta",
"vads_shop_name": "Immortal",
"vads_redirect_success_timeout": "1",
"vads_redirect_success_message": "Tragedies Blows At Horizon",
"vads_redirect_error_timeout": "1",
"vads_redirect_error_message": "At The Heart Of Winter",
# customer fields
"vads_cust_address": "Oeschstr.",
"vads_cust_address_number": "9",
"vads_cust_country": "GE",
"vads_cust_email": "test@nuclearblast.de",
"vads_cust_id": "1",
"vads_cust_name": "NUCLEAR BLAST",
"vads_cust_cell_phone": "+49 7162 9280-0",
"vads_cust_phone": "+49 7162 9280 26",
"vads_cust_title": "Guitarist",
"vads_cust_city": "Donzdorf",
"vads_cust_state": "Donzdorf",
"vads_cust_zip": "73072",
"vads_language": "fr",
# order fields
"vads_order_id": "1234567890",
"vads_order_info": "Order test info 1",
"vads_order_info2": "Order test info 2",
"vads_order_info3": "Order test info 3",
# shipping fields
"vads_ship_to_name": "NUCLEAR BLAST",
"vads_ship_to_street_number": "9",
"vads_ship_to_street": "Oeschstr. 9",
"vads_ship_to_street2": "...",
"vads_ship_to_zip": "73072",
"vads_ship_to_city": "Donzdorf",
"vads_ship_to_country": "GE",
"vads_ship_to_phone_num": "+49 7162 9280-0",
"vads_ship_to_state": "Donzdorf"
}
|
zehome/django-payzen
|
django_payzen/tests/data.py
|
Python
|
mit
| 6,745
|
[
"BLAST"
] |
9fda7c63315aaa73130160a4d5c6a1068cfa71954b56c7638c0f6edc76edc07a
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import os
import numpy as np
from nose.plugins.attrib import attr
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.test.common import tmpdir
def test_load_fcidump_psi4_h2():
mol = IOData.from_file(context.get_fn('test/FCIDUMP.psi4.h2'))
assert mol.core_energy == 0.7151043364864863E+00
assert mol.nelec == 2
assert mol.ms2 == 0
assert mol.one_mo.shape == (10, 10)
assert mol.one_mo[0, 0] == -0.1251399119550580E+01
assert mol.one_mo[2, 1] == 0.9292454365115077E-01
assert mol.one_mo[1, 2] == 0.9292454365115077E-01
assert mol.one_mo[9, 9] == 0.9035054979531029E+00
assert mol.two_mo.shape == (10, 10, 10, 10)
assert mol.two_mo[0, 0, 0, 0] == 0.6589928924251115E+00
# Check physicist's notation and symmetry
assert mol.two_mo[6, 1, 5, 0] == 0.5335846565304321E-01
assert mol.two_mo[5, 1, 6, 0] == 0.5335846565304321E-01
assert mol.two_mo[6, 0, 5, 1] == 0.5335846565304321E-01
assert mol.two_mo[5, 0, 6, 1] == 0.5335846565304321E-01
assert mol.two_mo[1, 6, 0, 5] == 0.5335846565304321E-01
assert mol.two_mo[1, 5, 0, 6] == 0.5335846565304321E-01
assert mol.two_mo[0, 6, 1, 5] == 0.5335846565304321E-01
assert mol.two_mo[0, 5, 1, 6] == 0.5335846565304321E-01
assert mol.two_mo[9, 9, 9, 9] == 0.6273759381091796E+00
def test_load_fcidump_molpro_h2():
mol = IOData.from_file(context.get_fn('test/FCIDUMP.molpro.h2'))
assert mol.core_energy == 0.7151043364864863E+00
assert mol.nelec == 2
assert mol.ms2 == 0
assert mol.one_mo.shape == (4, 4)
assert mol.one_mo[0, 0] == -0.1245406261597530E+01
assert mol.one_mo[0, 1] == -0.1666402467335385E+00
assert mol.one_mo[1, 0] == -0.1666402467335385E+00
assert mol.one_mo[3, 3] == 0.3216193420753873E+00
assert mol.two_mo.shape == (4, 4, 4, 4)
assert mol.two_mo[0, 0, 0, 0] == 0.6527679278914691E+00
# Check physicist's notation and symmetry
assert mol.two_mo[3, 0, 2, 1] == 0.7756042287284058E-01
assert mol.two_mo[2, 0, 3, 1] == 0.7756042287284058E-01
assert mol.two_mo[3, 1, 2, 0] == 0.7756042287284058E-01
assert mol.two_mo[2, 1, 3, 0] == 0.7756042287284058E-01
assert mol.two_mo[0, 3, 1, 2] == 0.7756042287284058E-01
assert mol.two_mo[0, 2, 1, 3] == 0.7756042287284058E-01
assert mol.two_mo[1, 3, 0, 2] == 0.7756042287284058E-01
assert mol.two_mo[1, 2, 0, 3] == 0.7756042287284058E-01
assert mol.two_mo[3, 3, 3, 3] == 0.7484308847738417E+00
def test_dump_load_fcidimp_consistency_ao():
# Setup IOData
mol0 = IOData.from_file(context.get_fn('test/water.xyz'))
obasis = get_gobasis(mol0.coordinates, mol0.numbers, '3-21G')
# Compute stuff for fcidump file. test without transforming to mo basis
mol0.core_energy = compute_nucnuc(mol0.coordinates, mol0.pseudo_numbers)
mol0.nelec = 10
mol0.ms2 = 1
mol0.one_mo = (
obasis.compute_kinetic() +
obasis.compute_nuclear_attraction(mol0.coordinates, mol0.pseudo_numbers))
mol0.two_mo = obasis.compute_electron_repulsion()
# Dump to a file and load it again
with tmpdir('horton.io.test.test_molpro.test_dump_load_fcidump_consistency_ao') as dn:
mol0.to_file('%s/FCIDUMP' % dn)
mol1 = IOData.from_file('%s/FCIDUMP' % dn)
# Compare results
np.testing.assert_equal(mol0.core_energy, mol1.core_energy)
np.testing.assert_equal(mol0.nelec, mol1.nelec)
np.testing.assert_equal(mol0.ms2, mol1.ms2)
np.testing.assert_almost_equal(mol0.one_mo, mol1.one_mo)
np.testing.assert_almost_equal(mol0.two_mo, mol1.two_mo)
def check_dump_load_fcidimp_consistency_mo(fn):
# Setup IOData
mol0 = IOData.from_file(fn)
# Compute stuff for fcidump file.
one = mol0.obasis.compute_kinetic()
mol0.obasis.compute_nuclear_attraction(mol0.coordinates, mol0.pseudo_numbers, one)
two = mol0.obasis.compute_electron_repulsion()
# transform to mo basis, skip core energy
(mol0.one_mo,), (mol0.two_mo,) = transform_integrals(one, two, 'tensordot', mol0.orb_alpha)
# Dump to a file and load it again
with tmpdir('horton.io.test.test_molpro.test_dump_load_fcidump_consistency_mo_%s' % os.path.basename(fn)) as dn:
fn = '%s/FCIDUMP' % dn
mol0.to_file(fn)
mol1 = IOData.from_file(fn)
# Compare results
assert mol1.core_energy == 0.0
assert mol1.nelec == 0
assert mol1.ms2 == 0
np.testing.assert_almost_equal(mol0.one_mo, mol1.one_mo)
np.testing.assert_almost_equal(mol0.two_mo, mol1.two_mo)
def test_dump_load_fcidimp_consistency_mo_water_sto3g():
check_dump_load_fcidimp_consistency_mo(context.get_fn('test/h2o_sto3g.fchk'))
@attr('slow')
def test_dump_load_fcidimp_consistency_mo_water_ccpvdz():
check_dump_load_fcidimp_consistency_mo(context.get_fn('test/water_ccpvdz_pure_hf_g03.fchk'))
def test_dump_load_fcidimp_consistency_mo_active():
# Setup IOData
mol0 = IOData.from_file(context.get_fn('test/h2o_sto3g.fchk'))
# Compute stuff for fcidump file.
one = mol0.obasis.compute_kinetic()
mol0.obasis.compute_nuclear_attraction(mol0.coordinates, mol0.pseudo_numbers, one)
two = mol0.obasis.compute_electron_repulsion()
# transform to mo basis and use only active space
enn = compute_nucnuc(mol0.coordinates, mol0.pseudo_numbers)
mol0.one_mo, mol0.two_mo, mol0.core_energy = split_core_active(one, two, enn, mol0.orb_alpha, 2, 4)
mol0.nelec = 10
mol0.ms2 = 0
# Dump to a file and load it again
with tmpdir('horton.io.test.test_molpro.test_dump_load_fcidump_consistency_mo_active') as dn:
mol0.to_file('%s/FCIDUMP' % dn)
mol1 = IOData.from_file('%s/FCIDUMP' % dn)
# Compare results
np.testing.assert_equal(mol0.core_energy, mol1.core_energy)
np.testing.assert_equal(mol0.nelec, mol1.nelec)
np.testing.assert_equal(mol0.ms2, mol1.ms2)
np.testing.assert_almost_equal(mol0.one_mo, mol1.one_mo)
np.testing.assert_almost_equal(mol0.two_mo, mol1.two_mo)
|
FarnazH/horton
|
horton/io/test/test_molpro.py
|
Python
|
gpl-3.0
| 6,822
|
[
"Molpro",
"Psi4"
] |
c59e34c722181e33bd76d1ca2b1bc38b1e2a2b6226b390b975902cc814ce8f63
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import logging
import bs4
import mooseutils
LOG = logging.getLogger(__name__)
class LatexNavigableString(bs4.element.NavigableString):
"""
An empty class to allow the Translator to convert the text without escaping content.
"""
pass
class Element(object):
"""
Base class for converting html tag to latex.
The basic conversion by changing the html tags to a "latex" tag and adding meta data to the tag
attributes. See Translator for meta data use.
Args:
name[str]: (Required) The tag name to test against.
attrs[dict]: (Optional) A dictionary of attributes and values that are required (see test())
Kwargs (Optional):
The following keywords are converted to tag meta data for latex conversion. The values
passed in for each of the keywords should be a str type.
begin --> data-latex-begin
The command to place prior to content.
begin_prefix --> data-latex-begin-prefix
The text (e.g., '\n') that should be placed prior to the begin command.
begin_suffix --> data-latex-begin-suffix
The text (e.g., '\n') that should be placed after to the begin command.
end --> data-latex-end
The command to place after the content.
end_prefix --> data-latex-end-prefix
The text (e.g., '\n') that should be placed prior to the end command.
end_suffix --> data-latex-end-suffix
The text (e.g., '\n') that should be placed after to the end command.
open -> data-latex-open
Text placed prior to all begin commands and content.
close -> data-latex-close
Text placed after content and all end commands.
content -> data-latex-content
Text used to replace the content of the tag including children
"""
def __init__(self, name=None, attrs=None, strip=False, **kwargs):
if name is None:
raise mooseutils.MooseException("The 'name' argument variable must be set.")
self._name = name
self._attrs = attrs if attrs is not None else dict()
self._strip = strip
self._data = dict()
keys = ['begin', 'begin_prefix', 'begin_suffix', 'end', 'end_prefix', 'end_suffix', 'open',
'close', 'content', 'escape']
for k in keys:
self._data.setdefault('data-latex-{}'.format(k.replace('_', '-')), kwargs.get(k, None))
self.__soup = None
def __call__(self, soup, tag):
self.__soup = soup
if self.test(tag):
self.convert(tag)
tag.name = 'latex'
def test(self, tag):
"""
Return True if the tag is to be converted.
Inputs:
tag[bs4.element.Tag]: The current parsed html tag.
"""
if tag.name == 'latex':
return False
if not isinstance(tag, bs4.element.Tag) or tag.name != self._name:
return False
for key, value in self._attrs.iteritems():
if (key not in tag.attrs) or (value not in tag[key]):
return False
return True
@staticmethod
def strip(tag):
"""
Strip whitespace from string descendants: lstrip on first and rstrip on last.
Inputs:
tag[bs4.element.Tag]: The current parsed html tag.
"""
strs = list(tag.strings)
strs[0].replace_with(strs[0].lstrip())
strs[-1].replace_with(strs[-1].rstrip())
def convert(self, tag):
"""
Convert the html tag to a "latex" tag.
Inputs:
tag[bs4.element.Tag]: The current parsed html tag.
"""
tag.name = 'latex'
for key, value in self._data.iteritems():
if value is not None:
tag.attrs.setdefault(key, value)
if 'data-latex-content' in tag.attrs:
tag.replace_with(self.new(string=LatexNavigableString(tag.attrs['data-latex-content'])))
if self._strip:
self.strip(tag)
def new(self, name='latex', string=None):
"""
Create a new bs4.element.Tag object.
Inputs:
name[str]: (optional) The name of the tag to create.
string[str]: (optional) The string content to add to the tag.
"""
ntag = self.__soup.new_tag(name)
if string:
ntag.string = string
return ntag
def curly(self, **kwargs):
"""
Create a latex curly bracket tag.
"""
ntag = self.new(**kwargs)
ntag.attrs['data-latex-begin'] = '{'
ntag.attrs['data-latex-end'] = '}'
return ntag
def square(self, **kwargs):
"""
Create a latex square bracket tag.
"""
ntag = self.new(**kwargs)
ntag.attrs['data-latex-begin'] = '['
ntag.attrs['data-latex-end'] = ']'
return ntag
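# Illustrative sketch (hypothetical tag, not part of the original module): an
# element declared as
#
#     Element(name='em', begin='\\emph{', end='}')
#
# matches <em>...</em> tags and, after convert(), leaves a 'latex' tag whose
# attributes carry data-latex-begin='\emph{' and data-latex-end='}', which a
# downstream Translator can render as \emph{...}.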
class Command(Element):
"""
Object for creating latex commands (e.g., \\par).
"""
def __init__(self, command=None, **kwargs):
super(Command, self).__init__(**kwargs)
self._command = command
if self._command is None:
raise mooseutils.MooseException("The 'command' argument variable must be set.")
self._data['data-latex-begin'] = '\\{}'.format(self._command)
class ArgumentCommand(Command):
"""
Object for creating latex commands with an argument (e.g., \\section{foo}).
"""
def convert(self, tag):
super(ArgumentCommand, self).convert(tag)
new = self.curly()
for child in reversed(tag.contents):
new.insert(0, child.extract())
tag.append(new)
class Environment(Command):
"""
Object for creating latex environment (e.g., \\begin{table} ... \\end{table}).
"""
def __init__(self, **kwargs):
kwargs.setdefault('begin_suffix', '\n')
kwargs.setdefault('end_prefix', '\n')
super(Environment, self).__init__(**kwargs)
self._data['data-latex-begin'] = '\\begin{%s}' % self._command
self._data['data-latex-end'] = '\\end{%s}' % self._command
class Heading(Command):
"""
Converts html heading tag to latex section.
"""
def convert(self, tag):
"""
Creates desired section with label.
"""
super(Heading, self).convert(tag)
id_ = tag.get('id', None)
if id_:
string = tag.string.wrap(self.curly())
label = self.new()
label['data-latex-begin'] = '\\label'
string.append(label)
text = self.curly()
text.string = id_
label.append(text)
else:
tag.string.wrap(self.curly())
class PreCode(Environment):
"""
Converts <pre><code> blocks to verbatim latex.
"""
def __init__(self, **kwargs):
kwargs.setdefault('name', 'pre')
kwargs.setdefault('command', 'verbatim')
super(PreCode, self).__init__(**kwargs)
def test(self, tag):
"""
Makes sure <code> block is directly within a <pre>
"""
return super(PreCode, self).test(tag) and (tag.code)
def convert(self, tag):
"""
Sets the <code> block to be converted.
"""
super(PreCode, self).convert(tag)
tag.code.name = 'latex'
class Table(Environment):
"""
    Converts an html table tag to a latex tabular environment.
"""
def __init__(self, **kwargs):
kwargs.setdefault('command', 'tabular')
super(Table, self).__init__(name='table', **kwargs)
def convert(self, tag):
"""
Adds the column settings to the environment.
"""
super(Table, self).convert(tag)
tag['data-latex-begin-suffix'] = ''
cols = self.curly()
cols.string = 'l'*self.numColumns(tag)
cols['data-latex-close'] = '\n'
tag.insert(0, cols)
@staticmethod
def numColumns(tag):
"""
Determines the number of columns.
"""
return len(tag.tbody.find('tr').find_all('td'))
class TableHeaderFooter(Element):
"""
thead, tfoot conversion.
"""
def convert(self, tag):
"""
Wraps table header and footers with horizontal rule.
"""
super(TableHeaderFooter, self).convert(tag)
tag['data-latex-open'] = '\\hline\n'
tag['data-latex-close'] = '\\hline'
class TableItem(Element):
"""
Converts td, th tags.
"""
def convert(self, tag):
"""
Adds closing '&' or '\\\\' to a table item.
"""
super(TableItem, self).convert(tag)
if tag.find_next_sibling(self._name):
tag['data-latex-close'] = ' & '
else:
tag['data-latex-close'] = ' \\\\'
class ListItem(Command):
"""
Convert li tag.
"""
def __init__(self, **kwargs):
super(ListItem, self).__init__(name='li', command='item', **kwargs)
def convert(self, tag):
"""
Adds a new-line to close the tag for the last item.
"""
super(ListItem, self).convert(tag)
tag['data-latex-begin-suffix'] = ' '
if tag.find_next_sibling(self._name):
tag['data-latex-close'] = '\n'
class Image(ArgumentCommand):
"""
Converts <img> tag.
"""
def __init__(self, **kwargs):
kwargs.setdefault('end_suffix', '\n')
super(Image, self).__init__(name='img',
command='includegraphics',
attrs={'src':''},
**kwargs)
def convert(self, tag):
"""
Places the filename in the includegraphics command and errors if the file is not found.
"""
tag.string = tag['src']
super(Image, self).convert(tag)
if not os.path.exists(tag.string):
LOG.error('Image file does not exist: %s', tag.string)
class Figure(Environment):
"""
    Converts <figure> tag.
"""
def __init__(self, **kwargs):
kwargs.setdefault('name', 'figure')
kwargs.setdefault('command', 'figure')
super(Figure, self).__init__(**kwargs)
def convert(self, tag):
"""
Adds label to the figure.
"""
super(Figure, self).convert(tag)
if 'id' in tag.attrs:
label = self.curly()
label.attrs['data-latex-begin-prefix'] = '\\label'
label.attrs['data-latex-end-suffix'] = '\n'
label.string = tag.attrs['id']
tag.insert(0, label)
else:
tag['data-latex-begin'] = '\\begin{%s*}' % self._command
tag['data-latex-end'] = '\\end{%s*}' % self._command
class LinkElement(ArgumentCommand):
"""
Convert <a> to hyperlink.
"""
def __init__(self, **kwargs):
super(LinkElement, self).__init__(name='a', attrs={'href':''}, command='href', **kwargs)
def convert(self, tag):
"""
Extracts the tag href attribute and adds to \\href{} command.
"""
super(LinkElement, self).convert(tag)
url = self.curly()
url.string = tag.get('href', '#')
tag.insert(0, url)
|
Chuban/moose
|
python/MooseDocs/html2latex/elements.py
|
Python
|
lgpl-2.1
| 12,543
|
[
"MOOSE"
] |
7dda4a1b0763079024422559408c58fc377e2de4b3c530b40b6e6a2d6365ac0d
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
hpsspy.os._os
~~~~~~~~~~~~~
Contains the actual functions in :mod:`hpsspy.os`.
"""
from .. import HpssOSError
from ..util import HpssFile, hsi
__all__ = ['chmod', 'listdir', 'makedirs', 'mkdir', 'stat', 'lstat', 'walk']
def chmod(path, mode):
"""Reproduces the behavior of :func:`os.chmod` for HPSS files.
Parameters
----------
path : :class:`str`
File to chmod.
mode : :class:`str` or :class:`int`
Desired file permissions. This mode will be converted to a string.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
"""
out = hsi('chmod', str(mode), path)
if out.startswith('**'):
raise HpssOSError(out)
return
def listdir(path):
"""List the contents of an HPSS directory, similar to :func:`os.listdir`.
Parameters
----------
path : :class:`str`
Directory to examine.
Returns
-------
:class:`list`
A list of :class:`~hpsspy.util.HpssFile` objects.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
"""
from . import linere
out = hsi('ls', '-la', path)
if out.startswith('**'):
raise HpssOSError(out)
lines = out.split('\n')
lspath = path # sometimes you don't get the path echoed back.
files = list()
for f in lines:
if len(f) == 0:
continue
m = linere.match(f)
if m is None:
if f.endswith(':'):
lspath = f.strip(': ')
else:
raise HpssOSError("Could not match line!\n{0}".format(f))
else:
g = m.groups()
files.append(HpssFile(lspath, *g))
#
# Create a unique set of filenames for use below.
#
fileset = set([f.name for f in files])
#
# Go back and identify htar files
#
for f in files:
if f.name.endswith('.tar') and f.name + '.idx' in fileset:
f.ishtar = True
return files
def makedirs(path, mode=None):
"""Reproduces the behavior of :func:`os.makedirs`.
Parameters
----------
path : :class:`str`
Directory to create.
mode : :class:`str`, optional
String representation of the octal directory mode.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
Notes
-----
Unlike :func:`os.makedirs`, attempts to create existing directories raise
no exception.
"""
if mode is None:
out = hsi('mkdir', '-p', path)
else:
out = hsi('mkdir', '-p', '-m', mode, path)
if out.startswith('**'):
raise HpssOSError(out)
return
def mkdir(path, mode=None):
"""Reproduces the behavior of :func:`os.mkdir`.
Parameters
----------
path : :class:`str`
Directory to create.
mode : :class:`str`, optional
String representation of the octal directory mode.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
Notes
-----
Unlike :func:`os.mkdir`, attempts to create existing directories raise no
exception.
"""
if mode is None:
out = hsi('mkdir', path)
else:
out = hsi('mkdir', '-m', mode, path)
if out.startswith('**'):
raise HpssOSError(out)
return
def stat(path, follow_symlinks=True):
"""Perform the equivalent of :func:`os.stat` on the HPSS file `path`.
Parameters
----------
path : :class:`str`
Path to file or directory.
follow_symlinks : :class:`bool`, optional
If ``False``, makes :func:`stat` behave like :func:`os.lstat`.
Returns
-------
:class:`~hpsspy.util.HpssFile`
An object that contains information similar to the data returned by
:func:`os.stat`.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
"""
from . import linere
from os.path import join
out = hsi('ls', '-ld', path)
if out.startswith('**'):
raise HpssOSError(out)
lines = out.split('\n')
lspath = path # sometimes you don't get the path echoed back.
files = list()
for f in lines:
if len(f) == 0:
continue
m = linere.match(f)
if m is None:
if f.endswith(':'):
lspath = f.strip(': ')
else:
raise HpssOSError("Could not match line!\n{0}".format(f))
else:
g = m.groups()
files.append(HpssFile(lspath, *g))
if len(files) != 1:
raise HpssOSError("Non-unique response for {0}!".format(path))
if files[0].islink and follow_symlinks:
return stat(files[0].readlink)
else:
return files[0]
def lstat(path):
"""Perform the equivalent of :func:`os.lstat` on the HPSS file `path`.
Parameters
----------
path : :class:`str`
Path to file or directory.
Returns
-------
:class:`~hpsspy.util.HpssFile`
An object that contains information similar to the data returned by
:func:`os.stat`.
Raises
------
:class:`~hpsspy.HpssOSError`
If the underlying :command:`hsi` reports an error.
"""
return stat(path, follow_symlinks=False)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Traverse a directory tree on HPSS, similar to :func:`os.walk`.
Parameters
----------
top : :class:`str`
Starting directory.
topdown : :class:`bool`, optional
Direction to traverse the directory tree.
onerror : callable, optional
Call this function if an error is detected.
followlinks : :class:`bool`, optional
If ``True`` symlinks to directories are treated as directories.
Returns
-------
iterable
This function can be used in the same way as :func:`os.walk`.
"""
from .path import islink
from os.path import join
#
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
#
try:
names = listdir(top)
except HpssOSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if name.isdir:
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, str(name))
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
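# Illustrative usage sketch (hypothetical HPSS path, not part of the original
# module): `walk` mirrors os.walk, so a recursive listing of a tree looks like
#
#     for dirpath, dirs, files in walk('/hpss/some/project'):
#         for f in files:
#             print(os.path.join(dirpath, str(f)))
#
# where each entry is an HpssFile whose string form is its name, as in the
# join() call used for directories above.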
|
weaverba137/hpsspy
|
hpsspy/os/_os.py
|
Python
|
bsd-3-clause
| 7,074
|
[
"VisIt"
] |
d8743fa29bcff5ef15ef6f22f79c4f95980f8344136b3616fa848a6f200fb77f
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the gaussian module.
"""
import itertools
from contextlib import nullcontext
from astropy.modeling.models import Gaussian1D, Gaussian2D
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..gaussian import centroid_1dg, centroid_2dg, _gaussian1d_moments
from ...utils._optional_deps import HAS_SCIPY # noqa
XCEN = 25.7
YCEN = 26.2
XSTDS = [3.2, 4.0]
YSTDS = [5.7, 4.1]
THETAS = np.array([30., 45.]) * np.pi / 180.
DATA = np.zeros((3, 3))
DATA[0:2, 1] = 1.
DATA[1, 0:2] = 1.
DATA[1, 1] = 2.
# NOTE: the fitting routines in astropy use scipy.optimize
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('x_std', 'y_std', 'theta'),
list(itertools.product(XSTDS, YSTDS, THETAS)))
def test_centroids(x_std, y_std, theta):
model = Gaussian2D(2.4, XCEN, YCEN, x_stddev=x_std, y_stddev=y_std,
theta=theta)
y, x = np.mgrid[0:50, 0:47]
data = model(x, y)
xc, yc = centroid_1dg(data)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
xc, yc = centroid_2dg(data)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
# test with errors
error = np.sqrt(data)
xc, yc = centroid_1dg(data, error=error)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
xc, yc = centroid_2dg(data, error=error)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
# test with mask
mask = np.zeros(data.shape, dtype=bool)
data[10, 10] = 1.e5
mask[10, 10] = True
xc, yc = centroid_1dg(data, mask=mask)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
xc, yc = centroid_2dg(data, mask=mask)
assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('use_mask', [True, False])
def test_centroids_nan_withmask(use_mask):
xc_ref = 24.7
yc_ref = 25.2
model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=5.0, y_stddev=5.0)
y, x = np.mgrid[0:50, 0:50]
data = model(x, y)
data[20, :] = np.nan
if use_mask:
mask = np.zeros(data.shape, dtype=bool)
mask[20, :] = True
nwarn = 0
ctx = nullcontext()
else:
mask = None
nwarn = 1
ctx = pytest.warns(AstropyUserWarning,
match='Input data contains non-finite values')
with ctx as warnlist:
xc, yc = centroid_1dg(data, mask=mask)
assert_allclose([xc, yc], [xc_ref, yc_ref], rtol=0, atol=1.e-3)
if nwarn == 1:
assert len(warnlist) == nwarn
with ctx as warnlist:
xc, yc = centroid_2dg(data, mask=mask)
assert_allclose([xc, yc], [xc_ref, yc_ref], rtol=0, atol=1.e-3)
if nwarn == 1:
assert len(warnlist) == nwarn
@pytest.mark.skipif('not HAS_SCIPY')
def test_invalid_mask_shape():
data = np.zeros((4, 4))
mask = np.zeros((2, 2), dtype=bool)
with pytest.raises(ValueError):
centroid_1dg(data, mask=mask)
with pytest.raises(ValueError):
centroid_2dg(data, mask=mask)
with pytest.raises(ValueError):
_gaussian1d_moments(data, mask=mask)
@pytest.mark.skipif('not HAS_SCIPY')
def test_invalid_error_shape():
error = np.zeros((2, 2), dtype=bool)
with pytest.raises(ValueError):
centroid_1dg(np.zeros((4, 4)), error=error)
with pytest.raises(ValueError):
centroid_2dg(np.zeros((4, 4)), error=error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_centroid_2dg_dof():
data = np.ones((2, 2))
with pytest.raises(ValueError):
centroid_2dg(data)
def test_gaussian1d_moments():
x = np.arange(100)
desired = (75, 50, 5)
g = Gaussian1D(*desired)
data = g(x)
result = _gaussian1d_moments(data)
assert_allclose(result, desired, rtol=0, atol=1.e-6)
data[0] = 1.e5
mask = np.zeros(data.shape).astype(bool)
mask[0] = True
result = _gaussian1d_moments(data, mask=mask)
assert_allclose(result, desired, rtol=0, atol=1.e-6)
data[0] = np.nan
mask = np.zeros(data.shape).astype(bool)
mask[0] = True
with pytest.warns(AstropyUserWarning) as warnlist:
result = _gaussian1d_moments(data, mask=mask)
assert_allclose(result, desired, rtol=0, atol=1.e-6)
assert len(warnlist) == 1
|
astropy/photutils
|
photutils/centroids/tests/test_gaussian.py
|
Python
|
bsd-3-clause
| 4,408
|
[
"Gaussian"
] |
e65a8f0448173e3c3699c7d38d101419552a84af2e8a955a68f5babfc430c3cf
|
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _UpFIRDn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
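# Illustrative sketch (not part of the original module): in 'valid' mode the
# first input must be at least as large as the second in every dimension,
# otherwise the inputs are swapped:
#
#     _inputs_swap_needed('valid', (10,), (3,))   # False -> no swap needed
#     _inputs_swap_needed('valid', (3,), (10,))   # True  -> inputs get swapped
#     _inputs_swap_needed('same',  (3,), (10,))   # False -> only 'valid' is checked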
def correlate(in1, in2, mode='full', method='auto'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
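# Illustrative sketch (not part of the original module): trimming a length-7
# 'full' result down to a length-5 'same' output keeps the centre slice 1:6,
# dropping one element from each end:
#
#     _centered(np.arange(7), (5,))   # -> array([1, 2, 3, 4, 5])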
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
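    # (Illustrative note, not in the original source: next_fast_len rounds each
    #  axis up to the next 5-smooth length, e.g. a length-97 axis is padded to
    #  100, which FFTPACK transforms far faster than a prime length; fslice
    #  trims the padded result back to the true output shape further below.)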
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
    arrays : ndarray or list of ndarrays
        arrays to check if numeric.
    kinds : string-like
        The dtypes of the arrays to be checked. If the dtype.kind of
        any of the arrays is not in this string the function returns False;
        otherwise it returns True.
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
The big O ratios were found to hold across different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this had been tuned on an
early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError('mode is invalid')
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
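# Illustrative sketch (rough numbers, not part of the original module): for a
# 10000-sample signal in 'full' mode a 3-tap kernel keeps the direct method,
# while a 5000-tap kernel tips the estimate in favour of the FFT:
#
#     x, h = np.zeros(10000), np.zeros(3)
#     _fftconv_faster(x, h, 'full')    # False -> direct predicted faster
#     x, h = np.zeros(10000), np.zeros(5000)
#     _fftconv_faster(x, h, 'full')    # True  -> FFT predicted faster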
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = [slice(None, None, -1)] * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
"""
np_conv_ok = volume.ndim == kernel.ndim == 1
return np_conv_ok and (volume.size >= kernel.size or mode != 'same')
def _fftconvolve_valid(volume, kernel):
# fftconvolve doesn't support complex256
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
if volume.dtype == not_fft_conv_supp or kernel.dtype == not_fft_conv_supp:
return False
# for integer input,
    # catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return False
    # fftconvolve only handles numeric arrays (dtype kinds 'buifc')
    if not _numeric_arrays([volume, kernel]):
        return False
return True
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
if volume.dtype.kind in 'ui':
out = np.around(out)
return out.astype(volume.dtype)
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
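Examples
--------
A small illustrative sketch; the spike at the second sample is removed by
the length-3 median window, and the boundary values assume the zero
padding used at the edges:
>>> from scipy import signal
>>> x = np.array([2., 80., 6., 3., 1.])
>>> signal.medfilt(x, kernel_size=3)
array([ 2.,  6.,  6.,  3.,  1.])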
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
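Examples
--------
A minimal usage sketch, applying an adaptive 5x5 smoothing window to an
image (the noise power is estimated from the data because it is not given):
>>> from scipy import signal
>>> from scipy import misc
>>> img = misc.ascent()
>>> filtered_img = signal.wiener(img, (5, 5))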
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
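Examples
--------
A small illustrative sketch; the zero padding assumed at the boundaries is
why the border values are pulled towards zero:
>>> from scipy import signal
>>> x = np.array([[1., 5., 1.],
...               [5., 9., 5.],
...               [1., 5., 1.]])
>>> signal.medfilt2d(x, kernel_size=3)
array([[ 0.,  1.,  0.],
       [ 1.,  5.,  1.],
       [ 0.,  1.,  0.]])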
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
             b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
    Y(z) = ------------------------------------------- X(z)
             a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
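Examples
--------
A sketch of the intended use: filter a signal in two pieces, using
`lfiltic` to carry the filter state across the split so that the result
matches filtering in a single pass:
>>> from scipy import signal
>>> b, a = signal.butter(2, 0.25)
>>> x = np.random.randn(50)
>>> y = signal.lfilter(b, a, x)
>>> # State after the first 30 samples; most recent samples come first.
>>> zi = signal.lfiltic(b, a, y[29:27:-1], x[29:27:-1])
>>> y_rest, _ = signal.lfilter(b, a, x[30:], zi=zi)
>>> np.allclose(y_rest, y[30:])
True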
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
--------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the
instantaneous phase with respect to time. The instantaneous phase corresponds
to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
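Examples
--------
A minimal usage sketch on a random 2-D array; the result is a complex
array with the same shape as the input:
>>> from scipy import signal
>>> x = np.random.randn(64, 64)
>>> xa = signal.hilbert2(x)
>>> xa.shape
(64, 64)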
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
for p in range(2):
h = eval("h%d" % (p + 1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
exec("h%d = h" % (p + 1), globals(), locals())
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
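Examples
--------
A small illustrative example; complex roots are ordered by magnitude:
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> indx
array([0, 2, 3, 1])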
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
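Examples
--------
A sketch of the round trip with `residue`, using the partial fraction
identity ``1 / ((s + 1)(s + 2)) = 1/(s + 1) - 1/(s + 2)``; the recovered
coefficients match the originals up to floating point error:
>>> from scipy import signal
>>> b, a = [1.0], [1.0, 3.0, 2.0]
>>> r, p, k = signal.residue(b, a)
>>> b2, a2 = signal.invres(r, p, k)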
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
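Examples
--------
A sketch of expanding a simple rational function; by partial fractions,
``1 / ((s + 1)(s + 2)) = 1/(s + 1) - 1/(s + 2)``, so the residues are 1
and -1 at the poles -1 and -2 (the ordering follows `unique_roots`):
>>> from scipy import signal
>>> b, a = [1.0], [1.0, 3.0, 2.0]
>>> r, p, k = signal.residue(b, a)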
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
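Examples
--------
A sketch of the intended use together with `invresz`: expand a digital
transfer function into residues, poles and direct terms, then rebuild the
polynomial coefficients from that expansion:
>>> from scipy import signal
>>> b, a = [1.0, -0.1], [1.0, -0.9, 0.2]
>>> r, p, k = signal.residuez(b, a)
>>> b2, a2 = signal.invresz(r, p, k)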
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values of sampled signals that were not intended to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. ``fftfreq(x.shape[axis])``).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * up / float(down)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = asarray(window)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
ufd = _UpFIRDn(h, x.dtype, up, down)
n_pre_remove_end = n_pre_remove + n_out
def apply_remove(x):
"""Apply the upfirdn filter and remove excess"""
return ufd.apply_filter(x)[n_pre_remove:n_pre_remove_end]
y = np.apply_along_axis(apply_remove, axis, x)
return y
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
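Examples
--------
A small sketch: events that occur exactly one period apart are perfectly
phase locked, so the vector strength is (numerically) 1 and the preferred
phase is approximately 0:
>>> from scipy import signal
>>> events = np.array([0.0, 1.0, 2.0, 3.0])
>>> strength, phase = signal.vectorstrength(events, period=1.0)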
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 or axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
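    # With M assembled, the optimal initial conditions are obtained below by
    # solving M @ [x_0; x_{N-1}] ~= (y_bf - y_fb) in the least-squares sense;
    # this is the formula between equations (6) and (7) of [1].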
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
       filtering", IEEE Transactions on Signal Processing, Vol. 44, pp. 988-992,
       1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
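    For instance, for a 5th-order filter designed as second-order sections
    (3 sections, one of them first order, e.g. ``butter(5, 0.1, output='sos')``)
    this gives ``3 * (2*3 + 1 - 1) = 18``, matching the ``3 * max(len(a), len(b))``
    default `filtfilt` would use for the same design in ``(b, a)`` form.  (The
    cutoff 0.1 here is an arbitrary illustrative value.)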
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=None):
"""
Downsample the signal after applying an anti-aliasing filter.
    By default, an order 8 Chebyshev type I filter is used. An order 30 FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. For downsampling factors higher than 13, it is
recommended to call `decimate` multiple times.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 30 for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. A value of ``True`` is
recommended, since a phase shift is generally not desired. Using
``None`` defaults to ``False`` for backwards compatibility. This
default will change to ``True`` in a future release, so it is best to
set this argument explicitly.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is not None and not isinstance(n, int):
raise TypeError("n must be an integer")
if ftype == 'fir':
if n is None:
n = 30
system = dlti(firwin(n+1, 1. / q, window='hamming'), 1.)
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
n = np.max((system.num.size, system.den.size)) - 1
else:
raise ValueError('invalid ftype')
if zero_phase is None:
warnings.warn(" Note: Decimate's zero_phase keyword argument will "
"default to True in a future release. Until then, "
"decimate defaults to one-way filtering for backwards "
"compatibility. Ideally, always set this argument "
"explicitly.", FutureWarning)
zero_phase = False
sl = [slice(None)] * x.ndim
if len(system.den) == 1: # FIR case
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=system.num)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(system.num, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(system.num, system.den, x, axis=axis)
else:
y = lfilter(system.num, system.den, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[sl]
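

# ----------------------------------------------------------------------------
# Hedged demo (editorial addition, not part of the upstream scipy module).
# A minimal end-to-end use of the zero-phase helpers defined above; the test
# signal, the Chebyshev design parameters and the decimation factor are
# illustrative assumptions only.  `cheby1` is the module-level import already
# relied on by `decimate`.
if __name__ == "__main__":
    t = np.linspace(0, 1, 8000, endpoint=False)
    x_demo = np.sin(2 * np.pi * 5 * t) + 0.1 * np.sin(2 * np.pi * 500 * t)

    # Zero-phase low-pass filtering with second-order sections.
    sos_demo = cheby1(4, 0.05, 0.1, output='sos')
    x_smooth = sosfiltfilt(sos_demo, x_demo)

    # Zero-phase decimation by 8: keeps the 5 Hz tone, suppresses 500 Hz.
    y_demo = decimate(x_demo, 8, ftype='iir', zero_phase=True)
    print(x_smooth.shape)  # expected: (8000,)
    print(y_demo.shape)    # expected: (1000,)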
|
bkendzior/scipy
|
scipy/signal/signaltools.py
|
Python
|
bsd-3-clause
| 116,017
|
[
"Gaussian"
] |
c3d47e5345cb8c16482797bf202792784ee429ba50318f5c7e418ad7b1aafec0
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Modules for handling vtkRenderWindowInteractor events"""
from vistrails.core.modules.basic_modules import String, Variant
from vistrails.core.modules.vistrails_module import Module, NotCacheable
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.gui.modules.source_configure import SourceConfigurationWidget
from vistrails.gui.modules.python_source_configure import PythonEditor
import urllib
from identifiers import identifier as vtk_pkg_identifier
################################################################################
class vtkInteractionHandler(NotCacheable, Module):
"""
    vtkInteractionHandler allows users to insert callback code for interacting
with the vtkRenderWindowInteractor InteractionEvent
"""
    # Since vtkCommand is not wrapped in Python, we need to hardcode all event
    # strings from vtkCommand.h
vtkEvents = [
'AnyEvent',
'DeleteEvent',
'StartEvent',
'EndEvent',
'RenderEvent',
'ProgressEvent',
'PickEvent',
'StartPickEvent',
'EndPickEvent',
'AbortCheckEvent',
'ExitEvent',
'LeftButtonPressEvent',
'LeftButtonReleaseEvent',
'MiddleButtonPressEvent',
'MiddleButtonReleaseEvent',
'RightButtonPressEvent',
'RightButtonReleaseEvent',
'EnterEvent',
'LeaveEvent',
'KeyPressEvent',
'KeyReleaseEvent',
'CharEvent',
'ExposeEvent',
'ConfigureEvent',
'TimerEvent',
'MouseMoveEvent',
'MouseWheelForwardEvent',
'MouseWheelBackwardEvent',
'ResetCameraEvent',
'ResetCameraClippingRangeEvent',
'ModifiedEvent',
'WindowLevelEvent',
'StartWindowLevelEvent',
'EndWindowLevelEvent',
'ResetWindowLevelEvent',
'SetOutputEvent',
'ErrorEvent',
'WarningEvent',
'StartInteractionEvent',
'InteractionEvent',
'EndInteractionEvent',
'EnableEvent',
'DisableEvent',
'CreateTimerEvent',
'DestroyTimerEvent',
'PlacePointEvent',
'PlaceWidgetEvent',
'CursorChangedEvent',
'ExecuteInformationEvent',
'RenderWindowMessageEvent',
'WrongTagEvent',
'StartAnimationCueEvent',
'AnimationCueTickEvent',
'EndAnimationCueEvent',
'VolumeMapperRenderEndEvent',
'VolumeMapperRenderProgressEvent',
'VolumeMapperRenderStartEvent',
'VolumeMapperComputeGradientsEndEvent',
'VolumeMapperComputeGradientsProgressEvent',
'VolumeMapperComputeGradientsStartEvent',
'WidgetModifiedEvent',
'WidgetValueChangedEvent',
'WidgetActivateEvent',
'ConnectionCreatedEvent',
'ConnectionClosedEvent',
'DomainModifiedEvent',
'PropertyModifiedEvent',
'UpdateEvent',
'RegisterEvent',
'UnRegisterEvent',
'UpdateInformationEvent']
def __init__(self):
Module.__init__(self)
self.observer = None
self.handler = None
self.shareddata = None
def compute(self):
""" compute() -> None
Actually compute nothing
"""
self.observer = self.force_get_input('Observer')
self.handler = self.force_get_input('Handler', '')
self.shareddata = self.force_get_input_list('SharedData')
if len(self.shareddata)==1:
self.shareddata = self.shareddata[0]
if self.observer:
source = urllib.unquote(self.handler)
observer = self.observer.vtkInstance
for e in vtkInteractionHandler.vtkEvents:
f = e[0].lower() + e[1:]
f = f.replace('Event', 'Handler')
source += ('\nif locals().has_key("%s"):\n' % f +
'\tobserver.AddObserver("%s", ' % e +
'self.eventHandler)\n')
exec(source)
if hasattr(self.observer.vtkInstance, 'PlaceWidget'):
self.observer.vtkInstance.PlaceWidget()
def eventHandler(self, obj, event):
""" eventHandler(obj: vtkObject, event: str) -> None
A proxy for all vtk events to direct to the correct calls
"""
if self.handler!='':
source = urllib.unquote(self.handler)
f = event[0].lower() + event[1:]
f = f.replace('Event', 'Handler')
myGlobals = globals()
myGlobals.update({'self':self})
exec(source + ('\nif locals().has_key("%s"):\n' % f)+
('\t%s(obj, self.shareddata)' % f)) in myGlobals, locals()
def clear(self):
""" clear() -> None
Remove event handler so the object can be freed correctly
"""
# Remove all observers
if self.observer:
for e in vtkInteractionHandler.vtkEvents:
self.observer.vtkInstance.RemoveObservers(e)
Module.clear(self)
def repaintCells(self):
""" repaintCells() -> None
Redraw all cells on the current sheet
"""
from vistrails.packages.spreadsheet.spreadsheet_controller \
import spreadsheetController
from vistrails.packages.spreadsheet.spreadsheet_event \
import RepaintCurrentSheetEvent
spreadsheetController.postEventToSpreadsheet(RepaintCurrentSheetEvent())
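# Hedged illustration (editorial addition, not part of the original VisTrails
# package).  The 'Handler' port is expected to carry URL-quoted Python source
# defining functions named after vtk events with 'Event' replaced by 'Handler'
# (see compute() and eventHandler() above).  For InteractionEvent, a handler
# could look like the following; the function body is invented for
# illustration only:
#
#     def interactionHandler(obj, shareddata):
#         # obj is the vtkInteractorObserver instance that fired the event
#         print obj.GetClassName()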
class HandlerConfigurationWidget(SourceConfigurationWidget):
def __init__(self, module, controller, parent=None):
""" HandlerConfigurationWidget(module: Module,
controller: VistrailController,
parent: QWidget)
-> HandlerConfigurationWidget
        Set up the dialog similarly to PythonSource but with a
different name
"""
SourceConfigurationWidget.__init__(self, module, controller,
PythonEditor, False, False, parent,
portName='Handler')
def registerSelf():
""" registerSelf() -> None
    Register the module with the registry
"""
registry = get_module_registry()
vIO = registry.get_descriptor_by_name(vtk_pkg_identifier,
'vtkInteractorObserver').module
registry.add_module(vtkInteractionHandler, configureWidgetType=HandlerConfigurationWidget)
registry.add_input_port(vtkInteractionHandler, 'Observer', vIO)
registry.add_input_port(vtkInteractionHandler, 'Handler', String, True)
registry.add_input_port(vtkInteractionHandler, 'SharedData', Variant)
registry.add_output_port(vtkInteractionHandler, 'self',
vtkInteractionHandler)
|
Nikea/VisTrails
|
vistrails/packages/vtk/vtkhandler.py
|
Python
|
bsd-3-clause
| 8,755
|
[
"VTK"
] |
3f9a2d497e25688006e39cc5acfebf457c95b2fc1f0be0b0ed0dc55956557911
|
###############################################################################
#
# coverageWindows.py - calculate coverage of windows within sequences
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import sys
import os
import multiprocessing as mp
import logging
import pysam
from numpy import zeros
class ReadLoader:
"""Callback for counting aligned reads with pysam.fetch"""
def __init__(self, refLength, bAllReads, minAlignPer, maxEditDistPer):
self.bAllReads = bAllReads
self.minAlignPer = minAlignPer
self.maxEditDistPer = maxEditDistPer
self.numReads = 0
self.numMappedReads = 0
self.numDuplicates = 0
self.numSecondary = 0
self.numFailedQC = 0
self.numFailedAlignLen = 0
self.numFailedEditDist = 0
self.numFailedProperPair = 0
self.coverage = zeros(refLength)
def __call__(self, read):
self.numReads += 1
if read.is_unmapped:
pass
elif read.is_duplicate:
self.numDuplicates += 1
elif read.is_secondary:
self.numSecondary += 1
elif read.is_qcfail:
self.numFailedQC += 1
elif read.alen < self.minAlignPer * read.rlen:
self.numFailedAlignLen += 1
elif read.opt('NM') > self.maxEditDistPer * read.rlen:
self.numFailedEditDist += 1
elif not self.bAllReads and not read.is_proper_pair:
self.numFailedProperPair += 1
else:
self.numMappedReads += 1
# Note: the alignment length (alen) is used instead of the
# read length (rlen) as this bring the calculated coverage
# in line with 'samtools depth' (at least when the min
# alignment length and edit distance thresholds are zero).
self.coverage[read.pos:read.pos + read.alen] += 1.0
class CoverageStruct():
def __init__(self, seqLen, mappedReads, coverage):
self.seqLen = seqLen
self.mappedReads = mappedReads
self.coverage = coverage
class CoverageWindows():
"""Calculate coverage of all sequences."""
def __init__(self, threads):
self.logger = logging.getLogger()
self.totalThreads = threads
def run(self, binFiles, bamFile, bAllReads, minAlignPer, maxEditDistPer, windowSize):
"""Calculate coverage of full sequences and windows."""
        # make sure the BAM file has been sorted and indexed
if not os.path.exists(bamFile + '.bai'):
            self.logger.error('  [Error] BAM file is not sorted and indexed: ' + bamFile + '\n')
sys.exit()
# calculate coverage of each BAM file
self.logger.info(' Calculating coverage of windows.')
coverageInfo = mp.Manager().dict()
coverageInfo = self.__processBam(bamFile, bAllReads, minAlignPer, maxEditDistPer, windowSize, coverageInfo)
return coverageInfo
def __processBam(self, bamFile, bAllReads, minAlignPer, maxEditDistPer, windowSize, coverageInfo):
"""Calculate coverage of sequences in BAM file."""
# determine coverage for each reference sequence
workerQueue = mp.Queue()
writerQueue = mp.Queue()
bamfile = pysam.Samfile(bamFile, 'rb')
refSeqIds = bamfile.references
refSeqLens = bamfile.lengths
# populate each thread with reference sequence to process
# Note: reference sequences are sorted by number of mapped reads
# so it is important to distribute reads in a sensible way to each
# of the threads
refSeqLists = [[] for _ in range(self.totalThreads)]
refLenLists = [[] for _ in range(self.totalThreads)]
threadIndex = 0
incDir = 1
for refSeqId, refLen in zip(refSeqIds, refSeqLens):
refSeqLists[threadIndex].append(refSeqId)
refLenLists[threadIndex].append(refLen)
threadIndex += incDir
if threadIndex == self.totalThreads:
threadIndex = self.totalThreads - 1
incDir = -1
elif threadIndex == -1:
threadIndex = 0
incDir = 1
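        # The back-and-forth (zig-zag) assignment above deals the sequences out
        # so that, with the input ordered by mapped-read count, the expected
        # workload is balanced across the worker threads.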
for i in range(self.totalThreads):
workerQueue.put((refSeqLists[i], refLenLists[i]))
for _ in range(self.totalThreads):
workerQueue.put((None, None))
try:
workerProc = [mp.Process(target=self.__workerThread, args=(bamFile, bAllReads, minAlignPer, maxEditDistPer, windowSize, workerQueue, writerQueue)) for _ in range(self.totalThreads)]
writeProc = mp.Process(target=self.__writerThread, args=(coverageInfo, len(refSeqIds), writerQueue))
writeProc.start()
for p in workerProc:
p.start()
for p in workerProc:
p.join()
writerQueue.put((None, None, None, None, None, None, None, None, None, None, None, None))
writeProc.join()
except:
# make sure all processes are terminated
for p in workerProc:
p.terminate()
writeProc.terminate()
return coverageInfo
def __workerThread(self, bamFile, bAllReads, minAlignPer, maxEditDistPer, windowSize, queueIn, queueOut):
"""Process each data item in parallel."""
while True:
seqIds, seqLens = queueIn.get(block=True, timeout=None)
            if seqIds is None:
break
bamfile = pysam.Samfile(bamFile, 'rb')
for seqId, seqLen in zip(seqIds, seqLens):
readLoader = ReadLoader(seqLen, bAllReads, minAlignPer, maxEditDistPer)
bamfile.fetch(seqId, 0, seqLen, callback=readLoader)
start = 0
end = windowSize
windowCoverages = []
while(end < seqLen):
windowCoverages.append(sum(readLoader.coverage[start:end]) / windowSize)
start = end
try:
end += windowSize
except:
print '*****************'
print end
print windowSize
print '******************'
coverage = float(sum(readLoader.coverage)) / seqLen
queueOut.put((seqId, seqLen, coverage, windowCoverages, readLoader.numReads,
readLoader.numDuplicates, readLoader.numSecondary, readLoader.numFailedQC,
readLoader.numFailedAlignLen, readLoader.numFailedEditDist,
readLoader.numFailedProperPair, readLoader.numMappedReads))
bamfile.close()
def __writerThread(self, coverageInfo, numRefSeqs, writerQueue):
"""Store or write results of worker threads in a single thread."""
totalReads = 0
totalDuplicates = 0
totalSecondary = 0
totalFailedQC = 0
totalFailedAlignLen = 0
totalFailedEditDist = 0
totalFailedProperPair = 0
totalMappedReads = 0
processedRefSeqs = 0
while True:
seqId, seqLen, coverage, windowCoverages, numReads, numDuplicates, numSecondary, numFailedQC, numFailedAlignLen, numFailedEditDist, numFailedProperPair, numMappedReads = writerQueue.get(block=True, timeout=None)
            if seqId is None:
break
if self.logger.getEffectiveLevel() <= logging.INFO:
processedRefSeqs += 1
statusStr = ' Finished processing %d of %d (%.2f%%) reference sequences.' % (processedRefSeqs, numRefSeqs, float(processedRefSeqs) * 100 / numRefSeqs)
sys.stderr.write('%s\r' % statusStr)
sys.stderr.flush()
totalReads += numReads
totalDuplicates += numDuplicates
totalSecondary += numSecondary
totalFailedQC += numFailedQC
totalFailedAlignLen += numFailedAlignLen
totalFailedEditDist += numFailedEditDist
totalFailedProperPair += numFailedProperPair
totalMappedReads += numMappedReads
coverageInfo[seqId] = [coverage, windowCoverages]
if self.logger.getEffectiveLevel() <= logging.INFO:
sys.stderr.write('\n')
print ''
print ' # total reads: %d' % totalReads
print ' # properly mapped reads: %d (%.1f%%)' % (totalMappedReads, float(totalMappedReads) * 100 / totalReads)
print ' # duplicate reads: %d (%.1f%%)' % (totalDuplicates, float(totalDuplicates) * 100 / totalReads)
print ' # secondary reads: %d (%.1f%%)' % (totalSecondary, float(totalSecondary) * 100 / totalReads)
print ' # reads failing QC: %d (%.1f%%)' % (totalFailedQC, float(totalFailedQC) * 100 / totalReads)
print ' # reads failing alignment length: %d (%.1f%%)' % (totalFailedAlignLen, float(totalFailedAlignLen) * 100 / totalReads)
print ' # reads failing edit distance: %d (%.1f%%)' % (totalFailedEditDist, float(totalFailedEditDist) * 100 / totalReads)
print ' # reads not properly paired: %d (%.1f%%)' % (totalFailedProperPair, float(totalFailedProperPair) * 100 / totalReads)
print ''
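

# Hedged usage sketch (editorial addition, not part of the original CheckM
# source).  Given a position-sorted and indexed BAM file, window coverages
# might be computed along these lines; the file name and threshold values
# below are illustrative assumptions only.
#
#     cw = CoverageWindows(threads=4)
#     coverageInfo = cw.run(binFiles=None, bamFile='mapping.bam',
#                           bAllReads=False, minAlignPer=0.98,
#                           maxEditDistPer=0.02, windowSize=5000)
#     # coverageInfo[seqId] -> [mean coverage, list of window coverages]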
|
fw1121/CheckM
|
checkm/coverageWindows.py
|
Python
|
gpl-3.0
| 10,582
|
[
"pysam"
] |
8e3fca0a4d3ab620e8f19476bf5b7d701c59b8f6e66ddd69d6e3536d1ac1a811
|
from ase.ga.data import PrepareDB
from ase.ga.startgenerator import StartGenerator
from ase.ga.utilities import closest_distances_generator
from ase.ga.utilities import get_all_atom_types
from ase.constraints import FixAtoms
import numpy as np
from ase.lattice.surface import fcc111
db_file = 'gadb.db'
# create the surface
slab = fcc111('Au', size=(4, 4, 1), vacuum=10.0, orthogonal=True)
slab.set_constraint(FixAtoms(mask=len(slab) * [True]))
# define the volume in which the adsorbed cluster is optimized
# the volume is defined by a corner position (p0)
# and three spanning vectors (v1, v2, v3)
pos = slab.get_positions()
cell = slab.get_cell()
p0 = np.array([0., 0., max(pos[:, 2]) + 2.])
v1 = cell[0, :] * 0.8
v2 = cell[1, :] * 0.8
v3 = cell[2, :]
v3[2] = 3.
# Define the composition of the atoms to optimize
atom_numbers = 2 * [47] + 2 * [79]
# define the closest distance two atoms of a given species can be to each other
unique_atom_types = get_all_atom_types(slab, atom_numbers)
cd = closest_distances_generator(atom_numbers=unique_atom_types,
ratio_of_covalent_radii=0.7)
# create the starting population
sg = StartGenerator(slab=slab,
atom_numbers=atom_numbers,
closest_allowed_distances=cd,
box_to_place_in=[p0, [v1, v2, v3]])
# generate the starting population
population_size = 5
starting_population = [sg.get_new_candidate() for i in range(population_size)]
# from ase.visualize import view # uncomment these lines
# view(starting_population) # to see the starting population
# create the database to store information in
d = PrepareDB(db_file_name=db_file,
simulation_cell=slab,
stoichiometry=atom_numbers,)
# population_size=population_size)
for a in starting_population:
d.add_unrelaxed_candidate(a)
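
# Hedged follow-up sketch (editorial addition, not part of the original
# script): the unrelaxed candidates stored above could later be read back for
# relaxation, e.g. with ase.ga.data.DataConnection:
#
#     from ase.ga.data import DataConnection
#     dc = DataConnection(db_file)
#     while dc.get_number_of_unrelaxed_candidates() > 0:
#         a = dc.get_an_unrelaxed_candidate()
#         ...  # relax `a` with a chosen calculator, then store the result
#
# The DataConnection API names above are assumptions based on the ASE GA
# module and are not used by this script itself.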
|
suttond/MODOI
|
ase/test/ga/basic_example_create_database.py
|
Python
|
lgpl-3.0
| 1,878
|
[
"ASE"
] |
57248a8086a12402e44b7e050cce75b20b1edd7eba0d2f61dcb3334f7f9f7954
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2005 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: sampleposteddata.py
This page lists the data posted by a form.
File Authors:
Andrew Liu (andrew@liuholdings.com)
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table width="100%" border="1" cellspacing="0" bordercolor="#999999">
<tr style="FONT-WEIGHT: bold; COLOR: #dddddd; BACKGROUND-COLOR: #999999">
<td nowrap>Field Name </td>
<td>Value</td>
</tr>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<td valign="top" nowrap><b>%s</b></td>
<td width="100%%">%s</td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
|
dapfru/gladiators
|
parsek/cls/editor/_samples/py/sampleposteddata.py
|
Python
|
gpl-2.0
| 1,909
|
[
"VisIt"
] |
84c486c81784b9a24555d2da9853fb7dbe756f110aef79b2e699926946d6b0eb
|
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from catalogue import models
from catalogue.test_utils import BookInfoStub, PersonStub, WLTestCase, info_args
from django.core.files.base import ContentFile
class VisitTest(WLTestCase):
"""Simply create some objects and visit some views."""
def setUp(self):
WLTestCase.setUp(self)
author = PersonStub(("Jane",), "Doe")
book_info = BookInfoStub(author=author, genre="Sielanka",
epoch='Epoch', kind="Kind", **info_args("A book"))
self.book = models.Book.from_text_and_meta(ContentFile('''
<utwor>
<opowiadanie>
<akap>
<begin id="b1" />
<motyw id="m1">Sielanka</motyw>
Test
<end id="e1" />
</akap>
</opowiadanie>
</utwor>
'''), book_info)
self.collection = models.Collection.objects.create(
title='Biblioteczka Boya', slug='boy', book_slugs='a-book')
def test_visit_urls(self):
""" book description should return authors, ancestors, book """
url_map = {
200: [
'',
'lektury/',
'lektury/boy/',
'nowe/',
'lektura/a-book/',
'lektura/a-book.html',
'lektura/a-book/motyw/sielanka/',
'motyw/sielanka/',
'sielanka/',
'autor/jane-doe/',
'daisy/',
# 'autor/jane-doe/gatunek/genre/',
# 'autor/jane-doe/gatunek/genre/motyw/sielanka/',
],
404: [
'lektury/nonexistent/', # Nonexistent Collection.
'lektura/nonexistent/', # Nonexistent Book.
'lektura/nonexistent.html', # Nonexistent Book's HTML.
'lektura/nonexistent/motyw/sielanka/', # Nonexistent Book's theme.
'lektura/a-book/motyw/nonexistent/', # Nonexistent theme in a Book.
'autor/nonexistent/', # Nonexistent author.
'motyw/nonexistent/', # Nonexistent theme.
'zh.json', # Nonexistent language.
]
}
prefix = '/katalog/'
for expected_status, urls in url_map.items():
for url in urls:
status = self.client.get(prefix + url).status_code
self.assertEqual(
status, expected_status,
"Wrong status code for '%s'. Expected %d, got %d." % (prefix + url, expected_status, status))
|
fnp/wolnelektury
|
src/catalogue/tests/test_visit.py
|
Python
|
agpl-3.0
| 2,750
|
[
"VisIt"
] |
cdad4b13eb91443b67ebe18d13145c4ec3f59b328c91e989deb64df6dcc74af2
|
from degas import pipeline
import os
from degas.gridding import gridGalaxy
from degas import catalogs
ppo = False
gallist = ['IC0342']
degasdir = '/mnt/bigdata/erosolow/surveys/DEGAS/'
datadir='/mnt/bigdata/erosolow/surveys/DEGAS/'
#catalogs.updateLogs('ObservationLog.csv')
#pipeline.reduceAll(release='QA0', galaxyList=gallist,
# OffType='PCA')
gridGalaxy(galaxy='IC0342', setup='12CO',
release='QA0', datadir=datadir,
PostprocOnly=ppo)
gridGalaxy(galaxy='IC0342', setup='HCN_HCO+',
release='QA0', datadir=datadir, PostprocOnly=ppo)
gridGalaxy(galaxy='IC0342', setup='13CO_C18O',
release='QA0', datadir=datadir, PostprocOnly=ppo)
HCNgals = [
'NGC4038',
'NGC2146',
'NGC6946',
'NGC7331',
'NGC5248',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC3521',
'NGC4414',
'NGC0337',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4535',
'NGC4569',
]
HCNgals=['IC0342']
COgals = [
'NGC4038',
'NGC2146',
'NGC7331',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC0337',
'NGC4569',
'NGC3521',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4414',
'NGC4535',
'IC0342',
]
# gridGalaxy(galaxy='NGC5055', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC5055', setup='HCN_HCO+', release='QA0', datadir=datadir)
#gridGalaxy(galaxy='NGC7331', setup='HCN_HCO+',
# release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC6946', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC3521', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='HCN_HCO+', release='QA0', datadir=datadir)
|
low-sky/degas
|
degas/examples/ic342test.py
|
Python
|
gpl-3.0
| 3,020
|
[
"Galaxy"
] |
b58ded56ea93d57da170ff4285bb5cbc6c96e872269dbc679b084eb06dfa772b
|
"""Scraper for United States District Court for the District of Columbia
CourtID: dcd
Court Short Name: D.D.C.
Author: V. David Zvenyach
Date created: 2014-02-27
Substantially Revised: Brian W. Carver, 2014-03-28
"""
from juriscraper.opinions.united_states_backscrapers.federal_district import dcd_2013
class Site(dcd_2013.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.url = 'https://ecf.dcd.uscourts.gov/cgi-bin/Opinions.pl?2010'
|
brianwc/juriscraper
|
opinions/united_states_backscrapers/federal_district/dcd_2010.py
|
Python
|
bsd-2-clause
| 507
|
[
"Brian"
] |
1631addd54485221a1a4f38f515b360c45f3e42933928cf28569eed5eaa4f330
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for Exonerate vulgar output format."""
import re
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._py3k import zip
from ._base import _BaseExonerateParser, _BaseExonerateIndexer, _STRAND_MAP
__all__ = ['ExonerateVulgarParser', 'ExonerateVulgarIndexer']
# precompile regex
_RE_VULGAR = re.compile(r"""^vulgar:\s+
(\S+)\s+(\d+)\s+(\d+)\s+([\+-\.])\s+ # query: ID, start, end, strand
(\S+)\s+(\d+)\s+(\d+)\s+([\+-\.])\s+ # hit: ID, start, end, strand
(\d+)(\s+.*)$ # score, vulgar components
""", re.VERBOSE)
_RE_VCOMP = re.compile(r"""
\s+(\S+) # vulgar label (C/M: codon/match, G: gap, N: ner, 5/3: splice
# site, I: intron, S: split codon, F: frameshift)
\s+(\d+) # how many residues to advance in query sequence
\s+(\d+) # how many residues to advance in hit sequence
""", re.VERBOSE)
def parse_vulgar_comp(hsp, vulgar_comp):
"""Parses the vulgar components present in the hsp dictionary."""
# containers for block coordinates
qstarts, qends, hstarts, hends = \
[hsp['query_start']], [], [hsp['hit_start']], []
# containers for split codons
hsp['query_split_codons'], hsp['hit_split_codons'] = [], []
# containers for ner blocks
hsp['query_ner_ranges'], hsp['hit_ner_ranges'] = [], []
# sentinels for tracking query and hit positions
qpos, hpos = hsp['query_start'], hsp['hit_start']
# multiplier for determining sentinel movement
qmove = 1 if hsp['query_strand'] >= 0 else -1
hmove = 1 if hsp['hit_strand'] >= 0 else -1
vcomps = re.findall(_RE_VCOMP, vulgar_comp)
for idx, match in enumerate(vcomps):
label, qstep, hstep = match[0], int(match[1]), int(match[2])
# check for label, must be recognized
assert label in 'MCGF53INS', "Unexpected vulgar label: %r" % label
# match, codon, or gaps
if label in 'MCGS':
# if the previous comp is not an MCGS block, it's the
# start of a new block
if vcomps[idx - 1][0] not in 'MCGS':
qstarts.append(qpos)
hstarts.append(hpos)
# other labels
# store the values in the hsp dict as a tuple of (start, stop)
# we're not doing anything if the label is in '53IN', as these
# basically tell us what the inter-block coordinates are and
# inter-block coordinates are automatically calculated by
# and HSP property
if label == 'S':
# get start and stop from parsed values
qstart, hstart = qpos, hpos
qend = qstart + qstep * qmove
hend = hstart + hstep * hmove
# adjust the start-stop ranges
sqstart, sqend = min(qstart, qend), max(qstart, qend)
shstart, shend = min(hstart, hend), max(hstart, hend)
# split codons
# XXX: is it possible to have a frameshift that introduces
# a codon split? If so, this may need a different treatment..
hsp['query_split_codons'].append((sqstart, sqend))
hsp['hit_split_codons'].append((shstart, shend))
# move sentinels accordingly
qpos += qstep * qmove
hpos += hstep * hmove
# append to ends if the next comp is not an MCGS block or
# if it's the last comp
if idx == len(vcomps) - 1 or \
(label in 'MCGS' and vcomps[idx + 1][0] not in 'MCGS'):
qends.append(qpos)
hends.append(hpos)
# adjust coordinates
for seq_type in ('query_', 'hit_'):
strand = hsp[seq_type + 'strand']
# switch coordinates if strand is < 0
if strand < 0:
# switch the starts and ends
hsp[seq_type + 'start'], hsp[seq_type + 'end'] = \
hsp[seq_type + 'end'], hsp[seq_type + 'start']
if seq_type == 'query_':
qstarts, qends = qends, qstarts
else:
hstarts, hends = hends, hstarts
# set start and end ranges
hsp['query_ranges'] = list(zip(qstarts, qends))
hsp['hit_ranges'] = list(zip(hstarts, hends))
return hsp
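# Hedged illustration (editorial addition, not part of the original module):
# a vulgar line that _RE_VULGAR is written to match looks roughly like
#
#     vulgar: query1 0 27 + target1 1000 1027 + 135 M 27 27
#
# where the trailing "M 27 27" triples are the components consumed by
# _RE_VCOMP in parse_vulgar_comp above.  The identifiers, coordinates and
# score in this line are invented for illustration only.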
class ExonerateVulgarParser(_BaseExonerateParser):
"""Parser for Exonerate vulgar strings."""
_ALN_MARK = 'vulgar'
def parse_alignment_block(self, header):
qresult = header['qresult']
hit = header['hit']
hsp = header['hsp']
self.read_until(lambda line: line.startswith('vulgar'))
vulgars = re.search(_RE_VULGAR, self.line)
# if the file has c4 alignments
# check if vulgar values match our previously parsed header values
if self.has_c4_alignment:
assert qresult['id'] == vulgars.group(1)
assert hsp['query_start'] == vulgars.group(2)
assert hsp['query_end'] == vulgars.group(3)
assert hsp['query_strand'] == vulgars.group(4)
assert hit['id'] == vulgars.group(5)
assert hsp['hit_start'] == vulgars.group(6)
assert hsp['hit_end'] == vulgars.group(7)
assert hsp['hit_strand'] == vulgars.group(8)
assert hsp['score'] == vulgars.group(9)
else:
qresult['id'] = vulgars.group(1)
hsp['query_start'] = vulgars.group(2)
hsp['query_end'] = vulgars.group(3)
hsp['query_strand'] = vulgars.group(4)
hit['id'] = vulgars.group(5)
hsp['hit_start'] = vulgars.group(6)
hsp['hit_end'] = vulgars.group(7)
hsp['hit_strand'] = vulgars.group(8)
hsp['score'] = vulgars.group(9)
# adjust strands
hsp['hit_strand'] = _STRAND_MAP[hsp['hit_strand']]
hsp['query_strand'] = _STRAND_MAP[hsp['query_strand']]
# cast coords into ints
hsp['query_start'] = int(hsp['query_start'])
hsp['query_end'] = int(hsp['query_end'])
hsp['hit_start'] = int(hsp['hit_start'])
hsp['hit_end'] = int(hsp['hit_end'])
# cast score into int
hsp['score'] = int(hsp['score'])
# store vulgar line and parse it
# rstrip to remove line endings (otherwise gives errors in Windows)
hsp['vulgar_comp'] = vulgars.group(10).rstrip()
hsp = parse_vulgar_comp(hsp, hsp['vulgar_comp'])
return {'qresult': qresult, 'hit': hit, 'hsp': hsp}
class ExonerateVulgarIndexer(_BaseExonerateIndexer):
"""Indexer class for exonerate vulgar lines."""
_parser = ExonerateVulgarParser
_query_mark = _as_bytes('vulgar')
def get_qresult_id(self, pos):
"""Returns the query ID of the nearest vulgar line."""
handle = self._handle
handle.seek(pos)
# get line, check if it's a vulgar line, and get query ID
line = handle.readline()
assert line.startswith(self._query_mark), line
id = re.search(_RE_VULGAR, _bytes_to_string(line))
return id.group(1)
def get_raw(self, offset):
"""Returns the raw bytes string of a QueryResult object from the given offset."""
handle = self._handle
handle.seek(offset)
qresult_key = None
qresult_raw = _as_bytes('')
while True:
line = handle.readline()
if not line:
break
elif line.startswith(self._query_mark):
cur_pos = handle.tell() - len(line)
if qresult_key is None:
qresult_key = self.get_qresult_id(cur_pos)
else:
curr_key = self.get_qresult_id(cur_pos)
if curr_key != qresult_key:
break
qresult_raw += line
return qresult_raw
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/SearchIO/ExonerateIO/exonerate_vulgar.py
|
Python
|
mit
| 8,143
|
[
"Biopython"
] |
da4fb35af42367998b4b616b7962f0c4692adf4a08e6447fec742cbf4a3ecf26
|
"""
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Mixture of Experts
"""
# TODO : choice of the surrogate model experts to be used
# TODO : support for best number of clusters
# TODO : add factory to get proper surrogate model object
# TODO : implement verbosity 'print_global'
# TODO : documentation
import numpy as np
import warnings
OLD_SKLEARN = False
try: # scikit-learn < 0.20.0
from sklearn.mixture import GMM as GaussianMixture
OLD_SKLEARN = True
except ImportError:
from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.utils.misc import compute_rms_error
warnings.filterwarnings("ignore", category=DeprecationWarning)
class MOE(SurrogateBasedApplication):
def _initialize(self):
super(MOE, self)._initialize()
declare = self.options.declare
declare("xt", None, types=np.ndarray, desc="Training inputs")
declare("yt", None, types=np.ndarray, desc="Training outputs")
declare(
"ct",
None,
types=np.ndarray,
desc="Training derivative outputs used for clustering",
)
declare("xtest", None, types=np.ndarray, desc="Test inputs")
declare("ytest", None, types=np.ndarray, desc="Test outputs")
declare("n_clusters", 2, types=int, desc="Number of clusters")
declare(
"smooth_recombination",
True,
types=bool,
desc="Continuous cluster transition",
)
declare(
"heaviside_optimization",
False,
types=bool,
desc="Optimize Heaviside scaling factor when smooth recombination is used",
)
declare(
"derivatives_support",
False,
types=bool,
desc="Use only experts that support derivatives prediction",
)
declare(
"variances_support",
False,
types=bool,
desc="Use only experts that support variance prediction",
)
# TODO: should we add leaf surrogate models options?
# for name, smclass in self._surrogate_type.items():
# sm_options = smclass().options
# declare(name+'_options', sm_options._dict, types=dict, desc=name+' options dictionary')
self.x = None
self.y = None
self.c = None
self.n_clusters = None
self.smooth_recombination = None
self.heaviside_optimization = None
self.heaviside_factor = 1.0
self.experts = [
"KRG",
"KPLS",
"KPLSK",
"LS",
"QP",
"RBF",
"IDW",
"RMTB",
"RMTC",
]
self.xt = None
self.yt = None
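
    # Hedged usage sketch (editorial addition, not part of the SMT source):
    #
    #     moe = MOE(n_clusters=2, smooth_recombination=True)
    #     moe.set_training_values(xt, yt)   # xt: (nt, nx), yt: (nt, 1)
    #     moe.train()
    #     y = moe.predict_values(x_new)
    #
    # The option names follow the declarations above; the array shapes and the
    # keyword-argument constructor style are assumptions based on other SMT
    # surrogates.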
def set_training_values(self, xt, yt, name=None):
"""
Set training data (values).
Parameters
----------
xt : np.ndarray[nt, nx] or np.ndarray[nt]
The input values for the nt training points.
yt : np.ndarray[nt, ny] or np.ndarray[nt]
The output values for the nt training points.
name : str or None
An optional label for the group of training points being set.
This is only used in special situations (e.g., multi-fidelity applications).
"""
self.xt = xt
self.yt = yt
def train(self):
"""
        Support for the surrogate model API.
Build and train the mixture of experts surrogate.
"""
if self.xt is not None and self.yt is not None:
# set_training_values has been called
self.x = x = self.xt
self.y = y = self.yt
else:
self.x = x = self.options["xt"]
self.y = y = self.options["yt"]
self.c = c = self.options["ct"]
        if self.c is None:
self.c = c = y
self.n_clusters = self.options["n_clusters"]
self.smooth_recombination = self.options["smooth_recombination"]
self.heaviside_optimization = (
self.options["smooth_recombination"]
and self.options["heaviside_optimization"]
)
self.heaviside_factor = 1.0
self._check_inputs()
self.expert_types = self._select_expert_types()
self.experts = []
# Set test values and trained values
xtest = self.options["xtest"]
ytest = self.options["ytest"]
values = np.c_[x, y, c]
test_data_present = xtest is not None and ytest is not None
if test_data_present:
self.test_values = np.c_[xtest, ytest]
self.training_values = values
else:
self.test_values, self.training_values = self._extract_part(values, 10)
self.ndim = nx = x.shape[1]
xt = self.training_values[:, 0:nx]
yt = self.training_values[:, nx : nx + 1]
ct = self.training_values[:, nx + 1 :]
# Clustering
self.cluster = GaussianMixture(
n_components=self.n_clusters, covariance_type="full", n_init=20
)
self.cluster.fit(np.c_[xt, ct])
if not self.cluster.converged_:
raise Exception("Clustering not converged")
# Choice of the experts and training
self._fit(xt, yt, ct)
xtest = self.test_values[:, 0:nx]
ytest = self.test_values[:, nx : nx + 1]
# Heaviside factor
if self.heaviside_optimization and self.n_clusters > 1:
self.heaviside_factor = self._find_best_heaviside_factor(xtest, ytest)
print("Best Heaviside factor = {}".format(self.heaviside_factor))
self.distribs = self._create_clusters_distributions(self.heaviside_factor)
if not test_data_present:
# if we have used part of data to validate, fit on overall data
self._fit(x, y, c, new_model=False)
def predict_values(self, x):
"""
Predict the output values at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
Returns
-------
y : np.ndarray[nt, ny]
Output values at the prediction points.
"""
if self.smooth_recombination:
y = self._predict_smooth_output(x)
else:
y = self._predict_hard_output(x)
return y
def _check_inputs(self):
"""
        Check that the input data given by the client is correct.
        Raise ValueError with a relevant message otherwise.
"""
if self.x is None or self.y is None:
raise ValueError("check x and y values")
if self.x.shape[0] != self.y.shape[0]:
            raise ValueError(
                "The number of input points %d doesn't match the number of output points %d."
                % (self.x.shape[0], self.y.shape[0])
            )
if self.y.shape[0] != self.c.shape[0]:
            raise ValueError(
                "The number of output points %d doesn't match the number of criterion weights %d."
                % (self.y.shape[0], self.c.shape[0])
            )
# choice of number of cluster
max_n_clusters = int(len(self.x) / 10) + 1
if self.n_clusters > max_n_clusters:
            print("Number of clusters should be at most {0}".format(max_n_clusters))
raise ValueError(
"The number of clusters is too high considering the number of points"
)
def _select_expert_types(self):
"""
Select relevant surrogate models (experts) regarding MOE options
"""
prototypes = {
name: smclass()
for name, smclass in self._surrogate_type.items()
if name in self.experts
}
if self.options["derivatives_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["derivatives"]
}
if self.options["variances_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["variances"]
}
return {name: self._surrogate_type[name] for name in prototypes}
def _fit(self, x_trained, y_trained, c_trained, new_model=True):
"""
        Find the best model for each cluster (clustering already done) and train it if new_model is True;
        otherwise retrain the already-chosen expert of each cluster on the given points.
Arguments
---------
- x_trained: array_like
Input training samples
- y_trained: array_like
Output training samples
- c_trained: array_like
Clustering training samples
- new_model : bool (optional)
Set true to search the best local model
"""
self.distribs = self._create_clusters_distributions(self.heaviside_factor)
cluster_classifier = self.cluster.predict(np.c_[x_trained, c_trained])
# sort trained_values for each cluster
clusters = self._cluster_values(np.c_[x_trained, y_trained], cluster_classifier)
# find model for each cluster
for i in range(self.n_clusters):
if new_model:
model = self._find_best_model(clusters[i])
self.experts.append(model)
            else: # retrain the existing experts with the given training values
trained_values = np.array(clusters[i])
x_trained = trained_values[:, 0 : self.ndim]
y_trained = trained_values[:, self.ndim]
self.experts[i].set_training_values(x_trained, y_trained)
self.experts[i].train()
def _predict_hard_output(self, x):
"""
This method predicts the output of a x samples for a
discontinuous recombination.
Arguments
---------
- x : array_like
x samples
Return
------
- predicted_values : array_like
predicted output
"""
predicted_values = []
probs = self._proba_cluster(x)
sort_cluster = np.apply_along_axis(np.argmax, 1, probs)
for i in range(len(sort_cluster)):
model = self.experts[sort_cluster[i]]
predicted_values.append(model.predict_values(np.atleast_2d(x[i]))[0])
predicted_values = np.array(predicted_values)
return predicted_values
def _predict_smooth_output(self, x, distribs=None):
"""
This method predicts the output of x with a smooth recombination.
Arguments:
----------
- x: np.ndarray
x samples
- distribs: distribution list (optional)
array of membership distributions (use self ones if None)
Returns
-------
- predicted_values : array_like
predicted output
"""
predicted_values = []
if distribs is None:
distribs = self.distribs
sort_proba = self._proba_cluster(x, distribs)
for i in range(len(sort_proba)):
recombined_value = 0
for j in range(len(self.experts)):
recombined_value = (
recombined_value
+ self.experts[j].predict_values(np.atleast_2d(x[i]))[0]
* sort_proba[i][j]
)
predicted_values.append(recombined_value)
predicted_values = np.array(predicted_values)
return predicted_values
@staticmethod
def _extract_part(values, quantile):
"""
Divide the values list in quantile parts to return one part
of (num/quantile) values out of num values.
Arguments
----------
- values : np.ndarray[num, -1]
the values list to extract from
- quantile : int
the quantile
Returns
-------
- extracted, remaining : np.ndarray, np.ndarray
the extracted values part, the remaining values
"""
num = values.shape[0]
indices = np.arange(0, num, quantile) # uniformly distributed
mask = np.zeros(num, dtype=bool)
mask[indices] = True
return values[mask], values[~mask]
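    # For example (illustrative, not from the original source): with num=25 and
    # quantile=10, np.arange(0, 25, 10) selects rows 0, 10 and 20, so roughly one
    # tenth of the rows become the extracted (validation) part and the rest remain.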
def _find_best_model(self, clustered_values):
"""
Find the best model which minimizes the errors.
Arguments :
------------
- clustered_values: array_like
training samples [[X1,X2, ..., Xn, Y], ... ]
Returns :
---------
- model : surrogate model
best trained surrogate model
"""
dim = self.ndim
clustered_values = np.array(clustered_values)
scores = {}
sms = {}
# validation with 10% of the training data
test_values, training_values = self._extract_part(clustered_values, 10)
for name, sm_class in self.expert_types.items():
kwargs = {}
if name in ["RMTB", "RMTC"]:
# Note: RMTS checks for xlimits,
# we take limits on all x (not just the trained_values ones) as
# the surrogate is finally re-trained on the whole x set.
xlimits = np.zeros((dim, 2))
for i in range(dim):
xlimits[i][0] = np.amin(self.x[:, i])
xlimits[i][1] = np.amax(self.x[:, i])
kwargs = {"xlimits": xlimits}
sm = sm_class(**kwargs)
sm.options["print_global"] = False
sm.set_training_values(training_values[:, 0:dim], training_values[:, dim])
sm.train()
expected = test_values[:, dim]
actual = sm.predict_values(test_values[:, 0:dim])
l_two = np.linalg.norm(expected - actual, 2)
# l_two_rel = l_two / np.linalg.norm(expected, 2)
# mse = (l_two**2) / len(expected)
# rmse = mse ** 0.5
scores[sm.name] = l_two
print(sm.name, l_two)
sms[sm.name] = sm
best_name = None
best_score = None
for name, rmse in scores.items():
if best_score is None or rmse < best_score:
best_name, best_score = name, rmse
print("Best expert = {}".format(best_name))
return sms[best_name]
def _find_best_heaviside_factor(self, x, y):
"""
Find the best heaviside factor to smooth approximated values.
Arguments
---------
- x: array_like
input training samples
- y: array_like
output training samples
Returns
-------
hfactor : float
best heaviside factor wrt given samples
"""
heaviside_factor = 1.0
if self.n_clusters > 1:
hfactors = np.linspace(0.1, 2.1, num=21)
errors = []
for hfactor in hfactors:
distribs = self._create_clusters_distributions(hfactor)
ypred = self._predict_smooth_output(x, distribs)
err_rel = np.linalg.norm(y - ypred, 2) / np.linalg.norm(y, 2)
errors.append(err_rel)
if max(errors) < 1e-6:
heaviside_factor = 1.0
else:
min_error_index = errors.index(min(errors))
heaviside_factor = hfactors[min_error_index]
return heaviside_factor
"""
Functions related to clustering
"""
def _create_clusters_distributions(self, heaviside_factor=1.0):
"""
Create an array of frozen multivariate normal distributions (distribs).
Arguments
---------
- heaviside_factor: float
Heaviside factor used to scale covariance matrices
Returns:
--------
- distribs: array_like
Array of frozen multivariate normal distributions
with clusters means and covariances
"""
distribs = []
dim = self.ndim
means = self.cluster.means_
if OLD_SKLEARN:
cov = heaviside_factor * self.cluster.covars_
else:
cov = heaviside_factor * self.cluster.covariances_
for k in range(self.n_clusters):
meansk = means[k][0:dim]
covk = cov[k][0:dim, 0:dim]
mvn = multivariate_normal(meansk, covk)
distribs.append(mvn)
return distribs
def _cluster_values(self, values, classifier):
"""
Classify values regarding the given classifier info.
Arguments
---------
- values: array_like
values to cluster
- classifier: array_like
Cluster corresponding to each point of value in the same order
Returns
-------
- clustered: array_like
Samples sort by cluster
Example:
---------
values:
[[ 1.67016597e-01 5.42927264e-01 9.25779645e+00]
[ 5.20618344e-01 9.88223010e-01 1.51596837e+02]
[ 6.09979830e-02 2.66824984e-01 1.17890707e+02]
[ 9.62783472e-01 7.36979149e-01 7.37641826e+01]
[ 3.01194132e-01 8.58084068e-02 4.88696602e+01]
[ 6.40398203e-01 6.91090937e-01 8.91963162e+01]
[ 7.90710374e-01 1.40464471e-01 1.89390766e+01]
[ 4.64498124e-01 3.61009635e-01 1.04779656e+01]]
cluster_classifier:
[1 0 0 2 1 2 1 1]
clustered:
[[array([ 0.52061834, 0.98822301, 151.59683723]),
array([ 6.09979830e-02, 2.66824984e-01, 1.17890707e+02])]
[array([ 0.1670166 , 0.54292726, 9.25779645]),
array([ 0.30119413, 0.08580841, 48.86966023]),
array([ 0.79071037, 0.14046447, 18.93907662]),
array([ 0.46449812, 0.36100964, 10.47796563])]
[array([ 0.96278347, 0.73697915, 73.76418261]),
array([ 0.6403982 , 0.69109094, 89.19631619])]]
"""
num = len(classifier)
assert values.shape[0] == num
clusters = [[] for n in range(self.n_clusters)]
for i in range(num):
clusters[classifier[i]].append(values[i])
return clusters
def _proba_cluster_one_sample(self, x, distribs):
"""
Compute membership probabilities to each cluster for one sample.
Arguments
---------
- x: array_like
a sample for which probabilities must be calculated
- distribs: multivariate_normal objects list
array of normal distributions
Returns
-------
- prob: array_like
x membership probability for each cluster
"""
weights = np.array(self.cluster.weights_)
rvs = np.array([distribs[k].pdf(x) for k in range(len(weights))])
probs = weights * rvs
rad = np.sum(probs)
if rad > 0:
probs = probs / rad
return probs
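    # Worked sketch (illustrative numbers, not from the original source): with
    # cluster weights w = [0.4, 0.6] and pdf values rvs = [0.2, 0.1] at x, the
    # unnormalized probs are [0.08, 0.06]; after dividing by their sum (0.14) the
    # memberships are approximately [0.571, 0.429].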
def _proba_cluster(self, x, distribs=None):
"""
Calculate membership probabilities to each cluster for each sample
Arguments
---------
- x: array_like
samples where probabilities must be calculated
- distribs : multivariate_normal objects list (optional)
array of membership distributions. If None, use self ones.
Returns
-------
- probs: array_like
x membership probabilities to each cluster.
Examples :
----------
x:
[[ 0. 0.]
[ 0. 1.]
[ 1. 0.]
[ 1. 1.]]
prob:
[[ 1.49050563e-02 9.85094944e-01]
[ 9.90381299e-01 9.61870088e-03]
[ 9.99208990e-01 7.91009759e-04]
[ 1.48949963e-03 9.98510500e-01]]
"""
if distribs is None:
distribs = self.distribs
if self.n_clusters == 1:
probs = np.ones((x.shape[0], 1))
else:
probs = np.array(
[self._proba_cluster_one_sample(x[i], distribs) for i in range(len(x))]
)
return probs
|
bouhlelma/smt
|
smt/applications/moe.py
|
Python
|
bsd-3-clause
| 20,407
|
[
"MOE"
] |
f234a81dc9a4df5cbae028f93a2c3daf90d513d1326cfd42c9866daec249559c
|
"""Generalized Gauss-Laguerre quadrature rule."""
import numpy
from scipy.special import gamma
import chaospy
from .hypercube import hypercube_quadrature
def laguerre(order, alpha=0., physicist=False):
r"""
Generalized Gauss-Laguerre quadrature rule.
Compute the sample points and weights for Gauss-Laguerre quadrature. The
sample points are the roots of the nth degree Laguerre polynomial. These
sample points and weights correctly integrate polynomials of degree
:math:`2N-1` or less.
    Gaussian quadrature comes in two variants: physicist and probabilist. For
    Gauss-Laguerre, physicist means a weight function :math:`x^\alpha e^{-x}`
    with weights that sum to :math:`\Gamma(\alpha+1)`, while probabilist means
    the same weight function with weights that sum to 1.
Args:
order (int):
The quadrature order.
alpha (float):
Shape parameter. Defaults to non-generalized Laguerre if 0.
physicist (bool):
Use physicist weights instead of probabilist.
Returns:
abscissas (numpy.ndarray):
The ``order+1`` quadrature points for where to evaluate the model
function with.
weights (numpy.ndarray):
The quadrature weights associated with each abscissas.
Examples:
>>> abscissas, weights = chaospy.quadrature.laguerre(2)
>>> abscissas
array([[0.41577456, 2.29428036, 6.28994508]])
>>> weights
array([0.71109301, 0.27851773, 0.01038926])
See also:
:func:`chaospy.quadrature.gaussian`
"""
order = int(order)
coefficients = chaospy.construct_recurrence_coefficients(
order=order, dist=chaospy.Gamma(alpha+1))
[abscissas], [weights] = chaospy.coefficients_to_quadrature(coefficients)
weights *= gamma(alpha+1) if physicist else 1
return abscissas[numpy.newaxis], weights
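# Illustrative check (not part of the original module): with physicist=True the
# probabilist weights are rescaled by gamma(alpha+1), so e.g. for alpha=2.0 the
# returned weights should sum to approximately gamma(3.0) == 2.0:
#
#   _, w = laguerre(4, alpha=2.0, physicist=True)
#   assert abs(w.sum() - gamma(3.0)) < 1e-6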
|
jonathf/chaospy
|
chaospy/quadrature/laguerre.py
|
Python
|
mit
| 1,910
|
[
"Gaussian"
] |
8baf800306a01e2132f01c0e9d1b0fa654fc36de4b52596a63d7a98ac17a7cb7
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************************************************
**LBOutputVzOfX** - controls output of the velocity component profile
***********************************************************************
Child class derived from the abstract class :class:`espressopp.analysis.LBOutput`.
It computes and outputs simulation progress (finished step) and controls flux
conservation when using MD to LB coupling.
.. function:: espressopp.analysis.LBOutputVzOfX(system,latticeboltzmann)
:param system: system object defined earlier in the python-script
:param latticeboltzmann: lattice boltzmann object defined earlier in the python-script
.. Note::
this class should be called from external analysis class :class:`espressopp.integrator.ExtAnalyze`
    with a specified periodicity of invocation, and then added to the integrator. See the example below for details.
Example to call the profiler:
>>> # initialise profiler (for example with the name outputVzOfX) with system and
>>> # lattice boltzmann objects as parameters:
>>> outputVzOfX = espressopp.analysis.LBOutputVzOfX(system,lb)
>>>
>>> # initialise external analysis object (for example extAnalysisNum3) with
>>> # previously created profiler and periodicity of invocation in steps:
>>> extAnalysisNum3=espressopp.integrator.ExtAnalyze(outputVzOfX,100)
>>>
>>> # add the external analysis object as an extension to the integrator
>>> integrator.addExtension(extAnalysisNum3)
.. function:: espressopp.analysis.LBOutputVzOfX(system, latticeboltzmann)
:param system:
:param latticeboltzmann:
:type system:
:type latticeboltzmann:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.LBOutput import *
from _espressopp import analysis_LBOutput_VzOfX
class LBOutputVzOfXLocal(LBOutputLocal, analysis_LBOutput_VzOfX):
def __init__(self, system, latticeboltzmann):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_LBOutput_VzOfX, system, latticeboltzmann)
if pmi.isController :
class LBOutputVzOfX(LBOutput):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.LBOutputVzOfXLocal',
pmicall = ["writeOutput"]
)
|
capoe/espressopp.soap
|
src/analysis/LBOutputVzOfX.py
|
Python
|
gpl-3.0
| 3,184
|
[
"ESPResSo"
] |
a7040bbedbab0827c3e4c088914e1d0f79387b93cdc7972c6840b48af001b0f3
|
from PyDSTool import pointset_to_traj, pointset_to_vars, Pointset, QuantSpec, makeSeqUnique
from PyDSTool import prepJacobian, expr2fun, remain, isincreasing
import copy
import numpy as np
from math import ceil
def make_noise_signal(dt, t_end, mean, stddev, num_cells, seed=None):
"""Helper function: Gaussian white noise at sample rate = dt for 1 or more cells,
for a duration of t_end."""
if seed is not None:
np.random.seed(seed)
N = ceil(t_end*1./dt)
t = np.linspace(0, t_end, N)
coorddict = {}
for cellnum in range(num_cells):
        coorddict['noise%i' % (cellnum+1)] = np.random.normal(mean, stddev, N)  # honour the requested mean
vpts = Pointset(coorddict=coorddict, indepvararray=t)
return pointset_to_vars(vpts, discrete=False)
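# Illustrative usage (not from the original source): 500 ms of zero-mean noise with
# std 0.1, sampled every 0.05 ms for two cells, reproducible via the seed:
#
#   noise_vars = make_noise_signal(0.05, 500, 0, 0.1, 2, seed=42)
#   # noise_vars['noise1'] and noise_vars['noise2'] are the per-cell noise Variables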
def make_spike_signal(ts, dur, tmax, loval=0, hival=1, dt=0.1):
"""Helper function: Square-pulse spike signal between two levels, loval
and hival. Pulses occur at times given by ts array for duration given by
dur scalar, from time 0 to tmax.
Returns a single-entry dictionary of Variable objects with key 'Istim'.
Default loval = 0, hival = 1.
    To improve performance with adaptive time-step solvers, extra points
    (two before and one after) are added around each pulse transition, with a
    minimum step time given by dt.
"""
assert len(ts) > 0, "No spike time events provided!"
assert isincreasing(ts), "This function expects strictly increasing times"
assert ts[0] != 0, "This function does not support initial step up at t=0"
assert dur > dt, "Duration must be larger than dt"
times = [0]
vals = [loval]
# check that ts are separated by at least dur+4*dt
    assert all(np.diff(ts) > dur+4*dt), "Separate events by at least dur+4*dt"
assert tmax > ts[-1]+dur, "tmax must be larger than last event end time"
for t in ts:
times.extend([t-2.9*dt, t-dt, t, t+dur-dt, t+dur, t+dur+dt])
vals.extend([loval, loval, hival, hival, hival, loval])
if tmax > ts[-1]+dur+dt:
times.append(tmax)
vals.append(loval)
coorddict = {'Istim': vals}
vpts = Pointset(coorddict=coorddict, indepvararray=times)
return pointset_to_vars(vpts, discrete=False)
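# Illustrative usage (not from the original source): two 5 ms pulses from 0 to 1
# starting at t=10 and t=30, over a 50 ms window:
#
#   stim = make_spike_signal([10, 30], 5, 50, loval=0, hival=1, dt=0.1)
#   # stim['Istim'] can then be used as an external input signal to a model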
def freq(traj):
evs = traj.getEventTimes('thresh_ev')
if len(evs) == 0:
return 0
elif len(evs) == 1:
print "Not enough events found"
return 0
else:
return 1000./(evs[-1] - evs[-2])
def amp(pts):
return max(pts['v']) - min(pts['v'])
def pcw_protocol(DS, prot_list):
"""protocol list of dictionaries with any of the keys 'pars',
'ics', or float 'tdur' (relative time duration of piece).
Preserves events in the resulting Pointset.
"""
orig_ics = DS.initialconditions.copy()
t = 0
for i, stage_dict in enumerate(prot_list):
if 'pars' in stage_dict:
DS.set(pars=stage_dict['pars'])
if 'ics' in stage_dict:
DS.set(ics=stage_dict['ics'])
if 'tdata' in stage_dict:
raise NotImplementedError("Replace 'tdata' with 'tdur' float value")
if 'tdur' in stage_dict:
DS.set(tdata=[t, t+stage_dict['tdur']])
traj = DS.compute('test')
if i == 0:
pts = traj.sample()
t = pts['t'][-1]
DS.set(ics=pts[-1])
else:
new_pts = traj.sample()
pts.extend(new_pts, skipMatchingIndepvar=True)
t = new_pts['t'][-1]
DS.set(ics=pts[-1])
DS.set(ics=orig_ics)
return pointset_to_traj(pts, events=DS.eventstruct.events), pts
def thresh_Naka_Rushton_fndef(N=2, half_on=120, max_val=100, with_if=True,
sys_pars=None):
"""Can specify strings or numbers for half_on and max_val arguments, in case of
including parameters or other variables in the definitions.
(Don't forget to declare those parameters, in that case.)
Use the with_if=False case to ensure Jacobians can be calculated.
Use sys_pars list to provide names of any model parameters or functions
that will be declared elsewhere.
"""
assert N == int(N), "Provide integer N"
extra_pars = []
if sys_pars is None:
sys_pars = []
if not isinstance(half_on, str):
half_on = str(half_on)
else:
Q = QuantSpec('h', half_on)
# don't add model system parameters to function parameter list
extra_pars.extend(remain(Q.freeSymbols, sys_pars))
if not isinstance(max_val, str):
max_val = str(max_val)
else:
Q = QuantSpec('m', max_val)
extra_pars.extend(remain(Q.freeSymbols, sys_pars))
if with_if:
return (['x']+extra_pars,
'if(x>0,%s*pow(x,%i)/(pow(%s,%i) + pow(x,%i)),0)' % (max_val, N, half_on, N, N))
else:
return (['x']+extra_pars,
'%s*pow(x,%i)/(pow(%s,%i) + pow(x,%i))' % (max_val, N, half_on, N, N))
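# For the default arguments (illustrative, not from the original source) the returned
# pair is roughly (['x'], 'if(x>0,100*pow(x,2)/(pow(120,2) + pow(x,2)),0)'), i.e. a
# Naka-Rushton gain with half-activation at 120 and maximum value 100.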
def thresh_exp_fndef(sigma, half_on, max_val):
return (['x'], '1/(1+exp((%f-x)/%f))' % (half_on, sigma))
def thresh_tanh_fndef(N, half_on, max_val):
# not complete
raise NotImplementedError("Class not complete!")
assert N == int(N), "Provide integer N"
return (['x'],
'if(x > 0, ???, 0)' % (max_val, N, half_on))
def make_Jac(DS, varnames=None):
if varnames is None:
varnames = DS.funcspec.vars
subdomain = {}
fixedvars = remain(DS.funcspec.vars, varnames)
for k in fixedvars:
subdomain[k] = DS.initialconditions[k]
jac, new_fnspecs = prepJacobian(DS.funcspec._initargs['varspecs'], varnames,
DS.funcspec._initargs['fnspecs'])
scope = copy.copy(DS.pars)
scope.update(subdomain)
scope.update(new_fnspecs)
return expr2fun(jac, ensure_args=['t'], **scope)
|
robclewley/compneuro
|
common_lib.py
|
Python
|
bsd-3-clause
| 5,785
|
[
"Gaussian"
] |
f65b8b1a4a3a585ac9086c32560da0a9be13a919695d1d5b2f2ed2988fb7aa10
|
#!/usr/bin/env python
from __future__ import with_statement
__author__ = 'Brian Blais <bblais@bryant.edu>'
__version__ = (0, 1, 7)
import sys
from compiler import parse, walk
from compiler.consts import *
import os
import subprocess
from optparse import OptionParser
import re
from first_pass import *
from second_pass import *
from definitions import *
# pynxc_root=os.getcwd()
pynxc_root = os.path.dirname(os.path.abspath(__file__)) \
.replace("library.zip", "")
def python_to_nxc(pyfile, nxcfile=None, debug=False, dry=False):
filename = pyfile
f = open(filename, 'U')
codestring = f.read()
f.close()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
filestr = codestring
defines = re.findall('\s*DEFINE (.*?)=(.*)', filestr)
filestr = re.sub('\s*DEFINE (.*?)=(.*)', "", filestr)
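    # e.g. a source line such as "DEFINE THRESHOLD=40" (hypothetical example) would be
    # captured as ('THRESHOLD', '40') and stripped from the source before parsing.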
if debug:
print "Filestr"
print filestr
ast = parse(filestr)
v = FirstPassVisitor(debug=debug)
v.v(ast)
# print "variables assign:", v.variables_assign
v.defines = defines
if nxcfile:
fid = open(nxcfile, 'wt')
else:
fid = sys.stdout
v2 = SecondPassVisitor(v, debug=debug, stream=fid, root=pynxc_root, dry=dry)
v2.v(ast)
v2.flush_main()
v2.flush()
if not fid == sys.stdout:
fid.close()
def download(filename, run=False):
nxc_file = filename.replace('.py', '.nxc')
python_to_nxc(filename, nxc_file)
nxc = pynxc_root + os.sep + os.path.join("nxc", sys.platform, 'nbc')
cmd = [nxc]
cmd.append("-d")
cmd.append("-S=usb")
cmd.append("-v=128")
if run:
cmd.append("-r")
cmd.append(nxc_file)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
return (out, err)
def readconfig(fname):
import yaml
config = {'firmware': '105'}
if os.path.exists(fname):
data = yaml.load(open(fname))
config.update(data)
return config
def main():
# config=readconfig('pynxc.yaml')
config = {'firmware': 128}
nxc = os.path.join("nxc", sys.platform, 'nbc')
if not os.path.exists(nxc):
nxc = 'nbc' # expect 'nbc' in the binary PATH
usage = "usage: %prog [options] [filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--compile', dest="compile",
help='compile to nxc code only', default=False,
action="store_true")
parser.add_option('-o', '--output-file', dest="outfile",
help='specify the output file')
parser.add_option('--debug', dest="debug",
help='show debug messages', default=False,
action="store_true")
parser.add_option('--show_nxc', dest="show_nxc",
help='show the nxc code', default=False,
action="store_true")
parser.add_option('-d', '--download', dest="download",
help='download program', default=False,
action="store_true")
parser.add_option('-B', '--bluetooth', dest="bluetooth",
help='enable bluetooth', default=False,
action="store_true")
parser.add_option('-q', '--quiet', dest="quiet",
help='stay quiet (prints no output)', default=False,
action="store_true")
parser.add_option('-r', '--dry', dest="dry",
help='dry run (without headers)', default=False,
action="store_true")
parser.add_option('--firmware', dest="firmware",
help='firmware version (105, 107, or 128)', default=config['firmware'])
parser.add_option('--command', dest="nxc",
help='what is the nxc/nqc command', default=nxc,
metavar="<command>")
options, args = parser.parse_args()
if len(args) < 1:
parser.print_help()
raise SystemExit
options.firmware = config['firmware']
# sanity check on the options
if (options.download) and (options.compile):
print "conflicting options"
parser.print_help()
raise SystemExit
nxc_root, nxc = os.path.split(options.nxc)
s = nxc.lower()
filename = args[0]
for filename in args:
root, ext = os.path.splitext(filename)
nxc_filename = root + ".nxc"
rxe_filename = root + ".rxe"
if options.outfile:
nxc_filename = options.outfile
rxe_filename = options.outfile
python_to_nxc(filename, nxc_filename, debug=options.debug,
dry=options.dry)
if options.dry:
return
if not options.quiet:
print "Wrote %s." % (nxc_filename)
if options.show_nxc:
fid = open(nxc_filename)
print fid.read()
fid.close()
if not options.compile:
cmd = options.nxc + " "
if options.bluetooth:
cmd += ' -BT '
cmd = cmd + "'%s'" % nxc_filename + " -I='%s' -I=%s/ -v=%s -O='%s'" % (nxc_root,
pynxc_root,
options.firmware,
rxe_filename)
if not options.quiet:
print cmd
a = os.system(cmd)
if options.download:
print "Downloading...",
cmd = options.nxc + " "
cmd = cmd + nxc_filename + " -I='%s/' -S=usb -I='%s/' -v=%s -d" % (nxc_root,
pynxc_root,
options.firmware)
a = os.system(cmd)
nxtcom = os.path.join(nxc_root, 'nxtcom')
if options.debug: print nxtcom
if os.path.exists(nxtcom):
cmd = '%s %s' % (nxtcom, rxe_filename)
a = os.system(cmd)
print "done."
return
if __name__ == "__main__":
sys.exit(main())
|
xlcteam/pynxc
|
pynxc/pynxc.py
|
Python
|
bsd-3-clause
| 6,382
|
[
"Brian"
] |
fe27db34c30374a4e4198db2dc21da7f9aa86455d63821af6246b46b207a6984
|
from scipy.interpolate import griddata
import sys
import numpy as np
import vtk
from vtk.util import numpy_support
reader = vtk.vtkPolyDataReader()
reader.SetFileName(sys.argv[1])
reader.Update()
polydata = reader.GetOutput()
nodes_vtk_array= polydata.GetPoints().GetData()
#The "Temperature" field is the third scalar in my vtk file
temperature_vtk_array = polydata.GetCellData().GetArray(0)
#Get the coordinates of the nodes and their temperatures
nodes_numpy_array = numpy_support.vtk_to_numpy(nodes_vtk_array)
x,y,z= nodes_numpy_array[:,0] , nodes_numpy_array[:,1] , nodes_numpy_array[:,2]
temperature_numpy_array = numpy_support.vtk_to_numpy(temperature_vtk_array)
T = temperature_numpy_array
T = T*100. #
C=numpy_support.numpy_to_vtk(T)
polydata.GetCellData().SetScalars(C)
reader.Update()
writer=vtk.vtkPolyDataWriter()
writer.SetFileName("test.vtk")
writer.SetFileTypeToASCII()
writer.SetInputConnection(reader.GetOutputPort())
writer.Write()
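# Typical invocation (illustrative, not part of the original script):
#   python vtkTest.py input.vtk
# which reads the legacy VTK polydata, rescales the first cell-data array by 100
# via SetScalars, and writes test.vtk in ASCII legacy VTK format.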
|
blaisb/cfdemUtilities
|
paraview/vtkTest.py
|
Python
|
lgpl-3.0
| 961
|
[
"VTK"
] |
a5b80105d8c476bc9f98679b28d757e76f2188eb87b72c7e4f8b10bbeb9eb331
|
import os, os.path
import random
import string
import datetime, time
import logging
import Bio.Entrez
import util
Bio.Entrez.email = "nathan@acceleration.net"
Bio.Entrez.tool = "Biopython->pynpact"
MAX_TO_SUMMARIZE = 10
logger = logging.getLogger(__name__)
class TooManyResponsesException(Exception):
term = None
summaries = None
count = None
WebEnv = None
QueryKey = None
def __init__(self, term=None, count=None, Webenv=None, QueryKey=None):
self.term = term
self.count = count
self.WebEnv = Webenv
self.QueryKey = QueryKey
class EntrezSession(object):
#db = 'genome'
#http://www.ncbi.nlm.nih.gov/About/news/17Nov2011.html
#db = 'nucleotide'
db = 'nuccore'
WebEnv = None
QueryKey = None
lib_path = None
summaries = None
def __init__(self, lib_path, **kwargs):
self.lib_path = lib_path
self.__dict__.update(kwargs)
def reset(self):
self.QueryKey = None
self.WebEnv = None
self.result_count = None
def has_session(self):
return self.QueryKey and len(self.QueryKey) and \
self.WebEnv and len(self.WebEnv)
@util.log_time(logger)
def search(self, term):
logger.info(
"Starting Entrez query for %r, session=%s",
term, self.has_session())
resp = Bio.Entrez.read(Bio.Entrez.esearch(db=self.db, term=term,
usehistory=True,
query_key=self.QueryKey,
webenv=self.WebEnv))
self.QueryKey = resp['QueryKey']
self.WebEnv = resp['WebEnv']
self.result_count = int(resp['Count'])
logger.debug("Got back %s results.", self.result_count)
return resp
@util.log_time(logger)
def _summarize(self):
logger.info("Summarizing from %s, %s", self.WebEnv, self.QueryKey)
self.summaries = Bio.Entrez.read(
Bio.Entrez.esummary(
db=self.db, retmax=20, webenv=self.WebEnv, query_key=self.QueryKey))
def summarize(self):
if not self.summaries:
self._summarize()
return self.summaries
@util.log_time(logger)
def fetch(self, summary=None, filename=None):
if not summary:
if self.result_count == 1:
summary = self.summarize()[0]
else:
raise TooManyResponsesException()
id = summary['Id']
logger.info("Starting fetch of Id: %s", id)
if filename is None:
base = (
summary.get('Assembly_Accession') or
summary.get('Caption'))
if not base:
logger.warning(
"Couldn't find a filename in the summary. Id: %s", id)
base = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(16))
filename = os.path.join(self.lib_path, base + ".gbk")
# if os.path.exists(filename):
# date = (summary.get('UpdateDate') or
# summary.get('Update_Date') or
# summary.get('CreateDate') or
# summary.get('Create_Date'))
if not os.path.exists(filename):
# or datetime.datetime.fromtimestamp(
# os.path.getmtime(filename)) < update_date:
# file should be downloaded.
net_handle = Bio.Entrez.efetch(
db=self.db, id=id, rettype='gbwithparts', retmode='text')
logger.debug("Streaming handle to %r.", filename)
with util.mkstemp_rename(filename) as f:
bytes = util.stream_to_file(net_handle, f)
logger.info(
"Saved %s to %s.", util.pprint_bytes(bytes), filename)
else:
logger.debug("Using already present file %r", filename)
return filename
def fetch_id(self, id):
logger.info("Starting fetch_id(%s)", id)
summaries = Bio.Entrez.read(Bio.Entrez.esummary(db=self.db, id=id))
if len(summaries):
return self.fetch(summary=summaries[0])
else:
return None
def to_url(self, term):
"""Convert the query we've done to a url that will load ncbi's site."""
fmt = "http://www.ncbi.nlm.nih.gov/sites/entrez?db={0}&term={1}"
return fmt.format(self.db,term)
def to_session_url(self):
"""Convert the query we've done to a url that will load ncbi's site."""
fmt = "http://www.ncbi.nlm.nih.gov/sites/entrez?db={0}&cmd=HistorySearch&querykey={1}&tab=&WebEnv={2}"
return fmt.format(self.db,self.QueryKey,self.WebEnv)
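    # Illustrative usage sketch (not from the original source; the library path is
    # hypothetical):
    #
    #   session = EntrezSession('/tmp/gbk_cache')
    #   session.search('NC_014248')
    #   if session.result_count == 1:
    #       path = session.fetch()   # downloads the record as a .gbk file if needed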
ENTREZ_CACHE = {}
class CachedEntrezSession(EntrezSession):
def search(self, term):
self.term = term
self.summaries = ENTREZ_CACHE.get(term)
if self.summaries:
logger.debug('CachedEntrezSession hit for %s', term)
self.result_count = len(self.summaries)
else:
super(CachedEntrezSession, self).search(term)
def summarize(self):
if not self.summaries:
ENTREZ_CACHE[self.term] = super(CachedEntrezSession, self).summarize()
return self.summaries
# [{'Status': 'Completed',
# 'Comment': ' ',
# 'Caption': 'NC_014248',
# 'Title': "'Nostoc azollae' 0708 chromosome, complete genome",
# 'CreateDate': '2010/06/16',
# 'Extra': 'gi|298489614|ref|NC_014248.1||gnl|NCBI_GENOMES|26219[26219]',
# 'TaxId': 551115,
# 'ReplacedBy': '',
# u'Item': [],
# 'Length': 5354700,
# 'Flags': 256,
# 'UpdateDate': '2011/04/06',
# u'Id': '26219',
# 'Gi': 2621}
# ]
|
victor-lin/npact
|
pynpact/pynpact/entrez.py
|
Python
|
bsd-3-clause
| 5,770
|
[
"Biopython"
] |
f56d61780f2db40904934f899cfaf9fd4b32ea21500d1d5bfec7f07ad379f8ac
|
#!/usr/bin/env python
"""
Author: Mateusz Malinowski
Email: mmalinow@mpi-inf.mpg.de
The script assumes there are two files
- first file with ground truth answers
- second file with predicted answers
both answers are line-aligned
The script also assumes that answer items are comma separated.
For instance, chair,table,window
It is also a set measure, so it is not exactly the same as accuracy
even if the Dirac measure is used, since {book,book}=={book} and {book,chair}=={chair,book}
Logs:
18.02.2016 - added partitioning wrt. answers
17.10.2015 - abstracted the metric computations away
05.09.2015 - white spaces surrounding words are stripped away so that {book, chair}={book,chair}
"""
import sys
#import enchant
from numpy import prod
from nltk.corpus import wordnet as wn
def file2list(filepath):
with open(filepath,'r') as f:
lines =[k for k in
[k.strip() for k in f.readlines()]
if len(k) > 0]
return lines
def list2file(filepath,mylist):
mylist='\n'.join(mylist)
with open(filepath,'w') as f:
f.writelines(mylist)
def items2list(x):
"""
x - string of comma-separated answer items
"""
return [l.strip() for l in x.split(',')]
def fuzzy_set_membership_measure(x,A,m):
"""
Set membership measure.
x: element
A: set of elements
m: point-wise element-to-element measure m(a,b) ~ similarity(a,b)
    This function implements a fuzzy set membership measure:
m(x \in A) = max_{a \in A} m(x,a)}
"""
return 0 if A==[] else max(map(lambda a: m(x,a), A))
def score_it(A,T,m):
"""
A: list of A items
T: list of T items
m: set membership measure
m(a \in A) gives a membership quality of a into A
This function implements a fuzzy accuracy score:
score(A,T) = min{prod_{a \in A} m(a \in T), prod_{t \in T} m(a \in A)}
where A and T are set representations of the answers
and m is a measure
"""
if A==[] and T==[]:
return 1
# print A,T
score_left=0 if A==[] else prod(map(lambda a: m(a,T), A))
score_right=0 if T==[] else prod(map(lambda t: m(t,A),T))
return min(score_left,score_right)
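# Worked example (illustrative, not from the original source) with the Dirac measure
# below as element measure: for A = ['chair'] and T = ['chair', 'table'],
# score_left = m('chair', T) = 1 while score_right = m('chair', A) * m('table', A)
# = 1 * 0 = 0, so score_it(A, T, m) = 0 -- missing an answer item is penalised.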
# implementations of different measure functions
def dirac_measure(a,b):
"""
Returns 1 iff a=b and 0 otherwise.
"""
if a==[] or b==[]:
return 0.0
return float(a==b)
def wup_measure(a,b,similarity_threshold=0.925):
"""
Returns Wu-Palmer similarity score.
More specifically, it computes:
max_{x \in interp(a)} max_{y \in interp(b)} wup(x,y)
where interp is a 'interpretation field'
"""
def get_semantic_field(a):
weight = 1.0
semantic_field = wn.synsets(a,pos=wn.NOUN)
return (semantic_field,weight)
def get_stem_word(a):
"""
Sometimes answer has form word\d+:wordid.
If so we return word and downweight
"""
weight = 1.0
return (a,weight)
global_weight=1.0
(a,global_weight_a)=get_stem_word(a)
(b,global_weight_b)=get_stem_word(b)
global_weight = min(global_weight_a,global_weight_b)
if a==b:
# they are the same
return 1.0*global_weight
if a==[] or b==[]:
return 0
interp_a,weight_a = get_semantic_field(a)
interp_b,weight_b = get_semantic_field(b)
if interp_a == [] or interp_b == []:
return 0
# we take the most optimistic interpretation
global_max=0.0
for x in interp_a:
for y in interp_b:
local_score=x.wup_similarity(y)
if local_score > global_max:
global_max=local_score
# we need to use the semantic fields and therefore we downweight
# unless the score is high which indicates both are synonyms
if global_max < similarity_threshold:
interp_weight = 0.1
else:
interp_weight = 1.0
final_score=global_max*weight_a*weight_b*interp_weight*global_weight
return final_score
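# Rough illustration (not from the original source; exact values depend on the
# installed WordNet data): wup_measure('cat', 'dog') finds a best Wu-Palmer
# similarity of about 0.86, below the 0.925 threshold, so it is down-weighted by
# 0.1 to roughly 0.086.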
def get_metric_score(gt_list, pred_list, threshold):
"""
Computes metric score.
In:
gt_list - list of gt answers
pred_list - list of predicted answers
threshold
Out:
metric score
"""
if threshold == -1:
our_element_membership=dirac_measure
else:
our_element_membership=lambda x,y: wup_measure(x,y,threshold)
our_set_membership=\
lambda x,A: fuzzy_set_membership_measure(x,A,our_element_membership)
score_list=[score_it(items2list(ta),items2list(pa),our_set_membership)
for (ta,pa) in zip(gt_list,pred_list)]
#final_score=sum(map(lambda x:float(x)/float(len(score_list)),score_list))
final_score=float(sum(score_list))/float(len(score_list))
return final_score
def get_class_metric_score(gt_list, pred_list, threshold):
"""
Computes class-based metric score.
In:
gt_list - list of gt answers
pred_list - list of predicted answers
threshold
Out:
class-based metric score
"""
# creates abstract classes
gt_abstract_classes = set(gt_list)
# partition wrt. abstract classes
class_scores = {}
for abstract_class in gt_abstract_classes:
tmp = [(x,k) for k,x in enumerate(gt_list) if x == abstract_class]
gt_list_new, gt_indices = zip(*tmp)
gt_list_new = list(gt_list_new)
gt_indices = list(gt_indices)
pred_list_new = []
for curr_index in gt_indices:
pred_list_new.append(pred_list[curr_index])
score = get_metric_score(gt_list_new, pred_list_new, threshold)
class_scores[abstract_class] = score
return class_scores
###
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage: path to true answers, path to predicted answers, threshold'
print 'If threshold is -1, then the standard Accuracy is used'
sys.exit("3 arguments must be given")
# folders
gt_filepath=sys.argv[1]
pred_filepath=sys.argv[2]
input_gt=file2list(gt_filepath)
input_pred=file2list(pred_filepath)
thresh=float(sys.argv[3])
if thresh == -1:
print 'standard Accuracy is used'
else:
print 'soft WUPS at %1.2f is used' % thresh
final_score = get_metric_score(input_gt, input_pred, thresh)
# filtering to obtain the results
#print 'full score:', score_list
print 'exact final score:', final_score
print 'final score is %2.2f%%' % (final_score * 100.0)
|
mateuszmalinowski/visual_turing_test-tutorial
|
kraino/utils/compute_wups.py
|
Python
|
mit
| 6,503
|
[
"DIRAC"
] |
75c1826d8e560309852c7b985d64b61c1bf1a5f6fee148a8abb2ee6f48f9eb94
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
dim: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and to score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
if self.whiten:
components_ = V / (S[:, np.newaxis] / sqrt(n_samples))
else:
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation as transform.
"""
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
|
soulmachine/scikit-learn
|
sklearn/decomposition/pca.py
|
Python
|
bsd-3-clause
| 22,689
|
[
"Gaussian"
] |
800195dab6007880d881ee2ff3644aba2e85b9bb74012d7d3694e5f378483730
|
import math
from functools import reduce
class Function:
"""
Generic function representation
"""
def __init__(self, start, end):
"""
Initialize a new function object
:param start: First 'relevant' value in the function's domain
:param end: Last 'relevant' value in the function's domain
"""
self.start = start
self.end = end
def __call__(self, x):
raise NotImplementedError()
def points(self, step=0.1):
"""
Generator of tuples (x, f(x)) in the interval [self.start, self.end]
:param step: Distance between points in the discretized function domain
"""
x = self.start
while x < self.end:
yield x, self(x)
x += step
class Compose(Function):
"""
Represents the composition of several functions
"""
def __init__(self, aggr):
"""
Initialize a new composed function
:param aggr: List of tuples (trunc, func) where trunc is an upper limit to func's image
"""
self.aggr = aggr
# Store smallest start in member functions as the own's start
start = reduce(
lambda x, y: x if x[1].start < y[1].start else y,
aggr,
aggr[0]
)[1].start
# Store biggest end in member functions as the own's end
end = reduce(
lambda x, y: x if x[1].end > y[1].end else y,
aggr,
aggr[0]
)[1].end
super().__init__(start, end)
def __call__(self, x):
# Check produced image of each member function and keep the greatest
y = 0
for trunc, func in self.aggr:
yi = min(trunc, func(x)) # Trunc function if necessary
y = max(y, yi)
return y
class Trapezoid(Function):
def __init__(self, a, b, c=None, d=None):
self.a = a
self.b = b
self.c = c
self.d = d
super().__init__(a, d if d else c if c else b)
    def __call__(self, x):
        if self.a is not None:
            if x <= self.a:
                return 0  # (-inf, a]
            if self.a < x < self.b:
                return (x - self.a) / (self.b - self.a)  # ascent (a, b)
            # Plateau: [b, inf) when only a and b are given, [b, c] for a full trapezoid
            if self.c is None or (self.d is not None and self.b <= x <= self.c):
                return 1
            # Descent (b->c if triangle, c->d if trapezoid)
            c, d = (self.c, self.d) if self.d is not None else (self.b, self.c)
            if x < d:
                return (x - d) / (c - d)
            return 0  # [c, inf) for a triangle, [d, inf) for a trapezoid
class Triangle(Trapezoid):
def __init__(self, a, b, c):
super().__init__(a, b, c)
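# Illustrative values (not from the original source): a full trapezoid
# Trapezoid(0, 1, 2, 3) gives 0.5 at x=0.5 (rising edge), 1.0 on the [1, 2]
# plateau and 0.5 at x=2.5 (falling edge), and membership 0 outside [0, 3].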
class Sigmoid(Function):
def __init__(self, k, x0):
self.k = k # width
self.x0 = x0 # center
super().__init__(start=-abs(6 / k), end=abs(6 / k)) # thanks Wikipedia
def __call__(self, x):
return 1 / (1 + math.e ** (-self.k * (x - self.x0)))
class Gaussian(Function):
def __init__(self, b, c):
self.b = b # center
self.c = c # width
        super().__init__(start=b - abs(6 * c), end=b + abs(6 * c))  # +/- 6 sigma around the center
def __call__(self, x):
return math.e ** -(((x - self.b) ** 2) / (2 * self.c ** 2))
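# Usage sketch (hypothetical membership functions): aggregate two fuzzy sets
# with Compose, which takes the max over members after capping each at its
# truncation level, then sample the result over the combined domain.
#   warm = Triangle(15, 25, 35)                      # peak at 25
#   hot = Sigmoid(0.8, 30)                           # rises around 30
#   aggregated = Compose([(1.0, warm), (0.5, hot)])  # hot capped at 0.5
#   for x, y in aggregated.points(step=5):
#       print(x, round(y, 3))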
|
ealmuina/fuzzy-logic-evaluator
|
fuzzy/functions.py
|
Python
|
mit
| 3,281
|
[
"Gaussian"
] |
5e50b076d1c063014d6dbab21b8fe254e8d1dc6b8069ee00742a9fedda4b11dc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developpers.
# =============================================================================
# @file ostap/histos/tests/test_histos_compare.py
# Test module for ostap/histos/compare.py
# - It tests comparison of 1D-histograms
# =============================================================================
"""Test module for ostap/histos/compare.py
- It tests comparison of 1D-histograms
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'ostap.test_histos_compare' )
else :
logger = getLogger ( __name__ )
# =============================================================================
logger.info ( 'Test for 1D-histogram compare')
# =============================================================================
from ostap.math.ve import VE
from ostap.core.core import hID
from ostap.histos.histos import h1_axis
import ostap.histos.compare
from builtins import range
#
## histos for gaussian distributions
#
h1g = ROOT.TH1D ( hID() , '' , 40 , -5 , 5 ) ; h1g.Sumw2()
h2g = ROOT.TH1D ( hID() , '' , 40 , -5 , 5 ) ; h2g.Sumw2()
h3g = ROOT.TH1D ( hID() , '' , 20 , -5 , 5 ) ; h3g.Sumw2()
bins = [ -5 ]
##
random.seed(10)
for i in range(0, 15 ) : bins.append ( random.uniform ( -5 , 5 ) )
bins += [ 5 ]
bins.sort()
h4g = h1_axis ( bins )
#
## histos for uniform distributions
#
h1u = h1g.clone()
h2u = h2g.clone()
h3u = h3g.clone()
h4u = h4g.clone()
#
## histos for exponential distributions
#
h1e = h1g.clone()
h2e = h2g.clone()
h3e = h3g.clone()
h4e = h4g.clone()
## get the value
v = VE(1,1.75**2)
random.seed(10)
for i in range ( 0, 50000 ) :
g1 = v.gauss()
g2 = v.gauss()
g3 = g2 ## the same as g2
g4 = v.gauss()
h1g.Fill ( g1 )
h2g.Fill ( g2 )
h3g.Fill ( g3 )
h4g.Fill ( g4 )
u1 = random.uniform ( -5 , 5 )
u2 = random.uniform ( -5 , 5 )
    u3 = u2 ## the same as u2
u4 = random.uniform ( -5 , 5 )
h1u.Fill ( u1 )
h2u.Fill ( u2 )
h3u.Fill ( u3 )
h4u.Fill ( u4 )
e1 = -1 * random.expovariate( -0.5 ) -5
e2 = -1 * random.expovariate( -0.5 ) -5
e3 = e2 ## the same as e2
e4 = -1 * random.expovariate( -0.5 ) -5
if not -5 < e1 < 5 : continue
if not -5 < e2 < 5 : continue
if not -5 < e3 < 5 : continue
if not -5 < e4 < 5 : continue
h1e.Fill ( e1 )
h2e.Fill ( e2 )
h3e.Fill ( e3 )
h4e.Fill ( e4 )
h5g = h4g.rescale_bins(1)
h5u = h4u.rescale_bins(1)
h5e = h4e.rescale_bins(1)
## compare two histograms
def compare ( h1 , h2 , title = '' , density = False ) :
## r1 = h1.cmp_fit ( h2 , opts = 'WL0Q' , density = density )
## if r1 : logger.info ( 'h1 vs h2 : fit probability is %.5f%% ' % ( r1.Prob()*100 ) )
## else : logger.warning ( 'h1 vs h2 : fit problems ')
## r2 = h2.cmp_fit ( h1 , opts = 'WL0Q' , density = density )
## if r2 : logger.info ( 'h2 vs h1 : fit probability is %.5f%% ' % ( r2.Prob()*100 ) )
## else : logger.warning ( 'h2 vs h1 : fit problems ')
## ct = h1.cmp_cos ( h2 , density = density )
## logger.info ( 'h1 vs h2 : cos(theta) is %s ' % ct )
## dd1 = h1.cmp_dist ( h2 , density = density )
## logger.info ( 'h1 vs h2 : distance is %s ' % dd1 )
## ## dd2 = h1.cmp_dist2 ( h2 , density = density )
## ## logger.info ( 'h1 vs h2 : distance2 is %s ' % dd2 )
logger.info ( "%s\n%s" % ( title , h1.cmp_prnt ( h2 , density = density , title = title , prefix = '# ' ) ) )
logger.info ( "%s\n%s" % ( title , h1.cmp_diff_prnt ( h2 , density = density , title = title , prefix = '# ' ) ) )
# =============================================================================
## compare gaussians
def test_compare_gaussians() :
compare ( h1g , h2g , 'Compare gaussians (1) and (2)' )
compare ( h1g , h3g , 'Compare gaussians (1) and (3)' )
compare ( h1g , h4g , 'Compare gaussians (1) and (4)' )
compare ( h1g , h4g , 'Compare gaussians (1) and (4) with rescale' , density = True )
compare ( h1g , h5g , 'Compare gaussians (1) and (5)' )
compare ( h2g , h3g , 'Compare gaussians (2) and (3) : should be the same!' )
compare ( h2g , h4g , 'Compare gaussians (2) and (4)' )
compare ( h2g , h4g , 'Compare gaussians (2) and (4) with rescale' , density = True )
compare ( h2g , h5g , 'Compare gaussians (2) and (5)' )
compare ( h3g , h4g , 'Compare gaussians (3) and (4)' )
compare ( h3g , h4g , 'Compare gaussians (3) and (4) with rescale' , density = True )
compare ( h3g , h5g , 'Compare gaussians (3) and (5)' )
compare ( h4g , h5g , 'Compare gaussians (4) and (5)' )
def test_compare_uniforms () :
compare ( h1u , h2u , 'Compare uniforms (1) and (2)' )
compare ( h1u , h3u , 'Compare uniforms (1) and (3)' )
compare ( h1u , h4u , 'Compare uniforms (1) and (4)' )
compare ( h1u , h4u , 'Compare uniforms (1) and (4) with rescale' , density = True )
compare ( h1u , h5u , 'Compare uniforms (1) and (5)' )
compare ( h2u , h3u , 'Compare uniforms (2) and (3) : should be the same!' )
compare ( h2u , h4u , 'Compare uniforms (2) and (4)' )
compare ( h2u , h4u , 'Compare uniforms (2) and (4) with rescale' , density = True )
    compare ( h2u , h5u , 'Compare uniforms (2) and (5)' )
compare ( h3u , h4u , 'Compare uniforms (3) and (4)' )
    compare ( h3u , h4u , 'Compare uniforms (3) and (4) with rescale' , density = True )
compare ( h3u , h5u , 'Compare uniforms (3) and (5)' )
compare ( h4u , h5u , 'Compare uniforms (4) and (5)' )
def test_compare_exponentials () :
compare ( h1e , h2e , 'Compare exponentials (1) and (2)' )
compare ( h1e , h3e , 'Compare exponentials (1) and (3)' )
compare ( h1e , h4e , 'Compare exponentials (1) and (4)' )
compare ( h1e , h4e , 'Compare exponentials (1) and (4) with rescale' , density = True )
compare ( h1e , h5e , 'Compare exponentials (1) and (5)' )
compare ( h2e , h3e , 'Compare exponentials (2) and (3) : should be the same!' )
compare ( h2e , h4e , 'Compare exponentials (2) and (4)' )
compare ( h2e , h4e , 'Compare exponentials (2) and (4) with rescale' , density = True )
compare ( h2e , h5e , 'Compare exponentials (2) and (5)' )
compare ( h3e , h4e , 'Compare exponentials (3) and (4)' )
compare ( h3e , h4e , 'Compare exponentials (3) and (4) with rescale' , density = True )
compare ( h3e , h5e , 'Compare exponentials (3) and (5)' )
compare ( h4e , h5e , 'Compare exponentials (4) and (5)' )
def test_compare_gauss_vs_uniform() :
_ig = 0
for ig in ( h1g , h2g , h3g , h4g , h5g ) :
_ig += 1
_iu = 0
for iu in ( h1u , h2u , h3u , h4u , h5u ) :
_iu += 1
compare ( ig , iu , 'Compare gaussian (%d) and uniform (%d)' % ( _ig , _iu ) )
def test_compare_gauss_vs_exponent () :
_ig = 0
for ig in ( h1g , h2g , h3g , h4g , h5g ) :
_ig += 1
_ie = 0
for ie in ( h1e , h2e , h3e , h4e , h5e ) :
_ie += 1
compare ( ig , ie , 'Compare gaussian (%d) and exponent (%d)' % ( _ig , _ie ) )
def test_compare_uniform_vs_exponent () :
_iu = 0
for iu in ( h1u , h2u , h3u , h4u , h5u ) :
_iu += 1
_ie = 0
for ie in ( h1e , h2e , h3e , h4e , h5e ) :
_ie += 1
compare ( iu , ie , 'Compare uniform (%d) and exponent (%d)' % ( _iu , _ie ) )
# =============================================================================
if '__main__' == __name__ :
test_compare_gaussians ()
test_compare_uniforms ()
test_compare_exponentials ()
test_compare_gauss_vs_uniform ()
test_compare_gauss_vs_exponent ()
test_compare_uniform_vs_exponent ()
pass
# =============================================================================
## The END
# =============================================================================
|
OstapHEP/ostap
|
ostap/histos/tests/test_histos_compare.py
|
Python
|
bsd-3-clause
| 8,968
|
[
"Gaussian"
] |
54f19699bca65c7a481611e305463f26c8b9d857bc7c13ca851927ac5ff6ed2f
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import operator
import sys
import uuid
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from multiprocessing.pool import ThreadPool
from pyspark import keyword_only, since, SparkContext
from pyspark.ml import Estimator, Predictor, PredictionModel, Model
from pyspark.ml.param.shared import HasRawPredictionCol, HasProbabilityCol, HasThresholds, \
HasRegParam, HasMaxIter, HasFitIntercept, HasTol, HasStandardization, HasWeightCol, \
HasAggregationDepth, HasThreshold, HasBlockSize, HasMaxBlockSizeInMB, Param, Params, \
TypeConverters, HasElasticNetParam, HasSeed, HasStepSize, HasSolver, HasParallelism
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeClassifierParams
from pyspark.ml.regression import _FactorizationMachinesParams, DecisionTreeRegressionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, \
JavaMLReadable, JavaMLReader, JavaMLWritable, JavaMLWriter, \
MLReader, MLReadable, MLWriter, MLWritable, HasTrainingSummary
from pyspark.ml.wrapper import JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.ml.linalg import Vectors
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LinearSVCSummary', 'LinearSVCTrainingSummary',
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'RandomForestClassificationSummary', 'RandomForestClassificationTrainingSummary',
'BinaryRandomForestClassificationSummary',
'BinaryRandomForestClassificationTrainingSummary',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'MultilayerPerceptronClassificationSummary',
'MultilayerPerceptronClassificationTrainingSummary',
'OneVsRest', 'OneVsRestModel',
'FMClassifier', 'FMClassificationModel', 'FMClassificationSummary',
'FMClassificationTrainingSummary']
class _ClassifierParams(HasRawPredictionCol, _PredictorParams):
"""
Classifier Params for classification tasks.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class Classifier(Predictor, _ClassifierParams, metaclass=ABCMeta):
"""
Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class ClassificationModel(PredictionModel, _ClassifierParams, metaclass=ABCMeta):
"""
Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@abstractproperty
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
raise NotImplementedError()
@abstractmethod
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
raise NotImplementedError()
class _ProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, _ClassifierParams):
"""
Params for :py:class:`ProbabilisticClassifier` and
:py:class:`ProbabilisticClassificationModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class ProbabilisticClassifier(Classifier, _ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Probabilistic Classifier for classification tasks.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@inherit_doc
class ProbabilisticClassificationModel(ClassificationModel,
_ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@abstractmethod
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
raise NotImplementedError()
@inherit_doc
class _JavaClassifier(Classifier, JavaPredictor, metaclass=ABCMeta):
"""
Java Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class _JavaClassificationModel(ClassificationModel, JavaPredictionModel):
"""
Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
return self._call_java("predictRaw", value)
@inherit_doc
class _JavaProbabilisticClassifier(ProbabilisticClassifier, _JavaClassifier,
metaclass=ABCMeta):
"""
Java Probabilistic Classifier for classification tasks.
"""
pass
@inherit_doc
class _JavaProbabilisticClassificationModel(ProbabilisticClassificationModel,
_JavaClassificationModel):
"""
Java Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class _ClassificationSummary(JavaWrapper):
"""
Abstraction for multiclass classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def predictions(self):
"""
        DataFrame output by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("3.1.0")
def predictionCol(self):
"""
Field in "predictions" which gives the prediction of each class.
"""
return self._call_java("predictionCol")
@property
@since("3.1.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("3.1.0")
def weightCol(self):
"""
Field in "predictions" which gives the weight of each instance
as a vector.
"""
return self._call_java("weightCol")
@property
def labels(self):
"""
Returns the sequence of labels in ascending order. This order matches the order used
in metrics which are specified as arrays over labels, e.g., truePositiveRateByLabel.
.. versionadded:: 3.1.0
Notes
-----
        In most cases, it will be values {0.0, 1.0, ..., numClasses-1}. However, if the
training set is missing a label, then all of the arrays over labels
(e.g., from truePositiveRateByLabel) will be of length numClasses-1 instead of the
expected numClasses.
"""
return self._call_java("labels")
@property
@since("3.1.0")
def truePositiveRateByLabel(self):
"""
Returns true positive rate for each label (category).
"""
return self._call_java("truePositiveRateByLabel")
@property
@since("3.1.0")
def falsePositiveRateByLabel(self):
"""
Returns false positive rate for each label (category).
"""
return self._call_java("falsePositiveRateByLabel")
@property
@since("3.1.0")
def precisionByLabel(self):
"""
Returns precision for each label (category).
"""
return self._call_java("precisionByLabel")
@property
@since("3.1.0")
def recallByLabel(self):
"""
Returns recall for each label (category).
"""
return self._call_java("recallByLabel")
@since("3.1.0")
def fMeasureByLabel(self, beta=1.0):
"""
Returns f-measure for each label (category).
"""
return self._call_java("fMeasureByLabel", beta)
@property
@since("3.1.0")
def accuracy(self):
"""
Returns accuracy.
        (equal to the fraction of correctly classified instances
        out of the total number of instances.)
"""
return self._call_java("accuracy")
@property
@since("3.1.0")
def weightedTruePositiveRate(self):
"""
Returns weighted true positive rate.
        (equal to precision, recall and f-measure)
"""
return self._call_java("weightedTruePositiveRate")
@property
@since("3.1.0")
def weightedFalsePositiveRate(self):
"""
Returns weighted false positive rate.
"""
return self._call_java("weightedFalsePositiveRate")
@property
@since("3.1.0")
def weightedRecall(self):
"""
Returns weighted averaged recall.
        (equal to precision, recall and f-measure)
"""
return self._call_java("weightedRecall")
@property
@since("3.1.0")
def weightedPrecision(self):
"""
Returns weighted averaged precision.
"""
return self._call_java("weightedPrecision")
@since("3.1.0")
def weightedFMeasure(self, beta=1.0):
"""
Returns weighted averaged f-measure.
"""
return self._call_java("weightedFMeasure", beta)
@inherit_doc
class _TrainingSummary(JavaWrapper):
"""
Abstraction for Training results.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
        iteration. It contains one more element (the initial state)
        than the number of iterations.
"""
return self._call_java("objectiveHistory")
@property
@since("3.1.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class _BinaryClassificationSummary(_ClassificationSummary):
"""
Binary classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def scoreCol(self):
"""
Field in "predictions" which gives the probability or raw prediction
of each class as a vector.
"""
return self._call_java("scoreCol")
@property
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a Dataframe having two fields (FPR, TPR) with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. versionadded:: 3.1.0
Notes
-----
`Wikipedia reference <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
"""
return self._call_java("roc")
@property
@since("3.1.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
"""
return self._call_java("areaUnderROC")
@property
@since("3.1.0")
def pr(self):
"""
        Returns the precision-recall curve, which is a Dataframe
        with two fields (recall, precision), with the point (0.0, 1.0)
        prepended to it.
"""
return self._call_java("pr")
@property
@since("3.1.0")
def fMeasureByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, F-Measure),
        computed with beta = 1.0.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("3.1.0")
def precisionByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, precision).
        Every possible probability obtained in transforming the dataset
        is used as a threshold when calculating the precision.
"""
return self._call_java("precisionByThreshold")
@property
@since("3.1.0")
def recallByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, recall).
        Every possible probability obtained in transforming the dataset
        is used as a threshold when calculating the recall.
"""
return self._call_java("recallByThreshold")
class _LinearSVCParams(_ClassifierParams, HasRegParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth, HasThreshold,
HasMaxBlockSizeInMB):
"""
Params for :py:class:`LinearSVC` and :py:class:`LinearSVCModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"The threshold in binary classification applied to the linear model"
" prediction. This threshold can be any real number, where Inf will make"
" all predictions 0.0 and -Inf will make all predictions 1.0.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_LinearSVCParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2,
maxBlockSizeInMB=0.0)
@inherit_doc
class LinearSVC(_JavaClassifier, _LinearSVCParams, JavaMLWritable, JavaMLReadable):
"""
This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
Only supports L2 regularization currently.
.. versionadded:: 2.2.0
Notes
-----
`Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> svm = LinearSVC()
>>> svm.getMaxIter()
100
>>> svm.setMaxIter(5)
LinearSVC...
>>> svm.getMaxIter()
5
>>> svm.getRegParam()
0.0
>>> svm.setRegParam(0.01)
LinearSVC...
>>> svm.getRegParam()
0.01
>>> model = svm.fit(df)
>>> model.setPredictionCol("newPrediction")
LinearSVCModel...
>>> model.getPredictionCol()
'newPrediction'
>>> model.setThreshold(0.5)
LinearSVCModel...
>>> model.getThreshold()
0.5
>>> model.getMaxBlockSizeInMB()
0.0
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.4831, 1.4831])
>>> result = model.transform(test0).head()
>>> result.newPrediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
@since("2.2.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.2.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
@since("2.2.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.2.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.2.0")
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
@since("2.2.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("2.2.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.2.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
class LinearSVCModel(_JavaClassificationModel, _LinearSVCParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return LinearSVCTrainingSummary(super(LinearSVCModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lsvc_summary = self._call_java("evaluate", dataset)
return LinearSVCSummary(java_lsvc_summary)
class LinearSVCSummary(_BinaryClassificationSummary):
"""
Abstraction for LinearSVC Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class LinearSVCTrainingSummary(LinearSVCSummary, _TrainingSummary):
"""
Abstraction for LinearSVC Training results.
.. versionadded:: 3.1.0
"""
pass
class _LogisticRegressionParams(_ProbabilisticClassifierParams, HasRegParam,
HasElasticNetParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth,
HasThreshold, HasMaxBlockSizeInMB):
"""
Params for :py:class:`LogisticRegression` and :py:class:`LogisticRegressionModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
lowerBoundsOnCoefficients = Param(Params._dummy(), "lowerBoundsOnCoefficients",
"The lower bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
upperBoundsOnCoefficients = Param(Params._dummy(), "upperBoundsOnCoefficients",
"The upper bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
lowerBoundsOnIntercepts = Param(Params._dummy(), "lowerBoundsOnIntercepts",
"The lower bounds on intercepts if fitting under bound "
"constrained optimization. The bounds vector size must be"
"equal with 1 for binomial regression, or the number of"
"lasses for multinomial regression.",
typeConverter=TypeConverters.toVector)
upperBoundsOnIntercepts = Param(Params._dummy(), "upperBoundsOnIntercepts",
"The upper bounds on intercepts if fitting under bound "
"constrained optimization. The bound vector size must be "
"equal with 1 for binomial regression, or the number of "
"classes for multinomial regression.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_LogisticRegressionParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto",
maxBlockSizeInMB=0.0)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self.clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
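    # Worked example (hypothetical numbers): with thresholds=[0.25, 0.75] the
    # equivalent binary threshold is 1 / (1 + 0.25 / 0.75) = 0.75, i.e. class 1
    # is predicted only when its probability exceeds 0.75.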
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self.clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: {0}".format(str(ts)))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
@since("2.3.0")
def getLowerBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self.getOrDefault(self.lowerBoundsOnCoefficients)
@since("2.3.0")
def getUpperBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self.getOrDefault(self.upperBoundsOnCoefficients)
@since("2.3.0")
def getLowerBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self.getOrDefault(self.lowerBoundsOnIntercepts)
@since("2.3.0")
def getUpperBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self.getOrDefault(self.upperBoundsOnIntercepts)
@inherit_doc
class LogisticRegression(_JavaProbabilisticClassifier, _LogisticRegressionParams, JavaMLWritable,
JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
.. versionadded:: 1.3.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(weightCol="weight")
>>> blor.getRegParam()
0.0
>>> blor.setRegParam(0.01)
LogisticRegression...
>>> blor.getRegParam()
0.01
>>> blor.setMaxIter(10)
LogisticRegression...
>>> blor.getMaxIter()
10
>>> blor.clear(blor.maxIter)
>>> blorModel = blor.fit(bdf)
>>> blorModel.setFeaturesCol("features")
LogisticRegressionModel...
>>> blorModel.setProbabilityCol("newProbability")
LogisticRegressionModel...
>>> blorModel.getProbabilityCol()
'newProbability'
>>> blorModel.getMaxBlockSizeInMB()
0.0
>>> blorModel.setThreshold(0.1)
LogisticRegressionModel...
>>> blorModel.getThreshold()
0.1
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> blorModel.evaluate(bdf).accuracy == blorModel.summary.accuracy
True
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> blorModel.predict(test0.head().features)
1.0
>>> blorModel.predictRaw(test0.head().features)
DenseVector([-3.54..., 3.54...])
>>> blorModel.predictProbability(test0.head().features)
DenseVector([0.028, 0.972])
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.newProbability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getRegParam()
0.01
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
>>> model2
LogisticRegressionModel: uid=..., numClasses=2, numFeatures=2
>>> blorModel.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.3.0")
def setLowerBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self._set(lowerBoundsOnCoefficients=value)
@since("2.3.0")
def setUpperBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self._set(upperBoundsOnCoefficients=value)
@since("2.3.0")
def setLowerBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self._set(lowerBoundsOnIntercepts=value)
@since("2.3.0")
def setUpperBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self._set(upperBoundsOnIntercepts=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
def setElasticNetParam(self, value):
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self._set(elasticNetParam=value)
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
class LogisticRegressionModel(_JavaProbabilisticClassificationModel, _LogisticRegressionParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
if self.numClasses <= 2:
return BinaryLogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
return LogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryLogisticRegressionSummary(java_blr_summary)
else:
return LogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(_ClassificationSummary):
"""
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary, _TrainingSummary):
"""
Abstraction for multinomial Logistic Regression Training results.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionSummary(_BinaryClassificationSummary,
LogisticRegressionSummary):
"""
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class _DecisionTreeClassifierParams(_DecisionTreeParams, _TreeClassifierParams):
"""
Params for :py:class:`DecisionTreeClassifier` and :py:class:`DecisionTreeClassificationModel`.
"""
def __init__(self, *args):
super(_DecisionTreeClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeClassifier(_JavaProbabilisticClassifier, _DecisionTreeClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed", leafCol="leafId")
>>> model = dt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
DecisionTreeClassificationModel...
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> si3 = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model3 = si3.fit(df3)
>>> td3 = si_model3.transform(df3)
>>> dt3 = DecisionTreeClassifier(maxDepth=2, weightCol="weight", labelCol="indexed")
>>> model3 = dt3.fit(td3)
>>> print(model3.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class DecisionTreeClassificationModel(_DecisionTreeModel, _JavaProbabilisticClassificationModel,
_DecisionTreeClassifierParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
          where gain is scaled by the number of instances passing through the node
- Normalize importances for tree to sum to 1.
.. versionadded:: 2.0.0
Notes
-----
Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class _RandomForestClassifierParams(_RandomForestParams, _TreeClassifierParams):
"""
Params for :py:class:`RandomForestClassifier` and :py:class:`RandomForestClassificationModel`.
"""
def __init__(self, *args):
super(_RandomForestClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0, leafCol="", minWeightFractionPerNode=0.0,
bootstrap=True)
@inherit_doc
class RandomForestClassifier(_JavaProbabilisticClassifier, _RandomForestClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> rf.getMinWeightFractionPerNode()
0.0
>>> model = rf.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
RandomForestClassificationModel...
>>> model.setRawPredictionCol("newRawPrediction")
RandomForestClassificationModel...
>>> model.getBootstrap()
True
>>> model.getRawPredictionCol()
'newRawPrediction'
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([2.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.newRawPrediction)
0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel...depth=..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
        Sets params for random forest classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("3.0.0")
def setBootstrap(self, value):
"""
Sets the value of :py:attr:`bootstrap`.
"""
return self._set(bootstrap=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class RandomForestClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_RandomForestClassifierParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
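        For example, if a feature has importances 0.2, 0.4 and 0.6 in a
        three-tree ensemble, its raw ensemble importance is their average, 0.4,
        which is then rescaled so that the full importance vector sums to 1.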
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@property
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
if self.numClasses <= 2:
return BinaryRandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
return RandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_rf_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryRandomForestClassificationSummary(java_rf_summary)
else:
return RandomForestClassificationSummary(java_rf_summary)
class RandomForestClassificationSummary(_ClassificationSummary):
"""
Abstraction for RandomForestClassification Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class RandomForestClassificationTrainingSummary(RandomForestClassificationSummary,
_TrainingSummary):
"""
    Abstraction for RandomForestClassification Training results.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationSummary(_BinaryClassificationSummary):
"""
BinaryRandomForestClassification results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationTrainingSummary(BinaryRandomForestClassificationSummary,
RandomForestClassificationTrainingSummary):
"""
BinaryRandomForestClassification training results for a given model.
.. versionadded:: 3.1.0
"""
pass
class _GBTClassifierParams(_GBTParams, _HasVarianceImpurity):
"""
Params for :py:class:`GBTClassifier` and :py:class:`GBTClassifierModel`.
.. versionadded:: 3.0.0
"""
supportedLossTypes = ["logistic"]
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(supportedLossTypes),
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_GBTClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
leafCol="", minWeightFractionPerNode=0.0)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
@inherit_doc
class GBTClassifier(_JavaProbabilisticClassifier, _GBTClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
.. versionadded:: 1.4.0
Notes
-----
Multiclass labels are not currently supported.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
Examples
--------
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> gbt.setMaxIter(5)
GBTClassifier...
>>> gbt.setMinWeightFractionPerNode(0.049)
GBTClassifier...
>>> gbt.getMaxIter()
5
>>> gbt.getFeatureSubsetStrategy()
'all'
>>> model = gbt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
GBTClassificationModel...
>>> model.setThresholds([0.3, 0.7])
GBTClassificationModel...
>>> model.getThresholds()
[0.3, 0.7]
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.1697, -1.1697])
>>> model.predictProbability(test0.head().features)
DenseVector([0.9121, 0.0879])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel...numTrees=5...
>>> gbtc_path = temp_path + "gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.trees
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0),)],
... ["indexed", "features"])
>>> model.evaluateEachIteration(validation)
[0.25..., 0.23..., 0.21..., 0.19..., 0.18...]
>>> model.numClasses
2
>>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
>>> gbt.getValidationIndicatorCol()
'validationIndicator'
>>> gbt.getValidationTol()
0.01
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, impurity="variance",
featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
@since("3.0.0")
def setValidationIndicatorCol(self, value):
"""
Sets the value of :py:attr:`validationIndicatorCol`.
"""
return self._set(validationIndicatorCol=value)
@since("1.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("1.4.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.4.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class GBTClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_GBTClassifierParams, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
def evaluateEachIteration(self, dataset):
"""
Method to compute error or loss for every iteration of gradient boosting.
.. versionadded:: 2.4.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
return self._call_java("evaluateEachIteration", dataset)
class _NaiveBayesParams(_PredictorParams, HasWeightCol):
"""
Params for :py:class:`NaiveBayes` and :py:class:`NaiveBayesModel`.
.. versionadded:: 3.0.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
"(case-sensitive). Supported options: multinomial (default), bernoulli " +
"and gaussian.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_NaiveBayesParams, self).__init__(*args)
self._setDefault(smoothing=1.0, modelType="multinomial")
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
@inherit_doc
class NaiveBayes(_JavaProbabilisticClassifier, _NaiveBayesParams, HasThresholds, HasWeightCol,
JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
    TF-IDF vectors, it can be used for document classification. By making every vector
    binary (0/1) data, it can also be used as `Bernoulli NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values for Multinomial NB and Bernoulli NB must be nonnegative.
Since 3.0.0, it supports Complement NB which is an adaptation of the Multinomial NB.
Specifically, Complement NB uses statistics from the complement of each class to compute
the model's coefficients. The inventors of Complement NB show empirically that the parameter
estimates for CNB are more stable than those for Multinomial NB. Like Multinomial NB, the
input feature values for Complement NB must be nonnegative.
    Since 3.0.0, it also supports `Gaussian NB \
    <https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Gaussian_naive_Bayes>`_,
    which can handle continuous data.
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.setFeaturesCol("features")
NaiveBayesModel...
>>> model.getSmoothing()
1.0
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> model.sigma
DenseMatrix(0, 0, [...], ...)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.72..., -0.99...])
>>> model.predictProbability(test0.head().features)
DenseVector([0.32..., 0.67...])
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
>>> nb3 = NaiveBayes().setModelType("gaussian")
>>> model4 = nb3.fit(df)
>>> model4.getModelType()
'gaussian'
>>> model4.sigma
DenseMatrix(2, 2, [0.0, 0.25, 0.0, 0.0], 1)
>>> nb5 = NaiveBayes(smoothing=1.0, modelType="complement", weightCol="weight")
>>> model5 = nb5.fit(df)
>>> model5.getModelType()
'complement'
>>> model5.theta
DenseMatrix(2, 2, [...], 1)
>>> model5.sigma
DenseMatrix(0, 0, [...], ...)
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
class NaiveBayesModel(_JavaProbabilisticClassificationModel, _NaiveBayesParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
        Log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
        Log of class conditional probabilities.
"""
return self._call_java("theta")
@property
@since("3.0.0")
def sigma(self):
"""
        Variance of each feature.
"""
return self._call_java("sigma")
class _MultilayerPerceptronParams(_ProbabilisticClassifierParams, HasSeed, HasMaxIter,
HasTol, HasStepSize, HasSolver, HasBlockSize):
"""
Params for :py:class:`MultilayerPerceptronClassifier`.
.. versionadded:: 3.0.0
"""
layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer " +
"E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
"neurons and output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_MultilayerPerceptronParams, self).__init__(*args)
self._setDefault(maxIter=100, tol=1E-6, blockSize=128, stepSize=0.03, solver="l-bfgs")
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
@inherit_doc
class MultilayerPerceptronClassifier(_JavaProbabilisticClassifier, _MultilayerPerceptronParams,
JavaMLWritable, JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
Each layer has sigmoid activation function, output layer has softmax.
Number of inputs has to be equal to the size of feature vectors.
Number of outputs has to be equal to the total number of labels.
.. versionadded:: 1.6.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
>>> mlp.setMaxIter(100)
MultilayerPerceptronClassifier...
>>> mlp.getMaxIter()
100
>>> mlp.getBlockSize()
128
>>> mlp.setBlockSize(1)
MultilayerPerceptronClassifier...
>>> mlp.getBlockSize()
1
>>> model = mlp.fit(df)
>>> model.setFeaturesCol("features")
MultilayerPerceptronClassificationModel...
>>> model.getMaxIter()
100
>>> model.getLayers()
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.predict(testDF.head().features)
1.0
>>> model.predictRaw(testDF.head().features)
DenseVector([-16.208, 16.344])
>>> model.predictProbability(testDF.head().features)
DenseVector([0.0, 1.0])
>>> model.transform(testDF).select("features", "prediction").show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.getLayers() == model2.getLayers()
True
>>> model.weights == model2.weights
True
>>> model.transform(testDF).take(1) == model2.transform(testDF).take(1)
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.getLayers() == model.getLayers()
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction")
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction"):
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
class MultilayerPerceptronClassificationModel(_JavaProbabilisticClassificationModel,
_MultilayerPerceptronParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def weights(self):
"""
        The weights of layers.
"""
return self._call_java("weights")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return MultilayerPerceptronClassificationTrainingSummary(
super(MultilayerPerceptronClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_mlp_summary = self._call_java("evaluate", dataset)
return MultilayerPerceptronClassificationSummary(java_mlp_summary)
class MultilayerPerceptronClassificationSummary(_ClassificationSummary):
"""
Abstraction for MultilayerPerceptronClassifier Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class MultilayerPerceptronClassificationTrainingSummary(MultilayerPerceptronClassificationSummary,
_TrainingSummary):
"""
Abstraction for MultilayerPerceptronClassifier Training results.
.. versionadded:: 3.1.0
"""
pass
class _OneVsRestParams(_ClassifierParams, HasWeightCol):
"""
    Params for :py:class:`OneVsRest` and :py:class:`OneVsRestModel`.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, _OneVsRestParams, HasParallelism, MLReadable, MLWritable):
"""
Reduction of Multiclass Classification to Binary Classification.
Performs reduction using one against all strategy.
For a multiclass classification with k classes, train k models (one per class).
Each example is scored against all k models and the model with highest score
is picked to label the example.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> df = spark.read.format("libsvm").load(data_path)
>>> lr = LogisticRegression(regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> ovr.getRawPredictionCol()
'rawPrediction'
>>> ovr.setPredictionCol("newPrediction")
OneVsRest...
>>> model = ovr.fit(df)
>>> model.models[0].coefficients
DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
>>> model.models[1].coefficients
DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
>>> model.models[2].coefficients
DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
>>> [x.intercept for x in model.models]
[-2.7..., -2.5..., -1.3...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
>>> model.transform(test0).head().newPrediction
0.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().newPrediction
2.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
>>> model.transform(test2).head().newPrediction
0.0
>>> model_path = temp_path + "/ovr_model"
>>> model.save(model_path)
>>> model2 = OneVsRestModel.load(model_path)
>>> model2.transform(test0).head().newPrediction
0.0
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.transform(test2).columns
['features', 'rawPrediction', 'newPrediction']
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
super(OneVsRest, self).__init__()
self._setDefault(parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
Sets params for OneVsRest.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
"""
return self._set(classifier=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
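        # Labels are assumed to be consecutive doubles starting at 0.0, so the
        # number of classes is one more than the maximum observed label.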
weightCol = None
if (self.isDefined(self.weightCol) and self.getWeightCol()):
if isinstance(classifier, HasWeightCol):
weightCol = self.getWeightCol()
else:
warnings.warn("weightCol is ignored, "
"as it is not supported by {} now.".format(classifier))
if weightCol:
multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol)
else:
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
def trainSingleClass(index):
binaryLabelCol = "mc2b$" + str(index)
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
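            # One-vs-rest relabeling: rows of the current class get the positive
            # label 1.0; rows of every other class get the negative label 0.0.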
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
if weightCol:
paramMap[classifier.weightCol] = weightCol
return classifier.fit(trainingDataset, paramMap)
pool = ThreadPool(processes=min(self.getParallelism(), numClasses))
models = pool.map(trainSingleClass, range(numClasses))
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
        Parameters
        ----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRest`
Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
rawPredictionCol = java_stage.getRawPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
parallelism = java_stage.getParallelism()
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
rawPredictionCol=rawPredictionCol, classifier=classifier,
parallelism=parallelism)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage.setWeightCol(java_stage.getWeightCol())
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.setWeightCol(self.getWeightCol())
_java_obj.setRawPredictionCol(self.getRawPredictionCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestReader(cls)
def write(self):
if isinstance(self.getClassifier(), JavaMLWritable):
return JavaMLWriter(self)
else:
return OneVsRestWriter(self)
class _OneVsRestSharedReadWrite:
@staticmethod
def saveImpl(instance, sc, path, extraMetadata=None):
skipParams = ['classifier']
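        # The nested classifier is not JSON-serializable alongside the other
        # params, so it is skipped here and persisted separately below.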
jsonParams = DefaultParamsWriter.extractJsonParams(instance, skipParams)
DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams,
extraMetadata=extraMetadata)
classifierPath = os.path.join(path, 'classifier')
instance.getClassifier().save(classifierPath)
@staticmethod
def loadClassifier(path, sc):
classifierPath = os.path.join(path, 'classifier')
return DefaultParamsReader.loadParamsInstance(classifierPath, sc)
@staticmethod
def validateParams(instance):
elems_to_check = [instance.getClassifier()]
if isinstance(instance, OneVsRestModel):
elems_to_check.extend(instance.models)
for elem in elems_to_check:
if not isinstance(elem, MLWritable):
raise ValueError(f'OneVsRest write will fail because it contains {elem.uid} '
f'which is not writable.')
@inherit_doc
class OneVsRestReader(MLReader):
def __init__(self, cls):
super(OneVsRestReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
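        # Instances saved from the JVM are delegated to the Java reader; Python-only
        # instances are rebuilt from the saved classifier and the remaining params.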
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
ova = OneVsRest(classifier=classifier)._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(ova, metadata, skipParams=['classifier'])
return ova
@inherit_doc
class OneVsRestWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_OneVsRestSharedReadWrite.validateParams(self.instance)
_OneVsRestSharedReadWrite.saveImpl(self.instance, self.sc, path)
class OneVsRestModel(Model, _OneVsRestParams, MLReadable, MLWritable):
"""
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
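        # Pure-Python submodels have no Java counterparts; in that case the
        # model stays Python-only and no Java OneVsRestModel is constructed.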
if not isinstance(models[0], JavaMLWritable):
return
# set java instance
java_models = [model._to_java() for model in self.models]
sc = SparkContext._active_spark_context
java_models_array = JavaWrapper._new_java_array(java_models,
sc._gateway.jvm.org.apache.spark.ml
.classification.ClassificationModel)
# TODO: need to set metadata
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
self._java_obj = \
JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = self.getRawPredictionCol()
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
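            # rawPrediction from each binary model is a 2-element vector
            # [score(negative), score(positive)]; keep the positive-class score.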
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
if self.getRawPredictionCol():
            def func(predictions):
                return Vectors.dense(list(predictions))
rawPredictionUDF = udf(func)
aggregatedDataset = aggregatedDataset.withColumn(
self.getRawPredictionCol(), rawPredictionUDF(aggregatedDataset[accColName]))
if self.getPredictionCol():
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(lambda predictions: float(max(enumerate(predictions),
key=operator.itemgetter(1))[0]), DoubleType())
aggregatedDataset = aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName]))
return aggregatedDataset.drop(accColName)
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRestModel`
Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol)\
.setFeaturesCol(featuresCol)
py_stage._set(labelCol=labelCol)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage._set(weightCol=java_stage.getWeightCol())
py_stage._set(classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.set("weightCol", self.getWeightCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestModelReader(cls)
def write(self):
if all(map(lambda elem: isinstance(elem, JavaMLWritable),
[self.getClassifier()] + self.models)):
return JavaMLWriter(self)
else:
return OneVsRestModelWriter(self)
@inherit_doc
class OneVsRestModelReader(MLReader):
def __init__(self, cls):
super(OneVsRestModelReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
numClasses = metadata['numClasses']
subModels = [None] * numClasses
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
subModels[idx] = DefaultParamsReader.loadParamsInstance(subModelPath, self.sc)
ovaModel = OneVsRestModel(subModels)._resetUid(metadata['uid'])
ovaModel.set(ovaModel.classifier, classifier)
DefaultParamsReader.getAndSetParams(ovaModel, metadata, skipParams=['classifier'])
return ovaModel
@inherit_doc
class OneVsRestModelWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestModelWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_OneVsRestSharedReadWrite.validateParams(self.instance)
instance = self.instance
numClasses = len(instance.models)
extraMetadata = {'numClasses': numClasses}
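        # Each per-class submodel is written to its own subdirectory
        # (model_0 ... model_<numClasses - 1>) next to the shared metadata.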
_OneVsRestSharedReadWrite.saveImpl(instance, self.sc, path, extraMetadata=extraMetadata)
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
instance.models[idx].save(subModelPath)
@inherit_doc
class FMClassifier(_JavaProbabilisticClassifier, _FactorizationMachinesParams, JavaMLWritable,
JavaMLReadable):
"""
Factorization Machines learning algorithm for classification.
Solver supports:
* gd (normal mini-batch gradient descent)
* adamW (default)
.. versionadded:: 3.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.classification import FMClassifier
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> fm = FMClassifier(factorSize=2)
>>> fm.setSeed(11)
FMClassifier...
>>> model = fm.fit(df)
>>> model.getMaxIter()
100
>>> test0 = spark.createDataFrame([
... (Vectors.dense(-1.0),),
... (Vectors.dense(0.5),),
... (Vectors.dense(1.0),),
... (Vectors.dense(2.0),)], ["features"])
>>> model.predictRaw(test0.head().features)
DenseVector([22.13..., -22.13...])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.transform(test0).select("features", "probability").show(10, False)
+--------+------------------------------------------+
|features|probability |
+--------+------------------------------------------+
|[-1.0] |[0.9999999997574736,2.425264676902229E-10]|
|[0.5] |[0.47627851732981163,0.5237214826701884] |
|[1.0] |[5.491554426243495E-4,0.9994508445573757] |
|[2.0] |[2.005766663870645E-10,0.9999999997994233]|
+--------+------------------------------------------+
...
>>> model.intercept
-7.316665276826291
>>> model.linear
DenseVector([14.8232])
>>> model.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model_path = temp_path + "/fm_model"
>>> model.save(model_path)
>>> model2 = FMClassificationModel.load(model_path)
>>> model2.intercept
-7.316665276826291
>>> model2.linear
DenseVector([14.8232])
>>> model2.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
"""
super(FMClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.FMClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
Sets Params for FMClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return FMClassificationModel(java_model)
@since("3.0.0")
def setFactorSize(self, value):
"""
Sets the value of :py:attr:`factorSize`.
"""
return self._set(factorSize=value)
@since("3.0.0")
def setFitLinear(self, value):
"""
Sets the value of :py:attr:`fitLinear`.
"""
return self._set(fitLinear=value)
@since("3.0.0")
def setMiniBatchFraction(self, value):
"""
Sets the value of :py:attr:`miniBatchFraction`.
"""
return self._set(miniBatchFraction=value)
@since("3.0.0")
def setInitStd(self, value):
"""
Sets the value of :py:attr:`initStd`.
"""
return self._set(initStd=value)
@since("3.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("3.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("3.0.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
class FMClassificationModel(_JavaProbabilisticClassificationModel, _FactorizationMachinesParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by :class:`FMClassifier`.
.. versionadded:: 3.0.0
"""
@property
@since("3.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("3.0.0")
def linear(self):
"""
Model linear term.
"""
return self._call_java("linear")
@property
@since("3.0.0")
def factors(self):
"""
Model factor term.
"""
return self._call_java("factors")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return FMClassificationTrainingSummary(super(FMClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_fm_summary = self._call_java("evaluate", dataset)
return FMClassificationSummary(java_fm_summary)
class FMClassificationSummary(_BinaryClassificationSummary):
"""
Abstraction for FMClassifier Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class FMClassificationTrainingSummary(FMClassificationSummary, _TrainingSummary):
"""
Abstraction for FMClassifier Training results.
.. versionadded:: 3.1.0
"""
pass
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
witgo/spark
|
python/pyspark/ml/classification.py
|
Python
|
apache-2.0
| 126,617
|
[
"Gaussian"
] |
61a6935896663f12e2503aa5fd93fe3a963164208384ad2d5a00f4f6d9c7698e
|